net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
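
/* Usage sketch (assuming the default debugfs mount point and a controller
 * registered as hci0): the attributes created by hci_debugfs_create_basic()
 * below can be driven from userspace with, e.g.:
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 */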

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        bool enable;
        int err;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or in user channel operation, there is no need
         * for the vendor callback. Instead just store the desired value and
         * the setting will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

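/* Note on hdev->commands[]: this is the Supported Commands bitmap returned
 * by Read Local Supported Commands, one octet per array index. A test such
 * as "commands[14] & 0x20" below therefore checks bit 5 of octet 14, which
 * per the Core Specification corresponds to Read Local Supported Features.
 */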
static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs: 0x7d00 = 32000 slots of
         * 0.625 ms each, i.e. 20000 ms
         */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

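/* For the events enabled below, bit (n - 1) of the 64-bit event mask
 * corresponds to the event with event code n, i.e.
 * events[(n - 1) / 8] |= 1 << ((n - 1) % 8). For example, Disconnection
 * Complete (event code 0x05) lands in events[0] as bit 4, the 0x10 set
 * further down.
 */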
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However some controllers list
                 * the max_page as 0 as long as SSP has not been enabled.
                 * To achieve proper debugging output, force the minimum
                 * max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If Connectionless Peripheral Broadcast central role is supported,
         * enable all necessary events for it.
         */
        if (lmp_cpb_central_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Peripheral Page Response Timeout */
                events[2] |= 0x20;      /* CPB Channel Map Change */
                changed = true;
        }

        /* If Connectionless Peripheral Broadcast peripheral role is
         * supported, enable all necessary events for it.
         */
        if (lmp_cpb_peripheral_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CPB Receive */
                events[2] |= 0x04;      /* CPB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for Set Event
         * Mask Page 2 command, but then actually do not support it. Since
         * the default value is all bits set to zero, the command is only
         * required if the event mask has to be changed. In case no change
         * to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        if (hdev->commands[18] & 0x04 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports LL Privacy feature, enable
                 * the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
                        events[1] |= 0x02;      /* LE Enhanced Connection
                                                 * Complete
                                                 */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports Channel Selection Algorithm #2
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;      /* LE PHY Update Complete */

                /* If the controller supports LE Set Extended Scan Parameters
                 * and LE Set Extended Scan Enable commands, enable the
                 * corresponding event.
                 */
                if (use_ext_scan(hdev))
                        events[1] |= 0x10;      /* LE Extended Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Extended Advertising
                 * command, enable the corresponding event.
                 */
                if (ext_adv_capable(hdev))
                        events[2] |= 0x02;      /* LE Advertising Set
                                                 * Terminated
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                /* Read LE Advertising Channel TX Power */
                if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
                        /* The HCI TS spec forbids mixing legacy and extended
                         * advertising commands, and READ_ADV_TX_POWER is
                         * included in the legacy set. So do not call it if
                         * extended advertising is supported; otherwise the
                         * controller will return COMMAND_DISALLOWED for
                         * extended commands.
                         */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if ((hdev->commands[38] & 0x80) &&
                    !test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) {
                        /* Read LE Min/Max Tx Power */
                        hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE Accept List Size */
                        hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE Accept List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
                }

                if (hdev->commands[34] & 0x40) {
                        /* Read LE Resolving List Size */
                        hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[34] & 0x20) {
                        /* Clear LE Resolving List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
                }

                if (hdev->commands[35] & 0x04) {
                        __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);

                        /* Set RPA timeout */
                        hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
                                    &rpa_timeout);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                if (ext_adv_capable(hdev)) {
                        /* Read LE Number of Supported Advertising Sets */
                        hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
                                    0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They clearly indicate its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported, assume that the controller
         * does not have actual support for stored link keys, which makes
         * this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Read local pairing options if the HCI command is supported */
        if (hdev->commands[41] & 0x08)
                hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* Set erroneous data reporting if supported to the wideband speech
         * setting value
         */
        if (hdev->commands[18] & 0x08 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
                bool enabled = hci_dev_test_flag(hdev,
                                                 HCI_WIDEBAND_SPEECH_ENABLED);

                if (enabled !=
                    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
                        struct hci_cp_write_def_err_data_reporting cp;

                        cp.err_data_reporting = enabled ?
                                                ERR_DATA_REPORTING_ENABLED :
                                                ERR_DATA_REPORTING_DISABLED;

                        hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
                                    sizeof(cp), &cp);
                }
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
                cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                cp.all_phys = 0x00;
                cp.tx_phys = hdev->le_tx_def_phys;
                cp.rx_phys = hdev->le_rx_def_phys;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}

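/* Controller bring-up runs as a sequence of synchronous request stages:
 * hci_init1_req resets the controller and issues the transport-specific
 * discovery commands (bredr_init()/amp_init1()), hci_init2_req configures
 * the BR/EDR and LE basics, and hci_init3_req/hci_init4_req (HCI_PRIMARY
 * only) program the event masks and optional features based on what the
 * earlier stages reported.
 */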
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, this function will not be called; it will only be called
         * later, during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return; release it with hci_dev_put() when done. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

#ifdef TIZEN_BT
bool hci_le_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->le_discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_le_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name,
                        hdev->le_discovery.state, state);

        if (hdev->le_discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (hdev->le_discovery.state != DISCOVERY_STARTING)
                        mgmt_le_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_le_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->le_discovery.state = state;
}
#endif

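/* The inquiry cache helpers below are called with the device lock held
 * (hci_dev_lock()), as hci_inquiry() further down does around its cache
 * accesses.
 */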
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

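/* Re-insert the entry so that the resolve list stays sorted by ascending
 * |RSSI|, i.e. strongest device first, skipping over entries whose name
 * resolution is already pending.
 */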
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

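        /* Inquiry_Length is specified in units of 1.28 s; budgeting 2000 ms
         * per unit below presumably leaves headroom for the controller to
         * deliver its final results.
         */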
        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep. Therefore allocate a temporary
         * buffer and then copy it to user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
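
/* Userspace usage sketch (an assumption, mirroring the BlueZ pattern rather
 * than code from this file): HCIINQUIRY expects the request header to be
 * followed, in the same buffer, by room for the inquiry_info results that
 * are copied back above.
 *
 *   int sock = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *   struct hci_inquiry_req *ir;
 *   void *buf = malloc(sizeof(*ir) + 255 * sizeof(struct inquiry_info));
 *
 *   ir = buf;
 *   ir->dev_id  = 0;                      (hci0)
 *   ir->flags   = IREQ_CACHE_FLUSH;
 *   ir->lap[0]  = 0x33;                   (GIAC 0x9e8b33, LSB first)
 *   ir->lap[1]  = 0x8b;
 *   ir->lap[2]  = 0x9e;
 *   ir->length  = 8;                      (8 * 1.28 s)
 *   ir->num_rsp = 0;                      (unlimited, capped at 255 here)
 *
 *   if (ioctl(sock, HCIINQUIRY, buf) < 0)
 *           perror("HCIINQUIRY");
 */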

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *                                     (BD_ADDR) for a HCI device from
 *                                     a firmware node property.
 * @hdev:       The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables, but were not updated by the firmware. For
 * example, the DTS could define 'local-bd-address', with zero BD addresses.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
        struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
        bdaddr_t ba;
        int ret;

        ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
                                            (u8 *)&ba, sizeof(ba));
        if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
                return;

        bacpy(&hdev->public_addr, &ba);
}
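
/* Devicetree sketch (hypothetical node, not from this file): the property
 * read above expects the address least-significant byte first, so a public
 * address of 00:11:22:33:44:55 would be declared as:
 *
 *   bluetooth {
 *           local-bd-address = [ 55 44 33 22 11 00 ];
 *   };
 */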

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_PRIMARY &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP) ||
            test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
                bool invalid_bdaddr;

                hci_sock_dev_event(hdev, HCI_DEV_SETUP);

                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set the quirk to mark the
                 * BD_ADDR invalid before creating the HCI device or in
                 * its setup callback.
                 */
                invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
                                          &hdev->quirks);

                if (ret)
                        goto setup_failed;

                if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
                        if (!bacmp(&hdev->public_addr, BDADDR_ANY))
                                hci_dev_get_bd_addr_from_property(hdev);

                        if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                            hdev->set_bdaddr) {
                                ret = hdev->set_bdaddr(hdev,
                                                       &hdev->public_addr);
1572
1573                                 /* If setting of the BD_ADDR from the device
1574                                  * property succeeds, then treat the address
1575                                  * as valid even if the invalid BD_ADDR
1576                                  * quirk indicates otherwise.
1577                                  */
1578                                 if (!ret)
1579                                         invalid_bdaddr = false;
1580                         }
1581                 }
1582
1583 setup_failed:
1584                 /* The transport driver can set these quirks before
1585                  * creating the HCI device or in its setup callback.
1586                  *
1587                  * For the invalid BD_ADDR quirk it is possible that
1588                  * it becomes a valid address if the bootloader does
1589                  * provide it (see above).
1590                  *
1591                  * In case any of them is set, the controller has to
1592                  * start up as unconfigured.
1593                  */
1594                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1595                     invalid_bdaddr)
1596                         hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1597
1598                 /* For an unconfigured controller it is required to
1599                  * read at least the version information provided by
1600                  * the Read Local Version Information command.
1601                  *
1602                  * If the set_bdaddr driver callback is provided, then
1603                  * also the original Bluetooth public device address
1604                  * will be read using the Read BD Address command.
1605                  */
1606                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1607                         ret = __hci_unconf_init(hdev);
1608         }
1609
1610         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1611                 /* If public address change is configured, ensure that
1612                  * the address gets programmed. If the driver does not
1613                  * support changing the public address, fail the power
1614                  * on procedure.
1615                  */
1616                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1617                     hdev->set_bdaddr)
1618                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1619                 else
1620                         ret = -EADDRNOTAVAIL;
1621         }
1622
1623         if (!ret) {
1624                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1625                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1626                         ret = __hci_init(hdev);
1627                         if (!ret && hdev->post_init)
1628                                 ret = hdev->post_init(hdev);
1629                 }
1630         }
1631
1632         /* If the HCI Reset command is clearing all diagnostic settings,
1633          * then they need to be reprogrammed after the init procedure
1634          * has completed.
1635          */
1636         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1637             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1638             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1639                 ret = hdev->set_diag(hdev, true);
1640
1641         msft_do_open(hdev);
1642         aosp_do_open(hdev);
1643
1644         clear_bit(HCI_INIT, &hdev->flags);
1645
1646         if (!ret) {
1647                 hci_dev_hold(hdev);
1648                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1649                 hci_adv_instances_set_rpa_expired(hdev, true);
1650                 set_bit(HCI_UP, &hdev->flags);
1651                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1652                 hci_leds_update_powered(hdev, true);
1653                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1654                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1655                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1656                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1657                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1658                     hdev->dev_type == HCI_PRIMARY) {
1659                         ret = __hci_req_hci_power_on(hdev);
1660                         mgmt_power_on(hdev, ret);
1661                 }
1662         } else {
1663                 /* Init failed, cleanup */
1664                 flush_work(&hdev->tx_work);
1665
1666                 /* Since hci_rx_work() can wake up new cmd_work, it
1667                  * should be flushed first to avoid an unexpected call
1668                  * to hci_cmd_work().
1669                  */
1670                 flush_work(&hdev->rx_work);
1671                 flush_work(&hdev->cmd_work);
1672
1673                 skb_queue_purge(&hdev->cmd_q);
1674                 skb_queue_purge(&hdev->rx_q);
1675
1676                 if (hdev->flush)
1677                         hdev->flush(hdev);
1678
1679                 if (hdev->sent_cmd) {
1680                         cancel_delayed_work_sync(&hdev->cmd_timer);
1681                         kfree_skb(hdev->sent_cmd);
1682                         hdev->sent_cmd = NULL;
1683                 }
1684
1685                 clear_bit(HCI_RUNNING, &hdev->flags);
1686                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1687
1688                 hdev->close(hdev);
1689                 hdev->flags &= BIT(HCI_RAW);
1690         }
1691
1692 done:
1693         hci_req_sync_unlock(hdev);
1694         return ret;
1695 }
1696
1697 /* ---- HCI ioctl helpers ---- */
1698
1699 int hci_dev_open(__u16 dev)
1700 {
1701         struct hci_dev *hdev;
1702         int err;
1703
1704         hdev = hci_dev_get(dev);
1705         if (!hdev)
1706                 return -ENODEV;
1707
1708         /* Devices that are marked as unconfigured can only be powered
1709          * up as user channel. Trying to bring them up as normal devices
1710          * will result in a failure. Only user channel operation is
1711          * possible.
1712          *
1713          * When this function is called for a user channel, the flag
1714          * HCI_USER_CHANNEL will be set first before attempting to
1715          * open the device.
1716          */
1717         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1718             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1719                 err = -EOPNOTSUPP;
1720                 goto done;
1721         }
1722
1723         /* We need to ensure that no other power on/off work is pending
1724          * before proceeding to call hci_dev_do_open. This is
1725          * particularly important if the setup procedure has not yet
1726          * completed.
1727          */
1728         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1729                 cancel_delayed_work(&hdev->power_off);
1730
1731         /* After this call it is guaranteed that the setup procedure
1732          * has finished. This means that error conditions like RFKILL or
1733          * a missing public or static random address can now be checked.
1734          */
1735         flush_workqueue(hdev->req_workqueue);
1736
1737         /* For controllers not using the management interface and that
1738          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1739          * so that pairing works for them. Once the management interface
1740          * is in use this bit will be cleared again and userspace has
1741          * to explicitly enable it.
1742          */
1743         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1744             !hci_dev_test_flag(hdev, HCI_MGMT))
1745                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1746
1747         err = hci_dev_do_open(hdev);
1748
1749 done:
1750         hci_dev_put(hdev);
1751         return err;
1752 }
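
/* Illustrative legacy-userspace sketch (hypothetical, for orientation):
 * hci_dev_open() above is reached through the HCIDEVUP ioctl on a raw
 * HCI socket.
 *
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(sk, HCIDEVUP, 0) < 0)		(0 is the hci0 index)
 *		perror("HCIDEVUP");		(EALREADY if already up)
 */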
1753
1754 /* This function requires the caller holds hdev->lock */
1755 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1756 {
1757         struct hci_conn_params *p;
1758
1759         list_for_each_entry(p, &hdev->le_conn_params, list) {
1760                 if (p->conn) {
1761                         hci_conn_drop(p->conn);
1762                         hci_conn_put(p->conn);
1763                         p->conn = NULL;
1764                 }
1765                 list_del_init(&p->action);
1766         }
1767
1768         BT_DBG("All LE pending actions cleared");
1769 }
1770
1771 int hci_dev_do_close(struct hci_dev *hdev)
1772 {
1773         bool auto_off;
1774         int err = 0;
1775
1776         BT_DBG("%s %p", hdev->name, hdev);
1777
1778         cancel_delayed_work(&hdev->power_off);
1779         cancel_delayed_work(&hdev->ncmd_timer);
1780
1781         hci_request_cancel_all(hdev);
1782         hci_req_sync_lock(hdev);
1783
1784         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1785             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1786             test_bit(HCI_UP, &hdev->flags)) {
1787                 /* Execute vendor specific shutdown routine */
1788                 if (hdev->shutdown)
1789                         err = hdev->shutdown(hdev);
1790         }
1791
1792         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1793                 cancel_delayed_work_sync(&hdev->cmd_timer);
1794                 hci_req_sync_unlock(hdev);
1795                 return err;
1796         }
1797
1798         hci_leds_update_powered(hdev, false);
1799
1800         /* Flush RX and TX works */
1801         flush_work(&hdev->tx_work);
1802         flush_work(&hdev->rx_work);
1803
1804         if (hdev->discov_timeout > 0) {
1805                 hdev->discov_timeout = 0;
1806                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1807                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1808         }
1809
1810         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1811                 cancel_delayed_work(&hdev->service_cache);
1812
1813         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1814                 struct adv_info *adv_instance;
1815
1816                 cancel_delayed_work_sync(&hdev->rpa_expired);
1817
1818                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1819                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1820         }
1821
1822         /* Avoid potential lockdep warnings from the *_flush() calls by
1823          * ensuring the workqueue is empty up front.
1824          */
1825         drain_workqueue(hdev->workqueue);
1826
1827         hci_dev_lock(hdev);
1828
1829         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1830
1831         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1832
1833         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1834             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1835             hci_dev_test_flag(hdev, HCI_MGMT))
1836                 __mgmt_power_off(hdev);
1837
1838         hci_inquiry_cache_flush(hdev);
1839         hci_pend_le_actions_clear(hdev);
1840         hci_conn_hash_flush(hdev);
1841         hci_dev_unlock(hdev);
1842
1843         smp_unregister(hdev);
1844
1845         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1846
1847         aosp_do_close(hdev);
1848         msft_do_close(hdev);
1849
1850         if (hdev->flush)
1851                 hdev->flush(hdev);
1852
1853         /* Reset device */
1854         skb_queue_purge(&hdev->cmd_q);
1855         atomic_set(&hdev->cmd_cnt, 1);
1856         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1857             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1858                 set_bit(HCI_INIT, &hdev->flags);
1859                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1860                 clear_bit(HCI_INIT, &hdev->flags);
1861         }
1862
1863         /* Flush cmd work */
1864         flush_work(&hdev->cmd_work);
1865
1866         /* Drop queues */
1867         skb_queue_purge(&hdev->rx_q);
1868         skb_queue_purge(&hdev->cmd_q);
1869         skb_queue_purge(&hdev->raw_q);
1870
1871         /* Drop last sent command */
1872         if (hdev->sent_cmd) {
1873                 cancel_delayed_work_sync(&hdev->cmd_timer);
1874                 kfree_skb(hdev->sent_cmd);
1875                 hdev->sent_cmd = NULL;
1876         }
1877
1878         clear_bit(HCI_RUNNING, &hdev->flags);
1879         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1880
1881         if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1882                 wake_up(&hdev->suspend_wait_q);
1883
1884         /* After this point our queues are empty
1885          * and no tasks are scheduled. */
1886         hdev->close(hdev);
1887
1888         /* Clear flags */
1889         hdev->flags &= BIT(HCI_RAW);
1890         hci_dev_clear_volatile_flags(hdev);
1891
1892         /* Controller radio is available but is currently powered down */
1893         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1894
1895         memset(hdev->eir, 0, sizeof(hdev->eir));
1896         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1897         bacpy(&hdev->random_addr, BDADDR_ANY);
1898
1899         hci_req_sync_unlock(hdev);
1900
1901         hci_dev_put(hdev);
1902         return err;
1903 }
1904
1905 int hci_dev_close(__u16 dev)
1906 {
1907         struct hci_dev *hdev;
1908         int err;
1909
1910         hdev = hci_dev_get(dev);
1911         if (!hdev)
1912                 return -ENODEV;
1913
1914         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1915                 err = -EBUSY;
1916                 goto done;
1917         }
1918
1919         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1920                 cancel_delayed_work(&hdev->power_off);
1921
1922         err = hci_dev_do_close(hdev);
1923
1924 done:
1925         hci_dev_put(hdev);
1926         return err;
1927 }
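
/* Counterpart to the HCIDEVUP sketch above (equally hypothetical):
 *
 *	ioctl(sk, HCIDEVDOWN, 0);	(fails with EBUSY while a user
 *					 channel owns the device)
 */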
1928
1929 static int hci_dev_do_reset(struct hci_dev *hdev)
1930 {
1931         int ret;
1932
1933         BT_DBG("%s %p", hdev->name, hdev);
1934
1935         hci_req_sync_lock(hdev);
1936
1937         /* Drop queues */
1938         skb_queue_purge(&hdev->rx_q);
1939         skb_queue_purge(&hdev->cmd_q);
1940
1941         /* Avoid potential lockdep warnings from the *_flush() calls by
1942          * ensuring the workqueue is empty up front.
1943          */
1944         drain_workqueue(hdev->workqueue);
1945
1946         hci_dev_lock(hdev);
1947         hci_inquiry_cache_flush(hdev);
1948         hci_conn_hash_flush(hdev);
1949         hci_dev_unlock(hdev);
1950
1951         if (hdev->flush)
1952                 hdev->flush(hdev);
1953
1954         atomic_set(&hdev->cmd_cnt, 1);
1955         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1956
1957         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1958
1959         hci_req_sync_unlock(hdev);
1960         return ret;
1961 }
1962
1963 int hci_dev_reset(__u16 dev)
1964 {
1965         struct hci_dev *hdev;
1966         int err;
1967
1968         hdev = hci_dev_get(dev);
1969         if (!hdev)
1970                 return -ENODEV;
1971
1972         if (!test_bit(HCI_UP, &hdev->flags)) {
1973                 err = -ENETDOWN;
1974                 goto done;
1975         }
1976
1977         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1978                 err = -EBUSY;
1979                 goto done;
1980         }
1981
1982         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1983                 err = -EOPNOTSUPP;
1984                 goto done;
1985         }
1986
1987         err = hci_dev_do_reset(hdev);
1988
1989 done:
1990         hci_dev_put(hdev);
1991         return err;
1992 }
1993
1994 int hci_dev_reset_stat(__u16 dev)
1995 {
1996         struct hci_dev *hdev;
1997         int ret = 0;
1998
1999         hdev = hci_dev_get(dev);
2000         if (!hdev)
2001                 return -ENODEV;
2002
2003         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2004                 ret = -EBUSY;
2005                 goto done;
2006         }
2007
2008         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2009                 ret = -EOPNOTSUPP;
2010                 goto done;
2011         }
2012
2013         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2014
2015 done:
2016         hci_dev_put(hdev);
2017         return ret;
2018 }
2019
2020 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2021 {
2022         bool conn_changed, discov_changed;
2023
2024         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2025
2026         if (scan & SCAN_PAGE)
2027                 conn_changed = !hci_dev_test_and_set_flag(hdev,
2028                                                           HCI_CONNECTABLE);
2029         else
2030                 conn_changed = hci_dev_test_and_clear_flag(hdev,
2031                                                            HCI_CONNECTABLE);
2032
2033         if (scan & SCAN_INQUIRY) {
2034                 discov_changed = !hci_dev_test_and_set_flag(hdev,
2035                                                             HCI_DISCOVERABLE);
2036         } else {
2037                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2038                 discov_changed = hci_dev_test_and_clear_flag(hdev,
2039                                                              HCI_DISCOVERABLE);
2040         }
2041
2042         if (!hci_dev_test_flag(hdev, HCI_MGMT))
2043                 return;
2044
2045         if (conn_changed || discov_changed) {
2046                 /* In case this was disabled through mgmt */
2047                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2048
2049                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2050                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
2051
2052                 mgmt_new_settings(hdev);
2053         }
2054 }
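
/* For reference, dr.dev_opt for the HCISETSCAN case below is the HCI
 * Write Scan Enable bitmask that drives the flag updates above:
 *
 *	SCAN_DISABLED			neither connectable nor discoverable
 *	SCAN_PAGE			connectable only
 *	SCAN_PAGE | SCAN_INQUIRY	connectable and discoverable
 */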
2055
2056 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2057 {
2058         struct hci_dev *hdev;
2059         struct hci_dev_req dr;
2060         int err = 0;
2061
2062         if (copy_from_user(&dr, arg, sizeof(dr)))
2063                 return -EFAULT;
2064
2065         hdev = hci_dev_get(dr.dev_id);
2066         if (!hdev)
2067                 return -ENODEV;
2068
2069         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2070                 err = -EBUSY;
2071                 goto done;
2072         }
2073
2074         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2075                 err = -EOPNOTSUPP;
2076                 goto done;
2077         }
2078
2079         if (hdev->dev_type != HCI_PRIMARY) {
2080                 err = -EOPNOTSUPP;
2081                 goto done;
2082         }
2083
2084         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2085                 err = -EOPNOTSUPP;
2086                 goto done;
2087         }
2088
2089         switch (cmd) {
2090         case HCISETAUTH:
2091                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2092                                    HCI_INIT_TIMEOUT, NULL);
2093                 break;
2094
2095         case HCISETENCRYPT:
2096                 if (!lmp_encrypt_capable(hdev)) {
2097                         err = -EOPNOTSUPP;
2098                         break;
2099                 }
2100
2101                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2102                         /* Auth must be enabled first */
2103                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2104                                            HCI_INIT_TIMEOUT, NULL);
2105                         if (err)
2106                                 break;
2107                 }
2108
2109                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2110                                    HCI_INIT_TIMEOUT, NULL);
2111                 break;
2112
2113         case HCISETSCAN:
2114                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2115                                    HCI_INIT_TIMEOUT, NULL);
2116
2117                 /* Ensure that the connectable and discoverable states
2118                  * get correctly modified as this was a non-mgmt change.
2119                  */
2120                 if (!err)
2121                         hci_update_scan_state(hdev, dr.dev_opt);
2122                 break;
2123
2124         case HCISETLINKPOL:
2125                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2126                                    HCI_INIT_TIMEOUT, NULL);
2127                 break;
2128
2129         case HCISETLINKMODE:
2130                 hdev->link_mode = ((__u16) dr.dev_opt) &
2131                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2132                 break;
2133
2134         case HCISETPTYPE:
2135                 if (hdev->pkt_type == (__u16) dr.dev_opt)
2136                         break;
2137
2138                 hdev->pkt_type = (__u16) dr.dev_opt;
2139                 mgmt_phy_configuration_changed(hdev, NULL);
2140                 break;
2141
2142         case HCISETACLMTU:
2143                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2144                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2145                 break;
2146
2147         case HCISETSCOMTU:
2148                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2149                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2150                 break;
2151
2152         default:
2153                 err = -EINVAL;
2154                 break;
2155         }
2156
2157 done:
2158         hci_dev_put(hdev);
2159         return err;
2160 }
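
/* Illustrative packing for the HCISETACLMTU/HCISETSCOMTU cases above
 * (hypothetical userspace helper; the __u16 aliasing of dev_opt is
 * host-endian, so this layout holds on little-endian machines):
 *
 *	dr.dev_opt = ((__u32)mtu << 16) | pkts;
 *	ioctl(sk, HCISETACLMTU, &dr);
 */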
2161
2162 int hci_get_dev_list(void __user *arg)
2163 {
2164         struct hci_dev *hdev;
2165         struct hci_dev_list_req *dl;
2166         struct hci_dev_req *dr;
2167         int n = 0, size, err;
2168         __u16 dev_num;
2169
2170         if (get_user(dev_num, (__u16 __user *) arg))
2171                 return -EFAULT;
2172
2173         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2174                 return -EINVAL;
2175
2176         size = sizeof(*dl) + dev_num * sizeof(*dr);
2177
2178         dl = kzalloc(size, GFP_KERNEL);
2179         if (!dl)
2180                 return -ENOMEM;
2181
2182         dr = dl->dev_req;
2183
2184         read_lock(&hci_dev_list_lock);
2185         list_for_each_entry(hdev, &hci_dev_list, list) {
2186                 unsigned long flags = hdev->flags;
2187
2188                 /* When auto-off is configured, the transport is still
2189                  * running, but the device should nevertheless be
2190                  * reported as down.
2191                  */
2192                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2193                         flags &= ~BIT(HCI_UP);
2194
2195                 (dr + n)->dev_id  = hdev->id;
2196                 (dr + n)->dev_opt = flags;
2197
2198                 if (++n >= dev_num)
2199                         break;
2200         }
2201         read_unlock(&hci_dev_list_lock);
2202
2203         dl->dev_num = n;
2204         size = sizeof(*dl) + n * sizeof(*dr);
2205
2206         err = copy_to_user(arg, dl, size);
2207         kfree(dl);
2208
2209         return err ? -EFAULT : 0;
2210 }
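
/* Illustrative HCIGETDEVLIST sketch (hypothetical userspace caller;
 * HCI_MAX_DEV here is the userspace library's device limit):
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = malloc(sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *
 *	ioctl(sk, HCIGETDEVLIST, dl);
 *	(on return, dl->dev_num holds the number of entries filled in)
 */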
2211
2212 int hci_get_dev_info(void __user *arg)
2213 {
2214         struct hci_dev *hdev;
2215         struct hci_dev_info di;
2216         unsigned long flags;
2217         int err = 0;
2218
2219         if (copy_from_user(&di, arg, sizeof(di)))
2220                 return -EFAULT;
2221
2222         hdev = hci_dev_get(di.dev_id);
2223         if (!hdev)
2224                 return -ENODEV;
2225
2226         /* When auto-off is configured, the transport is still
2227          * running, but the device should nevertheless be reported
2228          * as down.
2229          */
2230         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2231                 flags = hdev->flags & ~BIT(HCI_UP);
2232         else
2233                 flags = hdev->flags;
2234
2235         strscpy(di.name, hdev->name, sizeof(di.name));
2236         di.bdaddr   = hdev->bdaddr;
2237         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2238         di.flags    = flags;
2239         di.pkt_type = hdev->pkt_type;
2240         if (lmp_bredr_capable(hdev)) {
2241                 di.acl_mtu  = hdev->acl_mtu;
2242                 di.acl_pkts = hdev->acl_pkts;
2243                 di.sco_mtu  = hdev->sco_mtu;
2244                 di.sco_pkts = hdev->sco_pkts;
2245         } else {
2246                 di.acl_mtu  = hdev->le_mtu;
2247                 di.acl_pkts = hdev->le_pkts;
2248                 di.sco_mtu  = 0;
2249                 di.sco_pkts = 0;
2250         }
2251         di.link_policy = hdev->link_policy;
2252         di.link_mode   = hdev->link_mode;
2253
2254         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2255         memcpy(&di.features, &hdev->features, sizeof(di.features));
2256
2257         if (copy_to_user(arg, &di, sizeof(di)))
2258                 err = -EFAULT;
2259
2260         hci_dev_put(hdev);
2261
2262         return err;
2263 }
2264
2265 /* ---- Interface to HCI drivers ---- */
2266
2267 static int hci_rfkill_set_block(void *data, bool blocked)
2268 {
2269         struct hci_dev *hdev = data;
2270
2271         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2272
2273         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2274                 return -EBUSY;
2275
2276         if (blocked) {
2277                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2278                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2279                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2280                         hci_dev_do_close(hdev);
2281         } else {
2282                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2283         }
2284
2285         return 0;
2286 }
2287
2288 static const struct rfkill_ops hci_rfkill_ops = {
2289         .set_block = hci_rfkill_set_block,
2290 };
2291
2292 static void hci_power_on(struct work_struct *work)
2293 {
2294         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2295         int err;
2296
2297         BT_DBG("%s", hdev->name);
2298
2299         if (test_bit(HCI_UP, &hdev->flags) &&
2300             hci_dev_test_flag(hdev, HCI_MGMT) &&
2301             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2302                 cancel_delayed_work(&hdev->power_off);
2303                 hci_req_sync_lock(hdev);
2304                 err = __hci_req_hci_power_on(hdev);
2305                 hci_req_sync_unlock(hdev);
2306                 mgmt_power_on(hdev, err);
2307                 return;
2308         }
2309
2310         err = hci_dev_do_open(hdev);
2311         if (err < 0) {
2312                 hci_dev_lock(hdev);
2313                 mgmt_set_powered_failed(hdev, err);
2314                 hci_dev_unlock(hdev);
2315                 return;
2316         }
2317
2318         /* During the HCI setup phase, a few error conditions are
2319          * ignored and they need to be checked now. If they are still
2320          * valid, it is important to turn the device back off.
2321          */
2322         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2323             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2324             (hdev->dev_type == HCI_PRIMARY &&
2325              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2326              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2327                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2328                 hci_dev_do_close(hdev);
2329         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2330                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2331                                    HCI_AUTO_OFF_TIMEOUT);
2332         }
2333
2334         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2335                 /* For unconfigured devices, set the HCI_RAW flag
2336                  * so that userspace can easily identify them.
2337                  */
2338                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2339                         set_bit(HCI_RAW, &hdev->flags);
2340
2341                 /* For fully configured devices, this will send
2342                  * the Index Added event. For unconfigured devices,
2343                  * it will send the Unconfigured Index Added event.
2344                  *
2345                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2346                  * and no event will be sent.
2347                  */
2348                 mgmt_index_added(hdev);
2349         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2350                 /* Now that the controller is configured, it is
2351                  * important to clear the HCI_RAW flag.
2352                  */
2353                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2354                         clear_bit(HCI_RAW, &hdev->flags);
2355
2356                 /* Powering on the controller with HCI_CONFIG set only
2357                  * happens with the transition from unconfigured to
2358                  * configured. This will send the Index Added event.
2359                  */
2360                 mgmt_index_added(hdev);
2361         }
2362 }
2363
2364 static void hci_power_off(struct work_struct *work)
2365 {
2366         struct hci_dev *hdev = container_of(work, struct hci_dev,
2367                                             power_off.work);
2368
2369         BT_DBG("%s", hdev->name);
2370
2371         hci_dev_do_close(hdev);
2372 }
2373
2374 static void hci_error_reset(struct work_struct *work)
2375 {
2376         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2377
2378         BT_DBG("%s", hdev->name);
2379
2380         if (hdev->hw_error)
2381                 hdev->hw_error(hdev, hdev->hw_error_code);
2382         else
2383                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2384
2385         if (hci_dev_do_close(hdev))
2386                 return;
2387
2388         hci_dev_do_open(hdev);
2389 }
2390
2391 void hci_uuids_clear(struct hci_dev *hdev)
2392 {
2393         struct bt_uuid *uuid, *tmp;
2394
2395         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2396                 list_del(&uuid->list);
2397                 kfree(uuid);
2398         }
2399 }
2400
2401 void hci_link_keys_clear(struct hci_dev *hdev)
2402 {
2403         struct link_key *key;
2404
2405         list_for_each_entry(key, &hdev->link_keys, list) {
2406                 list_del_rcu(&key->list);
2407                 kfree_rcu(key, rcu);
2408         }
2409 }
2410
2411 void hci_smp_ltks_clear(struct hci_dev *hdev)
2412 {
2413         struct smp_ltk *k;
2414
2415         list_for_each_entry(k, &hdev->long_term_keys, list) {
2416                 list_del_rcu(&k->list);
2417                 kfree_rcu(k, rcu);
2418         }
2419 }
2420
2421 void hci_smp_irks_clear(struct hci_dev *hdev)
2422 {
2423         struct smp_irk *k;
2424
2425         list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2426                 list_del_rcu(&k->list);
2427                 kfree_rcu(k, rcu);
2428         }
2429 }
2430
2431 void hci_blocked_keys_clear(struct hci_dev *hdev)
2432 {
2433         struct blocked_key *b;
2434
2435         list_for_each_entry(b, &hdev->blocked_keys, list) {
2436                 list_del_rcu(&b->list);
2437                 kfree_rcu(b, rcu);
2438         }
2439 }
2440
2441 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2442 {
2443         bool blocked = false;
2444         struct blocked_key *b;
2445
2446         rcu_read_lock();
2447         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2448                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2449                         blocked = true;
2450                         break;
2451                 }
2452         }
2453
2454         rcu_read_unlock();
2455         return blocked;
2456 }
2457
2458 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2459 {
2460         struct link_key *k;
2461
2462         rcu_read_lock();
2463         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2464                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2465                         rcu_read_unlock();
2466
2467                         if (hci_is_blocked_key(hdev,
2468                                                HCI_BLOCKED_KEY_TYPE_LINKKEY,
2469                                                k->val)) {
2470                                 bt_dev_warn_ratelimited(hdev,
2471                                                         "Link key blocked for %pMR",
2472                                                         &k->bdaddr);
2473                                 return NULL;
2474                         }
2475
2476                         return k;
2477                 }
2478         }
2479         rcu_read_unlock();
2480
2481         return NULL;
2482 }
2483
2484 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2485                                u8 key_type, u8 old_key_type)
2486 {
2487         /* Legacy key */
2488         if (key_type < 0x03)
2489                 return true;
2490
2491         /* Debug keys are insecure so don't store them persistently */
2492         if (key_type == HCI_LK_DEBUG_COMBINATION)
2493                 return false;
2494
2495         /* Changed combination key and there's no previous one */
2496         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2497                 return false;
2498
2499         /* Security mode 3 case */
2500         if (!conn)
2501                 return true;
2502
2503         /* BR/EDR key derived using SC from an LE link */
2504         if (conn->type == LE_LINK)
2505                 return true;
2506
2507         /* Neither the local nor the remote side requested no-bonding */
2508         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2509                 return true;
2510
2511         /* Local side had dedicated bonding as requirement */
2512         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2513                 return true;
2514
2515         /* Remote side had dedicated bonding as requirement */
2516         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2517                 return true;
2518
2519         /* If none of the above criteria match, then don't store the key
2520          * persistently */
2521         return false;
2522 }
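
/* For reference, the auth_type/remote_auth values tested above are the
 * authentication requirements defined by the Core Specification:
 *
 *	0x00	No Bonding
 *	0x01	No Bonding (MITM protected)
 *	0x02	Dedicated Bonding
 *	0x03	Dedicated Bonding (MITM protected)
 *	0x04	General Bonding
 *	0x05	General Bonding (MITM protected)
 */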
2523
2524 static u8 ltk_role(u8 type)
2525 {
2526         if (type == SMP_LTK)
2527                 return HCI_ROLE_MASTER;
2528
2529         return HCI_ROLE_SLAVE;
2530 }
2531
2532 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2533                              u8 addr_type, u8 role)
2534 {
2535         struct smp_ltk *k;
2536
2537         rcu_read_lock();
2538         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2539                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2540                         continue;
2541
2542                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2543                         rcu_read_unlock();
2544
2545                         if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2546                                                k->val)) {
2547                                 bt_dev_warn_ratelimited(hdev,
2548                                                         "LTK blocked for %pMR",
2549                                                         &k->bdaddr);
2550                                 return NULL;
2551                         }
2552
2553                         return k;
2554                 }
2555         }
2556         rcu_read_unlock();
2557
2558         return NULL;
2559 }
2560
2561 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2562 {
2563         struct smp_irk *irk_to_return = NULL;
2564         struct smp_irk *irk;
2565
2566         rcu_read_lock();
2567         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2568                 if (!bacmp(&irk->rpa, rpa)) {
2569                         irk_to_return = irk;
2570                         goto done;
2571                 }
2572         }
2573
2574         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2575                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2576                         bacpy(&irk->rpa, rpa);
2577                         irk_to_return = irk;
2578                         goto done;
2579                 }
2580         }
2581
2582 done:
2583         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2584                                                 irk_to_return->val)) {
2585                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2586                                         &irk_to_return->bdaddr);
2587                 irk_to_return = NULL;
2588         }
2589
2590         rcu_read_unlock();
2591
2592         return irk_to_return;
2593 }
2594
2595 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2596                                      u8 addr_type)
2597 {
2598         struct smp_irk *irk_to_return = NULL;
2599         struct smp_irk *irk;
2600
2601         /* Identity Address must be public or static random */
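        /* A static random address has its two most significant bits set
         * to 1; bdaddr_t is stored little-endian, so b[5] is the most
         * significant byte, hence the 0xc0 mask below.
         */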
2602         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2603                 return NULL;
2604
2605         rcu_read_lock();
2606         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2607                 if (addr_type == irk->addr_type &&
2608                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2609                         irk_to_return = irk;
2610                         goto done;
2611                 }
2612         }
2613
2614 done:
2615
2616         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2617                                                 irk_to_return->val)) {
2618                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2619                                         &irk_to_return->bdaddr);
2620                 irk_to_return = NULL;
2621         }
2622
2623         rcu_read_unlock();
2624
2625         return irk_to_return;
2626 }
2627
2628 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2629                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2630                                   u8 pin_len, bool *persistent)
2631 {
2632         struct link_key *key, *old_key;
2633         u8 old_key_type;
2634
2635         old_key = hci_find_link_key(hdev, bdaddr);
2636         if (old_key) {
2637                 old_key_type = old_key->type;
2638                 key = old_key;
2639         } else {
2640                 old_key_type = conn ? conn->key_type : 0xff;
2641                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2642                 if (!key)
2643                         return NULL;
2644                 list_add_rcu(&key->list, &hdev->link_keys);
2645         }
2646
2647         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2648
2649         /* Some buggy controller combinations generate a changed
2650          * combination key for legacy pairing even when there's no
2651          * previous key */
2652         if (type == HCI_LK_CHANGED_COMBINATION &&
2653             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2654                 type = HCI_LK_COMBINATION;
2655                 if (conn)
2656                         conn->key_type = type;
2657         }
2658
2659         bacpy(&key->bdaddr, bdaddr);
2660         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2661         key->pin_len = pin_len;
2662
2663         if (type == HCI_LK_CHANGED_COMBINATION)
2664                 key->type = old_key_type;
2665         else
2666                 key->type = type;
2667
2668         if (persistent)
2669                 *persistent = hci_persistent_key(hdev, conn, type,
2670                                                  old_key_type);
2671
2672         return key;
2673 }
2674
2675 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2676                             u8 addr_type, u8 type, u8 authenticated,
2677                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2678 {
2679         struct smp_ltk *key, *old_key;
2680         u8 role = ltk_role(type);
2681
2682         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2683         if (old_key)
2684                 key = old_key;
2685         else {
2686                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2687                 if (!key)
2688                         return NULL;
2689                 list_add_rcu(&key->list, &hdev->long_term_keys);
2690         }
2691
2692         bacpy(&key->bdaddr, bdaddr);
2693         key->bdaddr_type = addr_type;
2694         memcpy(key->val, tk, sizeof(key->val));
2695         key->authenticated = authenticated;
2696         key->ediv = ediv;
2697         key->rand = rand;
2698         key->enc_size = enc_size;
2699         key->type = type;
2700
2701         return key;
2702 }
2703
2704 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2705                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2706 {
2707         struct smp_irk *irk;
2708
2709         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2710         if (!irk) {
2711                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2712                 if (!irk)
2713                         return NULL;
2714
2715                 bacpy(&irk->bdaddr, bdaddr);
2716                 irk->addr_type = addr_type;
2717
2718                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2719         }
2720
2721         memcpy(irk->val, val, 16);
2722         bacpy(&irk->rpa, rpa);
2723
2724         return irk;
2725 }
2726
2727 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2728 {
2729         struct link_key *key;
2730
2731         key = hci_find_link_key(hdev, bdaddr);
2732         if (!key)
2733                 return -ENOENT;
2734
2735         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2736
2737         list_del_rcu(&key->list);
2738         kfree_rcu(key, rcu);
2739
2740         return 0;
2741 }
2742
2743 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2744 {
2745         struct smp_ltk *k;
2746         int removed = 0;
2747
2748         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2749                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2750                         continue;
2751
2752                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2753
2754                 list_del_rcu(&k->list);
2755                 kfree_rcu(k, rcu);
2756                 removed++;
2757         }
2758
2759         return removed ? 0 : -ENOENT;
2760 }
2761
2762 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2763 {
2764         struct smp_irk *k;
2765
2766         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2767                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2768                         continue;
2769
2770                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2771
2772                 list_del_rcu(&k->list);
2773                 kfree_rcu(k, rcu);
2774         }
2775 }
2776
2777 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2778 {
2779         struct smp_ltk *k;
2780         struct smp_irk *irk;
2781         u8 addr_type;
2782
2783         if (type == BDADDR_BREDR) {
2784                 if (hci_find_link_key(hdev, bdaddr))
2785                         return true;
2786                 return false;
2787         }
2788
2789         /* Convert to HCI addr type which struct smp_ltk uses */
2790         if (type == BDADDR_LE_PUBLIC)
2791                 addr_type = ADDR_LE_DEV_PUBLIC;
2792         else
2793                 addr_type = ADDR_LE_DEV_RANDOM;
2794
2795         irk = hci_get_irk(hdev, bdaddr, addr_type);
2796         if (irk) {
2797                 bdaddr = &irk->bdaddr;
2798                 addr_type = irk->addr_type;
2799         }
2800
2801         rcu_read_lock();
2802         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2803                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2804                         rcu_read_unlock();
2805                         return true;
2806                 }
2807         }
2808         rcu_read_unlock();
2809
2810         return false;
2811 }
2812
2813 /* HCI command timer function */
2814 static void hci_cmd_timeout(struct work_struct *work)
2815 {
2816         struct hci_dev *hdev = container_of(work, struct hci_dev,
2817                                             cmd_timer.work);
2818
2819         if (hdev->sent_cmd) {
2820                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2821                 u16 opcode = __le16_to_cpu(sent->opcode);
2822
2823                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2824         } else {
2825                 bt_dev_err(hdev, "command tx timeout");
2826         }
2827
2828         if (hdev->cmd_timeout)
2829                 hdev->cmd_timeout(hdev);
2830
2831         atomic_set(&hdev->cmd_cnt, 1);
2832         queue_work(hdev->workqueue, &hdev->cmd_work);
2833 }
2834
2835 /* HCI ncmd timer function */
2836 static void hci_ncmd_timeout(struct work_struct *work)
2837 {
2838         struct hci_dev *hdev = container_of(work, struct hci_dev,
2839                                             ncmd_timer.work);
2840
2841         bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
2842
2843         /* During HCI_INIT phase no events can be injected if the ncmd timer
2844          * triggers since the procedure has its own timeout handling.
2845          */
2846         if (test_bit(HCI_INIT, &hdev->flags))
2847                 return;
2848
2849         /* This is an irrecoverable state, so inject a hardware error event */
2850         hci_reset_dev(hdev);
2851 }
2852
2853 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2854                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2855 {
2856         struct oob_data *data;
2857
2858         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2859                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2860                         continue;
2861                 if (data->bdaddr_type != bdaddr_type)
2862                         continue;
2863                 return data;
2864         }
2865
2866         return NULL;
2867 }
2868
2869 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2870                                u8 bdaddr_type)
2871 {
2872         struct oob_data *data;
2873
2874         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2875         if (!data)
2876                 return -ENOENT;
2877
2878         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2879
2880         list_del(&data->list);
2881         kfree(data);
2882
2883         return 0;
2884 }
2885
2886 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2887 {
2888         struct oob_data *data, *n;
2889
2890         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2891                 list_del(&data->list);
2892                 kfree(data);
2893         }
2894 }
2895
2896 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2897                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2898                             u8 *hash256, u8 *rand256)
2899 {
2900         struct oob_data *data;
2901
2902         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2903         if (!data) {
2904                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2905                 if (!data)
2906                         return -ENOMEM;
2907
2908                 bacpy(&data->bdaddr, bdaddr);
2909                 data->bdaddr_type = bdaddr_type;
2910                 list_add(&data->list, &hdev->remote_oob_data);
2911         }
2912
2913         if (hash192 && rand192) {
2914                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2915                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2916                 if (hash256 && rand256)
2917                         data->present = 0x03;
2918         } else {
2919                 memset(data->hash192, 0, sizeof(data->hash192));
2920                 memset(data->rand192, 0, sizeof(data->rand192));
2921                 if (hash256 && rand256)
2922                         data->present = 0x02;
2923                 else
2924                         data->present = 0x00;
2925         }
2926
2927         if (hash256 && rand256) {
2928                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2929                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2930         } else {
2931                 memset(data->hash256, 0, sizeof(data->hash256));
2932                 memset(data->rand256, 0, sizeof(data->rand256));
2933                 if (hash192 && rand192)
2934                         data->present = 0x01;
2935         }
2936
2937         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2938
2939         return 0;
2940 }
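
/* For reference, the 'present' values assigned above encode which OOB
 * pairing data variants are stored for the remote device:
 *
 *	0x00	no OOB data
 *	0x01	P-192 values only
 *	0x02	P-256 values only
 *	0x03	both P-192 and P-256 values
 */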
2941
2942 /* This function requires the caller holds hdev->lock */
2943 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2944 {
2945         struct adv_info *adv_instance;
2946
2947         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2948                 if (adv_instance->instance == instance)
2949                         return adv_instance;
2950         }
2951
2952         return NULL;
2953 }
2954
2955 /* This function requires the caller holds hdev->lock */
2956 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2957 {
2958         struct adv_info *cur_instance;
2959
2960         cur_instance = hci_find_adv_instance(hdev, instance);
2961         if (!cur_instance)
2962                 return NULL;
2963
2964         if (cur_instance == list_last_entry(&hdev->adv_instances,
2965                                             struct adv_info, list))
2966                 return list_first_entry(&hdev->adv_instances,
2967                                         struct adv_info, list);
2968         else
2969                 return list_next_entry(cur_instance, list);
2970 }
2971
2972 /* This function requires the caller holds hdev->lock */
2973 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2974 {
2975         struct adv_info *adv_instance;
2976
2977         adv_instance = hci_find_adv_instance(hdev, instance);
2978         if (!adv_instance)
2979                 return -ENOENT;
2980
2981         BT_DBG("%s removing instance %d", hdev->name, instance);
2982
2983         if (hdev->cur_adv_instance == instance) {
2984                 if (hdev->adv_instance_timeout) {
2985                         cancel_delayed_work(&hdev->adv_instance_expire);
2986                         hdev->adv_instance_timeout = 0;
2987                 }
2988                 hdev->cur_adv_instance = 0x00;
2989         }
2990
2991         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2992
2993         list_del(&adv_instance->list);
2994         kfree(adv_instance);
2995
2996         hdev->adv_instance_cnt--;
2997
2998         return 0;
2999 }
3000
3001 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
3002 {
3003         struct adv_info *adv_instance, *n;
3004
3005         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
3006                 adv_instance->rpa_expired = rpa_expired;
3007 }
3008
3009 /* This function requires the caller holds hdev->lock */
3010 void hci_adv_instances_clear(struct hci_dev *hdev)
3011 {
3012         struct adv_info *adv_instance, *n;
3013
3014         if (hdev->adv_instance_timeout) {
3015                 cancel_delayed_work(&hdev->adv_instance_expire);
3016                 hdev->adv_instance_timeout = 0;
3017         }
3018
3019         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
3020                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
3021                 list_del(&adv_instance->list);
3022                 kfree(adv_instance);
3023         }
3024
3025         hdev->adv_instance_cnt = 0;
3026         hdev->cur_adv_instance = 0x00;
3027 }
3028
3029 static void adv_instance_rpa_expired(struct work_struct *work)
3030 {
3031         struct adv_info *adv_instance = container_of(work, struct adv_info,
3032                                                      rpa_expired_cb.work);
3033
3034         BT_DBG("");
3035
3036         adv_instance->rpa_expired = true;
3037 }
3038
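     /* Illustrative sketch of registering instance 1 with the controller's
      * default intervals (flags, adv_len and adv_data are hypothetical,
      * caller-provided values, not taken from this file):
      *
      *      err = hci_add_adv_instance(hdev, 0x01, flags,
      *                                 adv_len, adv_data, 0, NULL,
      *                                 0, 0, HCI_TX_POWER_INVALID,
      *                                 hdev->le_adv_min_interval,
      *                                 hdev->le_adv_max_interval);
      */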
3039 /* This function requires the caller holds hdev->lock */
3040 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
3041                          u16 adv_data_len, u8 *adv_data,
3042                          u16 scan_rsp_len, u8 *scan_rsp_data,
3043                          u16 timeout, u16 duration, s8 tx_power,
3044                          u32 min_interval, u32 max_interval)
3045 {
3046         struct adv_info *adv_instance;
3047
3048         adv_instance = hci_find_adv_instance(hdev, instance);
3049         if (adv_instance) {
3050                 memset(adv_instance->adv_data, 0,
3051                        sizeof(adv_instance->adv_data));
3052                 memset(adv_instance->scan_rsp_data, 0,
3053                        sizeof(adv_instance->scan_rsp_data));
3054         } else {
3055                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
3056                     instance < 1 || instance > hdev->le_num_of_adv_sets)
3057                         return -EOVERFLOW;
3058
3059                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
3060                 if (!adv_instance)
3061                         return -ENOMEM;
3062
3063                 adv_instance->pending = true;
3064                 adv_instance->instance = instance;
3065                 list_add(&adv_instance->list, &hdev->adv_instances);
3066                 hdev->adv_instance_cnt++;
3067         }
3068
3069         adv_instance->flags = flags;
3070         adv_instance->adv_data_len = adv_data_len;
3071         adv_instance->scan_rsp_len = scan_rsp_len;
3072         adv_instance->min_interval = min_interval;
3073         adv_instance->max_interval = max_interval;
3074         adv_instance->tx_power = tx_power;
3075
3076         if (adv_data_len)
3077                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3078
3079         if (scan_rsp_len)
3080                 memcpy(adv_instance->scan_rsp_data,
3081                        scan_rsp_data, scan_rsp_len);
3082
3083         adv_instance->timeout = timeout;
3084         adv_instance->remaining_time = timeout;
3085
3086         if (duration == 0)
3087                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3088         else
3089                 adv_instance->duration = duration;
3090
3091         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3092                           adv_instance_rpa_expired);
3093
3094         BT_DBG("%s for instance %d", hdev->name, instance);
3095
3096         return 0;
3097 }
3098
3099 /* This function requires the caller holds hdev->lock */
3100 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3101                               u16 adv_data_len, u8 *adv_data,
3102                               u16 scan_rsp_len, u8 *scan_rsp_data)
3103 {
3104         struct adv_info *adv_instance;
3105
3106         adv_instance = hci_find_adv_instance(hdev, instance);
3107
3108         /* If advertisement doesn't exist, we can't modify its data */
3109         if (!adv_instance)
3110                 return -ENOENT;
3111
3112         if (adv_data_len) {
3113                 memset(adv_instance->adv_data, 0,
3114                        sizeof(adv_instance->adv_data));
3115                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3116                 adv_instance->adv_data_len = adv_data_len;
3117         }
3118
3119         if (scan_rsp_len) {
3120                 memset(adv_instance->scan_rsp_data, 0,
3121                        sizeof(adv_instance->scan_rsp_data));
3122                 memcpy(adv_instance->scan_rsp_data,
3123                        scan_rsp_data, scan_rsp_len);
3124                 adv_instance->scan_rsp_len = scan_rsp_len;
3125         }
3126
3127         return 0;
3128 }
3129
3130 /* This function requires the caller holds hdev->lock */
3131 void hci_adv_monitors_clear(struct hci_dev *hdev)
3132 {
3133         struct adv_monitor *monitor;
3134         int handle;
3135
3136         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3137                 hci_free_adv_monitor(hdev, monitor);
3138
3139         idr_destroy(&hdev->adv_monitors_idr);
3140 }
3141
3142 /* Frees the monitor structure and does the associated bookkeeping.
3143  * This function requires the caller holds hdev->lock.
3144  */
3145 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3146 {
3147         struct adv_pattern *pattern;
3148         struct adv_pattern *tmp;
3149
3150         if (!monitor)
3151                 return;
3152
3153         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3154                 list_del(&pattern->list);
3155                 kfree(pattern);
3156         }
3157
3158         if (monitor->handle)
3159                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3160
3161         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3162                 hdev->adv_monitors_cnt--;
3163                 mgmt_adv_monitor_removed(hdev, monitor->handle);
3164         }
3165
3166         kfree(monitor);
3167 }
3168
3169 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3170 {
3171         return mgmt_add_adv_patterns_monitor_complete(hdev, status);
3172 }
3173
3174 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3175 {
3176         return mgmt_remove_adv_monitor_complete(hdev, status);
3177 }
3178
3179 /* Assigns handle to a monitor, and if offloading is supported and power is on,
3180  * also attempts to forward the request to the controller.
3181  * Returns true if request is forwarded (result is pending), false otherwise.
3182  * This function requires the caller holds hdev->lock.
3183  */
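     /* A caller would typically interpret the returned boolean together with
      * *err, e.g. (illustrative sketch only):
      *
      *      pending = hci_add_adv_monitor(hdev, monitor, &err);
      *      if (err)                // failed synchronously
      *              ...
      *      else if (!pending)      // completed synchronously
      *              ...
      *      // else the result is reported asynchronously
      */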
3184 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3185                          int *err)
3186 {
3187         int min, max, handle;
3188
3189         *err = 0;
3190
3191         if (!monitor) {
3192                 *err = -EINVAL;
3193                 return false;
3194         }
3195
3196         min = HCI_MIN_ADV_MONITOR_HANDLE;
3197         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3198         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3199                            GFP_KERNEL);
3200         if (handle < 0) {
3201                 *err = handle;
3202                 return false;
3203         }
3204
3205         monitor->handle = handle;
3206
3207         if (!hdev_is_powered(hdev))
3208                 return false;
3209
3210         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3211         case HCI_ADV_MONITOR_EXT_NONE:
3212                 hci_update_background_scan(hdev);
3213                 bt_dev_dbg(hdev, "add monitor status %d", *err);
3214                 /* Message was not forwarded to controller - not an error */
3215                 return false;
3216         case HCI_ADV_MONITOR_EXT_MSFT:
3217                 *err = msft_add_monitor_pattern(hdev, monitor);
3218                 bt_dev_dbg(hdev, "add monitor msft status %d", *err);
3220                 break;
3221         }
3222
3223         return (*err == 0);
3224 }
3225
3226 /* Attempts to tell the controller to remove the monitor and frees it. If
3227  * the controller has no corresponding handle, the monitor is freed anyway.
3228  * Returns true if request is forwarded (result is pending), false otherwise.
3229  * This function requires the caller holds hdev->lock.
3230  */
3231 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3232                                    struct adv_monitor *monitor,
3233                                    u16 handle, int *err)
3234 {
3235         *err = 0;
3236
3237         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3238         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
3239                 goto free_monitor;
3240         case HCI_ADV_MONITOR_EXT_MSFT:
3241                 *err = msft_remove_monitor(hdev, monitor, handle);
3242                 break;
3243         }
3244
3245         /* If no matching handle is registered, just free the monitor */
3246         if (*err == -ENOENT)
3247                 goto free_monitor;
3248
3249         return (*err == 0);
3250
3251 free_monitor:
3252         if (*err == -ENOENT)
3253                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3254                             monitor->handle);
3255         hci_free_adv_monitor(hdev, monitor);
3256
3257         *err = 0;
3258         return false;
3259 }
3260
3261 /* Returns true if request is forwarded (result is pending), false otherwise.
3262  * This function requires the caller holds hdev->lock.
3263  */
3264 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3265 {
3266         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3267         bool pending;
3268
3269         if (!monitor) {
3270                 *err = -EINVAL;
3271                 return false;
3272         }
3273
3274         pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3275         if (!*err && !pending)
3276                 hci_update_background_scan(hdev);
3277
3278         bt_dev_dbg(hdev, "remove monitor handle %d, status %d, %spending",
3279                    handle, *err, pending ? "" : "not ");
3280
3281         return pending;
3282 }
3283
3284 /* Returns true if request is forwarded (result is pending), false otherwise.
3285  * This function requires the caller holds hdev->lock.
3286  */
3287 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
3288 {
3289         struct adv_monitor *monitor;
3290         int idr_next_id = 0;
3291         bool pending = false;
3292         bool update = false;
3293
3294         *err = 0;
3295
3296         while (!*err && !pending) {
3297                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
3298                 if (!monitor)
3299                         break;
3300
3301                 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3302
3303                 if (!*err && !pending)
3304                         update = true;
3305         }
3306
3307         if (update)
3308                 hci_update_background_scan(hdev);
3309
3310         bt_dev_dbg(hdev, "remove all monitors status %d, %spending",
3311                    *err, pending ? "" : "not ");
3312
3313         return pending;
3314 }
3315
3316 /* This function requires the caller holds hdev->lock */
3317 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3318 {
3319         return !idr_is_empty(&hdev->adv_monitors_idr);
3320 }
3321
3322 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3323 {
3324         if (msft_monitor_supported(hdev))
3325                 return HCI_ADV_MONITOR_EXT_MSFT;
3326
3327         return HCI_ADV_MONITOR_EXT_NONE;
3328 }
3329
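     /* bacmp() follows memcmp() semantics, so !bacmp(a, b) below means the
      * two addresses are equal. The three lookup variants differ only in the
      * entry type they return.
      */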
3330 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3331                                          bdaddr_t *bdaddr, u8 type)
3332 {
3333         struct bdaddr_list *b;
3334
3335         list_for_each_entry(b, bdaddr_list, list) {
3336                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3337                         return b;
3338         }
3339
3340         return NULL;
3341 }
3342
3343 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3344                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3345                                 u8 type)
3346 {
3347         struct bdaddr_list_with_irk *b;
3348
3349         list_for_each_entry(b, bdaddr_list, list) {
3350                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3351                         return b;
3352         }
3353
3354         return NULL;
3355 }
3356
3357 struct bdaddr_list_with_flags *
3358 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3359                                   bdaddr_t *bdaddr, u8 type)
3360 {
3361         struct bdaddr_list_with_flags *b;
3362
3363         list_for_each_entry(b, bdaddr_list, list) {
3364                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3365                         return b;
3366         }
3367
3368         return NULL;
3369 }
3370
3371 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3372 {
3373         struct bdaddr_list *b, *n;
3374
3375         list_for_each_entry_safe(b, n, bdaddr_list, list) {
3376                 list_del(&b->list);
3377                 kfree(b);
3378         }
3379 }
3380
3381 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3382 {
3383         struct bdaddr_list *entry;
3384
3385         if (!bacmp(bdaddr, BDADDR_ANY))
3386                 return -EBADF;
3387
3388         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3389                 return -EEXIST;
3390
3391         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3392         if (!entry)
3393                 return -ENOMEM;
3394
3395         bacpy(&entry->bdaddr, bdaddr);
3396         entry->bdaddr_type = type;
3397
3398         list_add(&entry->list, list);
3399
3400         return 0;
3401 }
3402
3403 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3404                                         u8 type, u8 *peer_irk, u8 *local_irk)
3405 {
3406         struct bdaddr_list_with_irk *entry;
3407
3408         if (!bacmp(bdaddr, BDADDR_ANY))
3409                 return -EBADF;
3410
3411         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3412                 return -EEXIST;
3413
3414         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3415         if (!entry)
3416                 return -ENOMEM;
3417
3418         bacpy(&entry->bdaddr, bdaddr);
3419         entry->bdaddr_type = type;
3420
3421         if (peer_irk)
3422                 memcpy(entry->peer_irk, peer_irk, 16);
3423
3424         if (local_irk)
3425                 memcpy(entry->local_irk, local_irk, 16);
3426
3427         list_add(&entry->list, list);
3428
3429         return 0;
3430 }
3431
3432 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3433                                    u8 type, u32 flags)
3434 {
3435         struct bdaddr_list_with_flags *entry;
3436
3437         if (!bacmp(bdaddr, BDADDR_ANY))
3438                 return -EBADF;
3439
3440         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3441                 return -EEXIST;
3442
3443         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3444         if (!entry)
3445                 return -ENOMEM;
3446
3447         bacpy(&entry->bdaddr, bdaddr);
3448         entry->bdaddr_type = type;
3449         entry->current_flags = flags;
3450
3451         list_add(&entry->list, list);
3452
3453         return 0;
3454 }
3455
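     /* Passing BDADDR_ANY is treated as a wildcard and clears the whole list;
      * the same convention applies to the _with_irk and _with_flags variants
      * below.
      */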
3456 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3457 {
3458         struct bdaddr_list *entry;
3459
3460         if (!bacmp(bdaddr, BDADDR_ANY)) {
3461                 hci_bdaddr_list_clear(list);
3462                 return 0;
3463         }
3464
3465         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3466         if (!entry)
3467                 return -ENOENT;
3468
3469         list_del(&entry->list);
3470         kfree(entry);
3471
3472         return 0;
3473 }
3474
3475 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3476                                                         u8 type)
3477 {
3478         struct bdaddr_list_with_irk *entry;
3479
3480         if (!bacmp(bdaddr, BDADDR_ANY)) {
3481                 hci_bdaddr_list_clear(list);
3482                 return 0;
3483         }
3484
3485         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3486         if (!entry)
3487                 return -ENOENT;
3488
3489         list_del(&entry->list);
3490         kfree(entry);
3491
3492         return 0;
3493 }
3494
3495 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3496                                    u8 type)
3497 {
3498         struct bdaddr_list_with_flags *entry;
3499
3500         if (!bacmp(bdaddr, BDADDR_ANY)) {
3501                 hci_bdaddr_list_clear(list);
3502                 return 0;
3503         }
3504
3505         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3506         if (!entry)
3507                 return -ENOENT;
3508
3509         list_del(&entry->list);
3510         kfree(entry);
3511
3512         return 0;
3513 }
3514
3515 /* This function requires the caller holds hdev->lock */
3516 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3517                                                bdaddr_t *addr, u8 addr_type)
3518 {
3519         struct hci_conn_params *params;
3520
3521         list_for_each_entry(params, &hdev->le_conn_params, list) {
3522                 if (bacmp(&params->addr, addr) == 0 &&
3523                     params->addr_type == addr_type) {
3524                         return params;
3525                 }
3526         }
3527
3528         return NULL;
3529 }
3530
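     /* Resolved RPA address types are mapped back to their identity types
      * first, so that entries stored with identity addresses still match.
      */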
3531 /* This function requires the caller holds hdev->lock */
3532 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3533                                                   bdaddr_t *addr, u8 addr_type)
3534 {
3535         struct hci_conn_params *param;
3536
3537         switch (addr_type) {
3538         case ADDR_LE_DEV_PUBLIC_RESOLVED:
3539                 addr_type = ADDR_LE_DEV_PUBLIC;
3540                 break;
3541         case ADDR_LE_DEV_RANDOM_RESOLVED:
3542                 addr_type = ADDR_LE_DEV_RANDOM;
3543                 break;
3544         }
3545
3546         list_for_each_entry(param, list, action) {
3547                 if (bacmp(&param->addr, addr) == 0 &&
3548                     param->addr_type == addr_type)
3549                         return param;
3550         }
3551
3552         return NULL;
3553 }
3554
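     /* Safe to call repeatedly for the same address: existing params are
      * returned as-is. Typical use (sketch; HCI_AUTO_CONN_ALWAYS is just an
      * example policy):
      *
      *      params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
      *      if (!params)
      *              return -ENOMEM;
      *      params->auto_connect = HCI_AUTO_CONN_ALWAYS;
      */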
3555 /* This function requires the caller holds hdev->lock */
3556 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3557                                             bdaddr_t *addr, u8 addr_type)
3558 {
3559         struct hci_conn_params *params;
3560
3561         params = hci_conn_params_lookup(hdev, addr, addr_type);
3562         if (params)
3563                 return params;
3564
3565         params = kzalloc(sizeof(*params), GFP_KERNEL);
3566         if (!params) {
3567                 bt_dev_err(hdev, "out of memory");
3568                 return NULL;
3569         }
3570
3571         bacpy(&params->addr, addr);
3572         params->addr_type = addr_type;
3573
3574         list_add(&params->list, &hdev->le_conn_params);
3575         INIT_LIST_HEAD(&params->action);
3576
3577         params->conn_min_interval = hdev->le_conn_min_interval;
3578         params->conn_max_interval = hdev->le_conn_max_interval;
3579         params->conn_latency = hdev->le_conn_latency;
3580         params->supervision_timeout = hdev->le_supv_timeout;
3581         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3582
3583         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3584
3585         return params;
3586 }
3587
3588 static void hci_conn_params_free(struct hci_conn_params *params)
3589 {
3590         if (params->conn) {
3591                 hci_conn_drop(params->conn);
3592                 hci_conn_put(params->conn);
3593         }
3594
3595         list_del(&params->action);
3596         list_del(&params->list);
3597         kfree(params);
3598 }
3599
3600 /* This function requires the caller holds hdev->lock */
3601 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3602 {
3603         struct hci_conn_params *params;
3604
3605         params = hci_conn_params_lookup(hdev, addr, addr_type);
3606         if (!params)
3607                 return;
3608
3609         hci_conn_params_free(params);
3610
3611         hci_update_background_scan(hdev);
3612
3613         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3614 }
3615
3616 /* This function requires the caller holds hdev->lock */
3617 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3618 {
3619         struct hci_conn_params *params, *tmp;
3620
3621         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3622                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3623                         continue;
3624
3625                 /* If we are trying to establish a one-time connection to a
3626                  * disabled device, leave the params, but mark them for
3627                  * explicit connect only. */
3628                 if (params->explicit_connect) {
3629                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3630                         continue;
3631                 }
3632
3633                 list_del(&params->list);
3634                 kfree(params);
3635         }
3636
3637         BT_DBG("All LE disabled connection parameters were removed");
3638 }
3639
3640 /* This function requires the caller holds hdev->lock */
3641 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3642 {
3643         struct hci_conn_params *params, *tmp;
3644
3645         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3646                 hci_conn_params_free(params);
3647
3648         BT_DBG("All LE connection parameters were removed");
3649 }
3650
3651 /* Copy the Identity Address of the controller.
3652  *
3653  * If the controller has a public BD_ADDR, then by default use that one.
3654  * If this is an LE-only controller without a public address, default to
3655  * the static random address.
3656  *
3657  * For debugging purposes it is possible to force controllers with a
3658  * public address to use the static random address instead.
3659  *
3660  * In case BR/EDR has been disabled on a dual-mode controller and
3661  * userspace has configured a static address, then that address
3662  * becomes the identity address instead of the public BR/EDR address.
3663  */
3664 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3665                                u8 *bdaddr_type)
3666 {
3667         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3668             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3669             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3670              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3671                 bacpy(bdaddr, &hdev->static_addr);
3672                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3673         } else {
3674                 bacpy(bdaddr, &hdev->bdaddr);
3675                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3676         }
3677 }
3678
3679 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3680 {
3681         int i;
3682
3683         for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3684                 clear_bit(i, hdev->suspend_tasks);
3685
3686         wake_up(&hdev->suspend_wait_q);
3687 }
3688
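     /* WAKE_COND below is true once no suspend task bits remain set:
      * find_first_bit() returns the bitmap size when the bitmap is empty.
      */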
3689 static int hci_suspend_wait_event(struct hci_dev *hdev)
3690 {
3691 #define WAKE_COND                                                              \
3692         (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
3693          __SUSPEND_NUM_TASKS)
3694
3695         int i;
3696         int ret = wait_event_timeout(hdev->suspend_wait_q,
3697                                      WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3698
3699         if (ret == 0) {
3700                 bt_dev_err(hdev, "Timed out waiting for suspend events");
3701                 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3702                         if (test_bit(i, hdev->suspend_tasks))
3703                                 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3704                         clear_bit(i, hdev->suspend_tasks);
3705                 }
3706
3707                 ret = -ETIMEDOUT;
3708         } else {
3709                 ret = 0;
3710         }
3711
3712         return ret;
3713 }
3714
3715 static void hci_prepare_suspend(struct work_struct *work)
3716 {
3717         struct hci_dev *hdev =
3718                 container_of(work, struct hci_dev, suspend_prepare);
3719
3720         hci_dev_lock(hdev);
3721         hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3722         hci_dev_unlock(hdev);
3723 }
3724
3725 static int hci_change_suspend_state(struct hci_dev *hdev,
3726                                     enum suspended_state next)
3727 {
3728         hdev->suspend_state_next = next;
3729         set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3730         queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3731         return hci_suspend_wait_event(hdev);
3732 }
3733
3734 static void hci_clear_wake_reason(struct hci_dev *hdev)
3735 {
3736         hci_dev_lock(hdev);
3737
3738         hdev->wake_reason = 0;
3739         bacpy(&hdev->wake_addr, BDADDR_ANY);
3740         hdev->wake_addr_type = 0;
3741
3742         hci_dev_unlock(hdev);
3743 }
3744
3745 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3746                                 void *data)
3747 {
3748         struct hci_dev *hdev =
3749                 container_of(nb, struct hci_dev, suspend_notifier);
3750         int ret = 0;
3751         u8 state = BT_RUNNING;
3752
3753         /* If powering down, wait for completion. */
3754         if (mgmt_powering_down(hdev)) {
3755                 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3756                 ret = hci_suspend_wait_event(hdev);
3757                 if (ret)
3758                         goto done;
3759         }
3760
3761         /* Suspend notifier should only act on events when powered. */
3762         if (!hdev_is_powered(hdev) ||
3763             hci_dev_test_flag(hdev, HCI_UNREGISTER))
3764                 goto done;
3765
3766         if (action == PM_SUSPEND_PREPARE) {
3767                 /* Suspend consists of two actions:
3768                  *  - First, disconnect everything and make the controller not
3769                  *    connectable (disabling scanning)
3770                  *  - Second, program event filter/accept list and enable scan
3771                  */
3772                 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3773                 if (!ret)
3774                         state = BT_SUSPEND_DISCONNECT;
3775
3776                 /* Only configure accept list if disconnect succeeded and wake
3777                  * isn't being prevented.
3778                  */
3779                 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3780                         ret = hci_change_suspend_state(hdev,
3781                                                 BT_SUSPEND_CONFIGURE_WAKE);
3782                         if (!ret)
3783                                 state = BT_SUSPEND_CONFIGURE_WAKE;
3784                 }
3785
3786                 hci_clear_wake_reason(hdev);
3787                 mgmt_suspending(hdev, state);
3788
3789         } else if (action == PM_POST_SUSPEND) {
3790                 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3791
3792                 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3793                               hdev->wake_addr_type);
3794         }
3795
3796 done:
3797         /* We always allow suspend even if suspend preparation failed, and
3798          * attempt to recover on resume.
3799          */
3800         if (ret)
3801                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3802                            action, ret);
3803
3804         return NOTIFY_DONE;
3805 }
3806
3807 /* Alloc HCI device */
3808 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
3809 {
3810         struct hci_dev *hdev;
3811         unsigned int alloc_size;
3812
3813         alloc_size = sizeof(*hdev);
3814         if (sizeof_priv) {
3815                 /* FIXME: the private data area may need explicit alignment */
3816                 alloc_size += sizeof_priv;
3817         }
3818
3819         hdev = kzalloc(alloc_size, GFP_KERNEL);
3820         if (!hdev)
3821                 return NULL;
3822
3823         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3824         hdev->esco_type = (ESCO_HV1);
3825         hdev->link_mode = (HCI_LM_ACCEPT);
3826         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
3827         hdev->io_capability = 0x03;     /* No Input No Output */
3828         hdev->manufacturer = 0xffff;    /* Default to internal use */
3829         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3830         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3831         hdev->adv_instance_cnt = 0;
3832         hdev->cur_adv_instance = 0x00;
3833         hdev->adv_instance_timeout = 0;
3834
3835         hdev->advmon_allowlist_duration = 300;
3836         hdev->advmon_no_filter_duration = 500;
3837         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */
3838
3839         hdev->sniff_max_interval = 800;
3840         hdev->sniff_min_interval = 80;
3841
3842         hdev->le_adv_channel_map = 0x07;
3843         hdev->le_adv_min_interval = 0x0800;
3844         hdev->le_adv_max_interval = 0x0800;
3845 #ifdef TIZEN_BT
3846         hdev->adv_filter_policy = 0x00;
3847         hdev->adv_type = 0x00;
3848 #endif
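             /* LE scan interval/window values below are in 0.625 ms units, so
              * the 0x0060/0x0030 defaults correspond to 60 ms/30 ms.
              */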
3849         hdev->le_scan_interval = 0x0060;
3850         hdev->le_scan_window = 0x0030;
3851         hdev->le_scan_int_suspend = 0x0400;
3852         hdev->le_scan_window_suspend = 0x0012;
3853         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3854         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3855         hdev->le_scan_int_adv_monitor = 0x0060;
3856         hdev->le_scan_window_adv_monitor = 0x0030;
3857         hdev->le_scan_int_connect = 0x0060;
3858         hdev->le_scan_window_connect = 0x0060;
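             /* Connection intervals are in 1.25 ms units and the supervision
              * timeout is in 10 ms units: 0x0018-0x0028 with 0x002a gives a
              * 30-50 ms interval and a 420 ms timeout.
              */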
3859         hdev->le_conn_min_interval = 0x0018;
3860         hdev->le_conn_max_interval = 0x0028;
3861         hdev->le_conn_latency = 0x0000;
3862         hdev->le_supv_timeout = 0x002a;
3863         hdev->le_def_tx_len = 0x001b;
3864         hdev->le_def_tx_time = 0x0148;
3865         hdev->le_max_tx_len = 0x001b;
3866         hdev->le_max_tx_time = 0x0148;
3867         hdev->le_max_rx_len = 0x001b;
3868         hdev->le_max_rx_time = 0x0148;
3869         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3870         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3871         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3872         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3873         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3874         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3875         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3876         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3877         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3878
3879         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3880         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3881         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3882         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3883         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3884         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3885
3886         /* default 1.28 sec page scan */
3887         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3888         hdev->def_page_scan_int = 0x0800;
3889         hdev->def_page_scan_window = 0x0012;
3890
3891         mutex_init(&hdev->lock);
3892         mutex_init(&hdev->req_lock);
3893
3894         INIT_LIST_HEAD(&hdev->mgmt_pending);
3895         INIT_LIST_HEAD(&hdev->reject_list);
3896         INIT_LIST_HEAD(&hdev->accept_list);
3897         INIT_LIST_HEAD(&hdev->uuids);
3898         INIT_LIST_HEAD(&hdev->link_keys);
3899         INIT_LIST_HEAD(&hdev->long_term_keys);
3900         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3901         INIT_LIST_HEAD(&hdev->remote_oob_data);
3902         INIT_LIST_HEAD(&hdev->le_accept_list);
3903         INIT_LIST_HEAD(&hdev->le_resolv_list);
3904         INIT_LIST_HEAD(&hdev->le_conn_params);
3905         INIT_LIST_HEAD(&hdev->pend_le_conns);
3906         INIT_LIST_HEAD(&hdev->pend_le_reports);
3907         INIT_LIST_HEAD(&hdev->conn_hash.list);
3908         INIT_LIST_HEAD(&hdev->adv_instances);
3909         INIT_LIST_HEAD(&hdev->blocked_keys);
3910
3911         INIT_WORK(&hdev->rx_work, hci_rx_work);
3912         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3913         INIT_WORK(&hdev->tx_work, hci_tx_work);
3914         INIT_WORK(&hdev->power_on, hci_power_on);
3915         INIT_WORK(&hdev->error_reset, hci_error_reset);
3916         INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3917
3918         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3919
3920         skb_queue_head_init(&hdev->rx_q);
3921         skb_queue_head_init(&hdev->cmd_q);
3922         skb_queue_head_init(&hdev->raw_q);
3923
3924         init_waitqueue_head(&hdev->req_wait_q);
3925         init_waitqueue_head(&hdev->suspend_wait_q);
3926
3927         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3928         INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
3929
3930         hci_request_setup(hdev);
3931
3932         hci_init_sysfs(hdev);
3933         discovery_init(hdev);
3934
3935         return hdev;
3936 }
3937 EXPORT_SYMBOL(hci_alloc_dev_priv);
3938
3939 /* Free HCI device */
3940 void hci_free_dev(struct hci_dev *hdev)
3941 {
3942         /* Freed via the device release callback */
3943         put_device(&hdev->dev);
3944 }
3945 EXPORT_SYMBOL(hci_free_dev);
3946
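     /* Minimal driver-side registration sequence (illustrative sketch only;
      * my_open/my_close/my_send are hypothetical driver callbacks):
      *
      *      hdev = hci_alloc_dev();
      *      if (!hdev)
      *              return -ENOMEM;
      *      hdev->open  = my_open;
      *      hdev->close = my_close;
      *      hdev->send  = my_send;
      *      err = hci_register_dev(hdev);
      *      if (err < 0)
      *              hci_free_dev(hdev);
      */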
3947 /* Register HCI device */
3948 int hci_register_dev(struct hci_dev *hdev)
3949 {
3950         int id, error;
3951
3952         if (!hdev->open || !hdev->close || !hdev->send)
3953                 return -EINVAL;
3954
3955         /* Do not allow HCI_AMP devices to register at index 0,
3956          * so the index can be used as the AMP controller ID.
3957          */
3958         switch (hdev->dev_type) {
3959         case HCI_PRIMARY:
3960                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
3961                 break;
3962         case HCI_AMP:
3963                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
3964                 break;
3965         default:
3966                 return -EINVAL;
3967         }
3968
3969         if (id < 0)
3970                 return id;
3971
3972         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3973         hdev->id = id;
3974
3975         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3976
3977         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3978         if (!hdev->workqueue) {
3979                 error = -ENOMEM;
3980                 goto err;
3981         }
3982
3983         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3984                                                       hdev->name);
3985         if (!hdev->req_workqueue) {
3986                 destroy_workqueue(hdev->workqueue);
3987                 error = -ENOMEM;
3988                 goto err;
3989         }
3990
3991         if (!IS_ERR_OR_NULL(bt_debugfs))
3992                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3993
3994         dev_set_name(&hdev->dev, "%s", hdev->name);
3995
3996         error = device_add(&hdev->dev);
3997         if (error < 0)
3998                 goto err_wqueue;
3999
4000         hci_leds_init(hdev);
4001
4002         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4003                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4004                                     hdev);
4005         if (hdev->rfkill) {
4006                 if (rfkill_register(hdev->rfkill) < 0) {
4007                         rfkill_destroy(hdev->rfkill);
4008                         hdev->rfkill = NULL;
4009                 }
4010         }
4011
4012         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4013                 hci_dev_set_flag(hdev, HCI_RFKILLED);
4014
4015         hci_dev_set_flag(hdev, HCI_SETUP);
4016         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
4017
4018         if (hdev->dev_type == HCI_PRIMARY) {
4019                 /* Assume BR/EDR support until proven otherwise (such as
4020                  * through reading supported features during init).
4021                  */
4022                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
4023         }
4024
4025         write_lock(&hci_dev_list_lock);
4026         list_add(&hdev->list, &hci_dev_list);
4027         write_unlock(&hci_dev_list_lock);
4028
4029         /* Devices that are marked for raw-only usage are unconfigured
4030          * and should not be included in normal operation.
4031          */
4032         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4033                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
4034
4035         hci_sock_dev_event(hdev, HCI_DEV_REG);
4036         hci_dev_hold(hdev);
4037
4038         if (!hdev->suspend_notifier.notifier_call &&
4039             !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
4040                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
4041                 error = register_pm_notifier(&hdev->suspend_notifier);
4042                 if (error)
4043                         goto err_wqueue;
4044         }
4045
4046         queue_work(hdev->req_workqueue, &hdev->power_on);
4047
4048         idr_init(&hdev->adv_monitors_idr);
4049
4050         return id;
4051
4052 err_wqueue:
4053         debugfs_remove_recursive(hdev->debugfs);
4054         destroy_workqueue(hdev->workqueue);
4055         destroy_workqueue(hdev->req_workqueue);
4056 err:
4057         ida_simple_remove(&hci_index_ida, hdev->id);
4058
4059         return error;
4060 }
4061 EXPORT_SYMBOL(hci_register_dev);
4062
4063 /* Unregister HCI device */
4064 void hci_unregister_dev(struct hci_dev *hdev)
4065 {
4066         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4067
4068         hci_dev_set_flag(hdev, HCI_UNREGISTER);
4069
4070         write_lock(&hci_dev_list_lock);
4071         list_del(&hdev->list);
4072         write_unlock(&hci_dev_list_lock);
4073
4074         cancel_work_sync(&hdev->power_on);
4075
4076         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
4077                 hci_suspend_clear_tasks(hdev);
4078                 unregister_pm_notifier(&hdev->suspend_notifier);
4079                 cancel_work_sync(&hdev->suspend_prepare);
4080         }
4081
4082         hci_dev_do_close(hdev);
4083
4084         if (!test_bit(HCI_INIT, &hdev->flags) &&
4085             !hci_dev_test_flag(hdev, HCI_SETUP) &&
4086             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
4087                 hci_dev_lock(hdev);
4088                 mgmt_index_removed(hdev);
4089                 hci_dev_unlock(hdev);
4090         }
4091
4092         /* mgmt_index_removed should take care of emptying the
4093          * pending list */
4094         BUG_ON(!list_empty(&hdev->mgmt_pending));
4095
4096         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
4097
4098         if (hdev->rfkill) {
4099                 rfkill_unregister(hdev->rfkill);
4100                 rfkill_destroy(hdev->rfkill);
4101         }
4102
4103         device_del(&hdev->dev);
4104         /* Actual cleanup is deferred until hci_release_dev(). */
4105         hci_dev_put(hdev);
4106 }
4107 EXPORT_SYMBOL(hci_unregister_dev);
4108
4109 /* Release HCI device */
4110 void hci_release_dev(struct hci_dev *hdev)
4111 {
4112         debugfs_remove_recursive(hdev->debugfs);
4113         kfree_const(hdev->hw_info);
4114         kfree_const(hdev->fw_info);
4115
4116         destroy_workqueue(hdev->workqueue);
4117         destroy_workqueue(hdev->req_workqueue);
4118
4119         hci_dev_lock(hdev);
4120         hci_bdaddr_list_clear(&hdev->reject_list);
4121         hci_bdaddr_list_clear(&hdev->accept_list);
4122         hci_uuids_clear(hdev);
4123         hci_link_keys_clear(hdev);
4124         hci_smp_ltks_clear(hdev);
4125         hci_smp_irks_clear(hdev);
4126         hci_remote_oob_data_clear(hdev);
4127         hci_adv_instances_clear(hdev);
4128         hci_adv_monitors_clear(hdev);
4129         hci_bdaddr_list_clear(&hdev->le_accept_list);
4130         hci_bdaddr_list_clear(&hdev->le_resolv_list);
4131         hci_conn_params_clear_all(hdev);
4132         hci_discovery_filter_clear(hdev);
4133         hci_blocked_keys_clear(hdev);
4134         hci_dev_unlock(hdev);
4135
4136         ida_simple_remove(&hci_index_ida, hdev->id);
4137         kfree_skb(hdev->sent_cmd);
4138         kfree(hdev);
4139 }
4140 EXPORT_SYMBOL(hci_release_dev);
4141
4142 /* Suspend HCI device */
4143 int hci_suspend_dev(struct hci_dev *hdev)
4144 {
4145         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
4146         return 0;
4147 }
4148 EXPORT_SYMBOL(hci_suspend_dev);
4149
4150 /* Resume HCI device */
4151 int hci_resume_dev(struct hci_dev *hdev)
4152 {
4153         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
4154         return 0;
4155 }
4156 EXPORT_SYMBOL(hci_resume_dev);
4157
4158 /* Reset HCI device */
4159 int hci_reset_dev(struct hci_dev *hdev)
4160 {
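             /* Fabricated HCI event: event code, parameter total length (0x01)
              * and a hardware code of 0x00.
              */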
4161         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4162         struct sk_buff *skb;
4163
4164         skb = bt_skb_alloc(3, GFP_ATOMIC);
4165         if (!skb)
4166                 return -ENOMEM;
4167
4168         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4169         skb_put_data(skb, hw_err, 3);
4170
4171         bt_dev_err(hdev, "Injecting HCI hardware error event");
4172
4173         /* Send Hardware Error to upper stack */
4174         return hci_recv_frame(hdev, skb);
4175 }
4176 EXPORT_SYMBOL(hci_reset_dev);
4177
4178 /* Receive frame from HCI drivers */
4179 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4180 {
4181         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4182                       !test_bit(HCI_INIT, &hdev->flags))) {
4183                 kfree_skb(skb);
4184                 return -ENXIO;
4185         }
4186
4187         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4188             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4189             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4190             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
4191                 kfree_skb(skb);
4192                 return -EINVAL;
4193         }
4194
4195         /* Incoming skb */
4196         bt_cb(skb)->incoming = 1;
4197
4198         /* Time stamp */
4199         __net_timestamp(skb);
4200
4201         skb_queue_tail(&hdev->rx_q, skb);
4202         queue_work(hdev->workqueue, &hdev->rx_work);
4203
4204         return 0;
4205 }
4206 EXPORT_SYMBOL(hci_recv_frame);
4207
4208 /* Receive diagnostic message from HCI drivers */
4209 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4210 {
4211         /* Mark as diagnostic packet */
4212         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4213
4214         /* Time stamp */
4215         __net_timestamp(skb);
4216
4217         skb_queue_tail(&hdev->rx_q, skb);
4218         queue_work(hdev->workqueue, &hdev->rx_work);
4219
4220         return 0;
4221 }
4222 EXPORT_SYMBOL(hci_recv_diag);
4223
4224 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4225 {
4226         va_list vargs;
4227
4228         va_start(vargs, fmt);
4229         kfree_const(hdev->hw_info);
4230         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4231         va_end(vargs);
4232 }
4233 EXPORT_SYMBOL(hci_set_hw_info);
4234
4235 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4236 {
4237         va_list vargs;
4238
4239         va_start(vargs, fmt);
4240         kfree_const(hdev->fw_info);
4241         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4242         va_end(vargs);
4243 }
4244 EXPORT_SYMBOL(hci_set_fw_info);
4245
4246 /* ---- Interface to upper protocols ---- */
4247
4248 int hci_register_cb(struct hci_cb *cb)
4249 {
4250         BT_DBG("%p name %s", cb, cb->name);
4251
4252         mutex_lock(&hci_cb_list_lock);
4253         list_add_tail(&cb->list, &hci_cb_list);
4254         mutex_unlock(&hci_cb_list_lock);
4255
4256         return 0;
4257 }
4258 EXPORT_SYMBOL(hci_register_cb);
4259
4260 int hci_unregister_cb(struct hci_cb *cb)
4261 {
4262         BT_DBG("%p name %s", cb, cb->name);
4263
4264         mutex_lock(&hci_cb_list_lock);
4265         list_del(&cb->list);
4266         mutex_unlock(&hci_cb_list_lock);
4267
4268         return 0;
4269 }
4270 EXPORT_SYMBOL(hci_unregister_cb);
4271
4272 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4273 {
4274         int err;
4275
4276         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4277                skb->len);
4278
4279         /* Time stamp */
4280         __net_timestamp(skb);
4281
4282         /* Send copy to monitor */
4283         hci_send_to_monitor(hdev, skb);
4284
4285         if (atomic_read(&hdev->promisc)) {
4286                 /* Send copy to the sockets */
4287                 hci_send_to_sock(hdev, skb);
4288         }
4289
4290         /* Get rid of the skb owner prior to sending to the driver. */
4291         skb_orphan(skb);
4292
4293         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4294                 kfree_skb(skb);
4295                 return;
4296         }
4297
4298         err = hdev->send(hdev, skb);
4299         if (err < 0) {
4300                 bt_dev_err(hdev, "sending frame failed (%d)", err);
4301                 kfree_skb(skb);
4302         }
4303 }
4304
4305 /* Send HCI command */
4306 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4307                  const void *param)
4308 {
4309         struct sk_buff *skb;
4310
4311         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4312
4313         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4314         if (!skb) {
4315                 bt_dev_err(hdev, "no memory for command");
4316                 return -ENOMEM;
4317         }
4318
4319         /* Stand-alone HCI commands must be flagged as
4320          * single-command requests.
4321          */
4322         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4323
4324         skb_queue_tail(&hdev->cmd_q, skb);
4325         queue_work(hdev->workqueue, &hdev->cmd_work);
4326
4327         return 0;
4328 }
4329
4330 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4331                    const void *param)
4332 {
4333         struct sk_buff *skb;
4334
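             /* OGF 0x3f is the vendor-specific command group (opcode bits 15:10,
              * see hci_opcode_ogf()).
              */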
4335         if (hci_opcode_ogf(opcode) != 0x3f) {
4336                 /* A controller receiving a command shall respond with either
4337                  * a Command Status Event or a Command Complete Event.
4338                  * Therefore, all standard HCI commands must be sent via the
4339                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4340                  * Some vendors do not comply with this rule for vendor-specific
4341                  * commands and do not return any event. We want to support
4342                  * unresponded commands for such cases only.
4343                  */
4344                 bt_dev_err(hdev, "unresponded command not supported");
4345                 return -EINVAL;
4346         }
4347
4348         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4349         if (!skb) {
4350                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4351                            opcode);
4352                 return -ENOMEM;
4353         }
4354
4355         hci_send_frame(hdev, skb);
4356
4357         return 0;
4358 }
4359 EXPORT_SYMBOL(__hci_cmd_send);
4360
4361 /* Get data from the previously sent command */
4362 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4363 {
4364         struct hci_command_hdr *hdr;
4365
4366         if (!hdev->sent_cmd)
4367                 return NULL;
4368
4369         hdr = (void *) hdev->sent_cmd->data;
4370
4371         if (hdr->opcode != cpu_to_le16(opcode))
4372                 return NULL;
4373
4374         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4375
4376         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4377 }
4378
4379 /* Send HCI command and wait for command complete event */
4380 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4381                              const void *param, u32 timeout)
4382 {
4383         struct sk_buff *skb;
4384
4385         if (!test_bit(HCI_UP, &hdev->flags))
4386                 return ERR_PTR(-ENETDOWN);
4387
4388         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4389
4390         hci_req_sync_lock(hdev);
4391         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4392         hci_req_sync_unlock(hdev);
4393
4394         return skb;
4395 }
4396 EXPORT_SYMBOL(hci_cmd_sync);
4397
4398 /* Send ACL data */
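     /* The ACL data header packs the 12-bit connection handle into the low
      * bits of the handle field and the PB/BC flags into bits 12-15 (see
      * hci_handle_pack()).
      */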
4399 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4400 {
4401         struct hci_acl_hdr *hdr;
4402         int len = skb->len;
4403
4404         skb_push(skb, HCI_ACL_HDR_SIZE);
4405         skb_reset_transport_header(skb);
4406         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4407         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4408         hdr->dlen   = cpu_to_le16(len);
4409 }
4410
4411 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4412                           struct sk_buff *skb, __u16 flags)
4413 {
4414         struct hci_conn *conn = chan->conn;
4415         struct hci_dev *hdev = conn->hdev;
4416         struct sk_buff *list;
4417
4418         skb->len = skb_headlen(skb);
4419         skb->data_len = 0;
4420
4421         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4422
4423         switch (hdev->dev_type) {
4424         case HCI_PRIMARY:
4425                 hci_add_acl_hdr(skb, conn->handle, flags);
4426                 break;
4427         case HCI_AMP:
4428                 hci_add_acl_hdr(skb, chan->handle, flags);
4429                 break;
4430         default:
4431                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4432                 return;
4433         }
4434
4435         list = skb_shinfo(skb)->frag_list;
4436         if (!list) {
4437                 /* Non-fragmented */
4438                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4439
4440                 skb_queue_tail(queue, skb);
4441         } else {
4442                 /* Fragmented */
4443                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4444
4445                 skb_shinfo(skb)->frag_list = NULL;
4446
4447                 /* Queue all fragments atomically. We need to use spin_lock_bh
4448                  * here because of 6LoWPAN links, as there this function is
4449                  * called from softirq and using normal spin lock could cause
4450                  * deadlocks.
4451                  */
4452                 spin_lock_bh(&queue->lock);
4453
4454                 __skb_queue_tail(queue, skb);
4455
4456                 flags &= ~ACL_START;
4457                 flags |= ACL_CONT;
4458                 do {
4459                         skb = list; list = list->next;
4460
4461                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4462                         hci_add_acl_hdr(skb, conn->handle, flags);
4463
4464                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4465
4466                         __skb_queue_tail(queue, skb);
4467                 } while (list);
4468
4469                 spin_unlock_bh(&queue->lock);
4470         }
4471 }
4472
4473 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4474 {
4475         struct hci_dev *hdev = chan->conn->hdev;
4476
4477         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4478
4479         hci_queue_acl(chan, &chan->data_q, skb, flags);
4480
4481         queue_work(hdev->workqueue, &hdev->tx_work);
4482 }
4483
4484 /* Send SCO data */
4485 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4486 {
4487         struct hci_dev *hdev = conn->hdev;
4488         struct hci_sco_hdr hdr;
4489
4490         BT_DBG("%s len %d", hdev->name, skb->len);
4491
4492         hdr.handle = cpu_to_le16(conn->handle);
4493         hdr.dlen   = skb->len;
4494
4495         skb_push(skb, HCI_SCO_HDR_SIZE);
4496         skb_reset_transport_header(skb);
4497         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4498
4499         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4500
4501         skb_queue_tail(&conn->data_q, skb);
4502         queue_work(hdev->workqueue, &hdev->tx_work);
4503 }
4504
4505 /* ---- HCI TX task (outgoing data) ---- */
4506
4507 /* HCI Connection scheduler */
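     /* Pick the connection of the given type with the fewest packets in
      * flight and grant it a fair share of the free controller buffers, e.g.
      * 8 free ACL slots shared by 4 busy connections yields a quote of 2.
      */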
4508 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4509                                      int *quote)
4510 {
4511         struct hci_conn_hash *h = &hdev->conn_hash;
4512         struct hci_conn *conn = NULL, *c;
4513         unsigned int num = 0, min = ~0;
4514
4515         /* We don't have to lock device here. Connections are always
4516          * added and removed with TX task disabled. */
4517
4518         rcu_read_lock();
4519
4520         list_for_each_entry_rcu(c, &h->list, list) {
4521                 if (c->type != type || skb_queue_empty(&c->data_q))
4522                         continue;
4523
4524                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4525                         continue;
4526
4527                 num++;
4528
4529                 if (c->sent < min) {
4530                         min  = c->sent;
4531                         conn = c;
4532                 }
4533
4534                 if (hci_conn_num(hdev, type) == num)
4535                         break;
4536         }
4537
4538         rcu_read_unlock();
4539
4540         if (conn) {
4541                 int cnt, q;
4542
4543                 switch (conn->type) {
4544                 case ACL_LINK:
4545                         cnt = hdev->acl_cnt;
4546                         break;
4547                 case SCO_LINK:
4548                 case ESCO_LINK:
4549                         cnt = hdev->sco_cnt;
4550                         break;
4551                 case LE_LINK:
4552                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4553                         break;
4554                 default:
4555                         cnt = 0;
4556                         bt_dev_err(hdev, "unknown link type %d", conn->type);
4557                 }
4558
4559                 q = cnt / num;
4560                 *quote = q ? q : 1;
4561         } else
4562                 *quote = 0;
4563
4564         BT_DBG("conn %p quote %d", conn, *quote);
4565         return conn;
4566 }
4567
4568 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4569 {
4570         struct hci_conn_hash *h = &hdev->conn_hash;
4571         struct hci_conn *c;
4572
4573         bt_dev_err(hdev, "link tx timeout");
4574
4575         rcu_read_lock();
4576
4577         /* Kill stalled connections */
4578         list_for_each_entry_rcu(c, &h->list, list) {
4579                 if (c->type == type && c->sent) {
4580                         bt_dev_err(hdev, "killing stalled connection %pMR",
4581                                    &c->dst);
4582                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4583                 }
4584         }
4585
4586         rcu_read_unlock();
4587 }
4588
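     /* Like hci_low_sent(), but per channel: only channels whose head skb has
      * the highest pending priority compete, and among those the channel on
      * the connection with the fewest packets in flight wins.
      */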
4589 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4590                                       int *quote)
4591 {
4592         struct hci_conn_hash *h = &hdev->conn_hash;
4593         struct hci_chan *chan = NULL;
4594         unsigned int num = 0, min = ~0, cur_prio = 0;
4595         struct hci_conn *conn;
4596         int cnt, q, conn_num = 0;
4597
4598         BT_DBG("%s", hdev->name);
4599
4600         rcu_read_lock();
4601
4602         list_for_each_entry_rcu(conn, &h->list, list) {
4603                 struct hci_chan *tmp;
4604
4605                 if (conn->type != type)
4606                         continue;
4607
4608                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4609                         continue;
4610
4611                 conn_num++;
4612
4613                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4614                         struct sk_buff *skb;
4615
4616                         if (skb_queue_empty(&tmp->data_q))
4617                                 continue;
4618
4619                         skb = skb_peek(&tmp->data_q);
4620                         if (skb->priority < cur_prio)
4621                                 continue;
4622
4623                         if (skb->priority > cur_prio) {
4624                                 num = 0;
4625                                 min = ~0;
4626                                 cur_prio = skb->priority;
4627                         }
4628
4629                         num++;
4630
4631                         if (conn->sent < min) {
4632                                 min  = conn->sent;
4633                                 chan = tmp;
4634                         }
4635                 }
4636
4637                 if (hci_conn_num(hdev, type) == conn_num)
4638                         break;
4639         }
4640
4641         rcu_read_unlock();
4642
4643         if (!chan)
4644                 return NULL;
4645
4646         switch (chan->conn->type) {
4647         case ACL_LINK:
4648                 cnt = hdev->acl_cnt;
4649                 break;
4650         case AMP_LINK:
4651                 cnt = hdev->block_cnt;
4652                 break;
4653         case SCO_LINK:
4654         case ESCO_LINK:
4655                 cnt = hdev->sco_cnt;
4656                 break;
4657         case LE_LINK:
4658                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4659                 break;
4660         default:
4661                 cnt = 0;
4662                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4663         }
4664
4665         q = cnt / num;
4666         *quote = q ? q : 1;
4667         BT_DBG("chan %p quote %d", chan, *quote);
4668         return chan;
4669 }
4670
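/* Starvation avoidance: after a TX round, channels that were serviced
 * get their per-round counter reset, while the head packet of every
 * backlogged channel that sent nothing is promoted to HCI_PRIO_MAX - 1
 * so it cannot be skipped again on the next round.
 */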
4671 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4672 {
4673         struct hci_conn_hash *h = &hdev->conn_hash;
4674         struct hci_conn *conn;
4675         int num = 0;
4676
4677         BT_DBG("%s", hdev->name);
4678
4679         rcu_read_lock();
4680
4681         list_for_each_entry_rcu(conn, &h->list, list) {
4682                 struct hci_chan *chan;
4683
4684                 if (conn->type != type)
4685                         continue;
4686
4687                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4688                         continue;
4689
4690                 num++;
4691
4692                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4693                         struct sk_buff *skb;
4694
4695                         if (chan->sent) {
4696                                 chan->sent = 0;
4697                                 continue;
4698                         }
4699
4700                         if (skb_queue_empty(&chan->data_q))
4701                                 continue;
4702
4703                         skb = skb_peek(&chan->data_q);
4704                         if (skb->priority >= HCI_PRIO_MAX - 1)
4705                                 continue;
4706
4707                         skb->priority = HCI_PRIO_MAX - 1;
4708
4709                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4710                                skb->priority);
4711                 }
4712
4713                 if (hci_conn_num(hdev, type) == num)
4714                         break;
4715         }
4716
4717         rcu_read_unlock();
4718
4719 }
4720
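/* Convert an ACL packet into its cost in controller buffer blocks,
 * e.g. a 1003-byte frame with the 4-byte ACL header and 339-byte
 * blocks costs DIV_ROUND_UP(1003 - 4, 339) = 3 blocks.
 */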
4721 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4722 {
4723         /* Calculate count of blocks used by this packet */
4724         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4725 }
4726
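/* TX watchdog: if the controller has no free buffers left (cnt == 0)
 * and nothing has completed for HCI_ACL_TX_TIMEOUT, assume the link
 * has stalled and kill the affected connections.  Skipped while the
 * controller is still unconfigured.
 */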
4727 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
4728 {
4729         unsigned long last_tx;
4730
4731         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4732                 return;
4733
4734         switch (type) {
4735         case LE_LINK:
4736                 last_tx = hdev->le_last_tx;
4737                 break;
4738         default:
4739                 last_tx = hdev->acl_last_tx;
4740                 break;
4741         }
4742
4743         /* tx timeout must be longer than maximum link supervision timeout
4744          * (40.9 seconds)
4745          */
4746         if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
4747                 hci_link_tx_to(hdev, type);
4748 }
4749
4750 /* Schedule SCO */
4751 static void hci_sched_sco(struct hci_dev *hdev)
4752 {
4753         struct hci_conn *conn;
4754         struct sk_buff *skb;
4755         int quote;
4756
4757         BT_DBG("%s", hdev->name);
4758
4759         if (!hci_conn_num(hdev, SCO_LINK))
4760                 return;
4761
4762         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4763                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4764                         BT_DBG("skb %p len %d", skb, skb->len);
4765                         hci_send_frame(hdev, skb);
4766
4767                         conn->sent++;
4768                         if (conn->sent == ~0)
4769                                 conn->sent = 0;
4770                 }
4771         }
4772 }
4773
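/* Schedule eSCO */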
4774 static void hci_sched_esco(struct hci_dev *hdev)
4775 {
4776         struct hci_conn *conn;
4777         struct sk_buff *skb;
4778         int quote;
4779
4780         BT_DBG("%s", hdev->name);
4781
4782         if (!hci_conn_num(hdev, ESCO_LINK))
4783                 return;
4784
4785         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4786                                                      &quote))) {
4787                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4788                         BT_DBG("skb %p len %d", skb, skb->len);
4789                         hci_send_frame(hdev, skb);
4790
4791                         conn->sent++;
4792                         if (conn->sent == ~0)
4793                                 conn->sent = 0;
4794                 }
4795         }
4796 }
4797
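/* Packet-based ACL scheduling: service the selected channel for up to
 * its quota of packets, stop early if a lower-priority packet reaches
 * the head of the queue, and give pending SCO/eSCO traffic a chance to
 * go out after every ACL frame.
 */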
4798 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4799 {
4800         unsigned int cnt = hdev->acl_cnt;
4801         struct hci_chan *chan;
4802         struct sk_buff *skb;
4803         int quote;
4804
4805         __check_timeout(hdev, cnt, ACL_LINK);
4806
4807         while (hdev->acl_cnt &&
4808                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4809                 u32 priority = (skb_peek(&chan->data_q))->priority;
4810                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4811                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4812                                skb->len, skb->priority);
4813
4814                         /* Stop if priority has changed */
4815                         if (skb->priority < priority)
4816                                 break;
4817
4818                         skb = skb_dequeue(&chan->data_q);
4819
4820                         hci_conn_enter_active_mode(chan->conn,
4821                                                    bt_cb(skb)->force_active);
4822
4823                         hci_send_frame(hdev, skb);
4824                         hdev->acl_last_tx = jiffies;
4825
4826                         hdev->acl_cnt--;
4827                         chan->sent++;
4828                         chan->conn->sent++;
4829
4830                         /* Send pending SCO packets right away */
4831                         hci_sched_sco(hdev);
4832                         hci_sched_esco(hdev);
4833                 }
4834         }
4835
4836         if (cnt != hdev->acl_cnt)
4837                 hci_prio_recalculate(hdev, ACL_LINK);
4838 }
4839
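/* Block-based ACL scheduling (AMP controllers): identical in spirit to
 * the packet-based path, except that the quota and the shared budget
 * are accounted in controller buffer blocks per __get_blocks().
 */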
4840 static void hci_sched_acl_blk(struct hci_dev *hdev)
4841 {
4842         unsigned int cnt = hdev->block_cnt;
4843         struct hci_chan *chan;
4844         struct sk_buff *skb;
4845         int quote;
4846         u8 type;
4847
4848         BT_DBG("%s", hdev->name);
4849
4850         if (hdev->dev_type == HCI_AMP)
4851                 type = AMP_LINK;
4852         else
4853                 type = ACL_LINK;
4854
4855         __check_timeout(hdev, cnt, type);
4856
4857         while (hdev->block_cnt > 0 &&
4858                (chan = hci_chan_sent(hdev, type, &quote))) {
4859                 u32 priority = (skb_peek(&chan->data_q))->priority;
4860                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4861                         int blocks;
4862
4863                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4864                                skb->len, skb->priority);
4865
4866                         /* Stop if priority has changed */
4867                         if (skb->priority < priority)
4868                                 break;
4869
4870                         skb = skb_dequeue(&chan->data_q);
4871
4872                         blocks = __get_blocks(hdev, skb);
4873                         if (blocks > hdev->block_cnt)
4874                                 return;
4875
4876                         hci_conn_enter_active_mode(chan->conn,
4877                                                    bt_cb(skb)->force_active);
4878
4879                         hci_send_frame(hdev, skb);
4880                         hdev->acl_last_tx = jiffies;
4881
4882                         hdev->block_cnt -= blocks;
4883                         quote -= blocks;
4884
4885                         chan->sent += blocks;
4886                         chan->conn->sent += blocks;
4887                 }
4888         }
4889
4890         if (cnt != hdev->block_cnt)
4891                 hci_prio_recalculate(hdev, type);
4892 }
4893
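/* Dispatch ACL scheduling according to the controller's flow control
 * mode: packet-based counts whole packets, block-based counts buffer
 * blocks.
 */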
4894 static void hci_sched_acl(struct hci_dev *hdev)
4895 {
4896         BT_DBG("%s", hdev->name);
4897
4898         /* No ACL links to schedule on a BR/EDR controller */
4899         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4900                 return;
4901
4902         /* No AMP links to schedule on an AMP controller */
4903         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4904                 return;
4905
4906         switch (hdev->flow_ctl_mode) {
4907         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4908                 hci_sched_acl_pkt(hdev);
4909                 break;
4910
4911         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4912                 hci_sched_acl_blk(hdev);
4913                 break;
4914         }
4915 }
4916
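/* LE scheduling mirrors the packet-based ACL path.  Credits come from
 * the dedicated LE buffer pool when the controller advertises one
 * (le_pkts); otherwise LE traffic shares the ACL buffer pool.
 */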
4917 static void hci_sched_le(struct hci_dev *hdev)
4918 {
4919         struct hci_chan *chan;
4920         struct sk_buff *skb;
4921         int quote, cnt, tmp;
4922
4923         BT_DBG("%s", hdev->name);
4924
4925         if (!hci_conn_num(hdev, LE_LINK))
4926                 return;
4927
4928         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4929
4930         __check_timeout(hdev, cnt, LE_LINK);
4931
4932         tmp = cnt;
4933         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4934                 u32 priority = (skb_peek(&chan->data_q))->priority;
4935                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4936                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4937                                skb->len, skb->priority);
4938
4939                         /* Stop if priority has changed */
4940                         if (skb->priority < priority)
4941                                 break;
4942
4943                         skb = skb_dequeue(&chan->data_q);
4944
4945                         hci_send_frame(hdev, skb);
4946                         hdev->le_last_tx = jiffies;
4947
4948                         cnt--;
4949                         chan->sent++;
4950                         chan->conn->sent++;
4951
4952                         /* Send pending SCO packets right away */
4953                         hci_sched_sco(hdev);
4954                         hci_sched_esco(hdev);
4955                 }
4956         }
4957
4958         if (hdev->le_pkts)
4959                 hdev->le_cnt = cnt;
4960         else
4961                 hdev->acl_cnt = cnt;
4962
4963         if (cnt != tmp)
4964                 hci_prio_recalculate(hdev, LE_LINK);
4965 }
4966
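/* TX work handler: unless userspace has exclusive access through
 * HCI_USER_CHANNEL, run the SCO, eSCO, ACL and LE schedulers, then
 * flush any queued raw packets regardless of the channel mode.
 */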
4967 static void hci_tx_work(struct work_struct *work)
4968 {
4969         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4970         struct sk_buff *skb;
4971
4972         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4973                hdev->sco_cnt, hdev->le_cnt);
4974
4975         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4976                 /* Schedule queues and send stuff to HCI driver */
4977                 hci_sched_sco(hdev);
4978                 hci_sched_esco(hdev);
4979                 hci_sched_acl(hdev);
4980                 hci_sched_le(hdev);
4981         }
4982
4983         /* Send next queued raw (unknown type) packet */
4984         while ((skb = skb_dequeue(&hdev->raw_q)))
4985                 hci_send_frame(hdev, skb);
4986 }
4987
4988 /* ----- HCI RX task (incoming data processing) ----- */
4989
4990 /* ACL data packet */
4991 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4992 {
4993         struct hci_acl_hdr *hdr = (void *) skb->data;
4994         struct hci_conn *conn;
4995         __u16 handle, flags;
4996
4997         skb_pull(skb, HCI_ACL_HDR_SIZE);
4998
4999         handle = __le16_to_cpu(hdr->handle);
5000         flags  = hci_flags(handle);
5001         handle = hci_handle(handle);
5002
5003         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5004                handle, flags);
5005
5006         hdev->stat.acl_rx++;
5007
5008         hci_dev_lock(hdev);
5009         conn = hci_conn_hash_lookup_handle(hdev, handle);
5010         hci_dev_unlock(hdev);
5011
5012         if (conn) {
5013                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5014
5015                 /* Send to upper protocol */
5016                 l2cap_recv_acldata(conn, skb, flags);
5017                 return;
5018         } else {
5019                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
5020                            handle);
5021         }
5022
5023         kfree_skb(skb);
5024 }
5025
5026 /* SCO data packet */
5027 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5028 {
5029         struct hci_sco_hdr *hdr = (void *) skb->data;
5030         struct hci_conn *conn;
5031         __u16 handle, flags;
5032
5033         skb_pull(skb, HCI_SCO_HDR_SIZE);
5034
5035         handle = __le16_to_cpu(hdr->handle);
5036         flags  = hci_flags(handle);
5037         handle = hci_handle(handle);
5038
5039         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5040                handle, flags);
5041
5042         hdev->stat.sco_rx++;
5043
5044         hci_dev_lock(hdev);
5045         conn = hci_conn_hash_lookup_handle(hdev, handle);
5046         hci_dev_unlock(hdev);
5047
5048         if (conn) {
5049                 /* Send to upper protocol */
5050                 bt_cb(skb)->sco.pkt_status = flags & 0x03;
5051                 sco_recv_scodata(conn, skb);
5052                 return;
5053         } else {
5054                 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
5055                            handle);
5056         }
5057
5058         kfree_skb(skb);
5059 }
5060
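/* The current request is complete once the command queue is empty or
 * the next queued command is flagged HCI_REQ_START, i.e. it opens a
 * new request.
 */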
5061 static bool hci_req_is_complete(struct hci_dev *hdev)
5062 {
5063         struct sk_buff *skb;
5064
5065         skb = skb_peek(&hdev->cmd_q);
5066         if (!skb)
5067                 return true;
5068
5069         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
5070 }
5071
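/* Requeue a clone of the last sent command and kick the command work.
 * HCI Reset is deliberately never resent: see the CSR quirk handling
 * in hci_req_cmd_complete().
 */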
5072 static void hci_resend_last(struct hci_dev *hdev)
5073 {
5074         struct hci_command_hdr *sent;
5075         struct sk_buff *skb;
5076         u16 opcode;
5077
5078         if (!hdev->sent_cmd)
5079                 return;
5080
5081         sent = (void *) hdev->sent_cmd->data;
5082         opcode = __le16_to_cpu(sent->opcode);
5083         if (opcode == HCI_OP_RESET)
5084                 return;
5085
5086         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5087         if (!skb)
5088                 return;
5089
5090         skb_queue_head(&hdev->cmd_q, skb);
5091         queue_work(hdev->workqueue, &hdev->cmd_work);
5092 }
5093
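/* Match a completed command against the current request.  If the
 * request has finished (its last command completed, or one of its
 * commands failed), return the request's completion callback through
 * req_complete/req_complete_skb and flush its remaining queued
 * commands.
 */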
5094 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
5095                           hci_req_complete_t *req_complete,
5096                           hci_req_complete_skb_t *req_complete_skb)
5097 {
5098         struct sk_buff *skb;
5099         unsigned long flags;
5100
5101         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5102
5103         /* If the completed command doesn't match the last one that was
5104          * sent, we need to do special handling of it.
5105          */
5106         if (!hci_sent_cmd_data(hdev, opcode)) {
5107                 /* Some CSR-based controllers generate a spontaneous
5108                  * reset complete event during init and any pending
5109                  * command will never be completed. In such a case we
5110                  * need to resend whatever was the last sent
5111                  * command.
5112                  */
5113                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5114                         hci_resend_last(hdev);
5115
5116                 return;
5117         }
5118
5119         /* If we reach this point, this event matches the last command sent */
5120         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
5121
5122         /* If the command succeeded and there are still more commands
5123          * in this request, the request is not yet complete.
5124          */
5125         if (!status && !hci_req_is_complete(hdev))
5126                 return;
5127
5128         /* If this was the last command in a request, the complete
5129          * callback would be found in hdev->sent_cmd instead of the
5130          * command queue (hdev->cmd_q).
5131          */
5132         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
5133                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
5134                 return;
5135         }
5136
5137         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
5138                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
5139                 return;
5140         }
5141
5142         /* Remove all pending commands belonging to this request */
5143         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5144         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5145                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
5146                         __skb_queue_head(&hdev->cmd_q, skb);
5147                         break;
5148                 }
5149
5150                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
5151                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
5152                 else
5153                         *req_complete = bt_cb(skb)->hci.req_complete;
5154                 dev_kfree_skb_irq(skb);
5155         }
5156         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5157 }
5158
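/* RX work handler: drain hdev->rx_q, mirroring each packet to the
 * monitor and to promiscuous sockets before dispatching it to the
 * event, ACL data or SCO data handlers.
 */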
5159 static void hci_rx_work(struct work_struct *work)
5160 {
5161         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5162         struct sk_buff *skb;
5163
5164         BT_DBG("%s", hdev->name);
5165
5166         while ((skb = skb_dequeue(&hdev->rx_q))) {
5167                 /* Send copy to monitor */
5168                 hci_send_to_monitor(hdev, skb);
5169
5170                 if (atomic_read(&hdev->promisc)) {
5171                         /* Send copy to the sockets */
5172                         hci_send_to_sock(hdev, skb);
5173                 }
5174
5175                 /* If the device has been opened in HCI_USER_CHANNEL,
5176                  * userspace has exclusive access to the device.
5177                  * While the device is in HCI_INIT state, we still
5178                  * need to process the data packets so that the
5179                  * driver can complete its setup().
5180                  */
5181                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5182                     !test_bit(HCI_INIT, &hdev->flags)) {
5183                         kfree_skb(skb);
5184                         continue;
5185                 }
5186
5187                 if (test_bit(HCI_INIT, &hdev->flags)) {
5188                         /* Don't process data packets in this state. */
5189                         switch (hci_skb_pkt_type(skb)) {
5190                         case HCI_ACLDATA_PKT:
5191                         case HCI_SCODATA_PKT:
5192                         case HCI_ISODATA_PKT:
5193                                 kfree_skb(skb);
5194                                 continue;
5195                         }
5196                 }
5197
5198                 /* Process frame */
5199                 switch (hci_skb_pkt_type(skb)) {
5200                 case HCI_EVENT_PKT:
5201                         BT_DBG("%s Event packet", hdev->name);
5202                         hci_event_packet(hdev, skb);
5203                         break;
5204
5205                 case HCI_ACLDATA_PKT:
5206                         BT_DBG("%s ACL data packet", hdev->name);
5207                         hci_acldata_packet(hdev, skb);
5208                         break;
5209
5210                 case HCI_SCODATA_PKT:
5211                         BT_DBG("%s SCO data packet", hdev->name);
5212                         hci_scodata_packet(hdev, skb);
5213                         break;
5214
5215                 default:
5216                         kfree_skb(skb);
5217                         break;
5218                 }
5219         }
5220 }
5221
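/* Command work handler: when the controller has a free command credit
 * (cmd_cnt), dequeue the next command, remember a clone in
 * hdev->sent_cmd for completion matching, send it, and arm the command
 * timeout unless a reset is in flight.
 */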
5222 static void hci_cmd_work(struct work_struct *work)
5223 {
5224         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5225         struct sk_buff *skb;
5226
5227         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5228                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5229
5230         /* Send queued commands */
5231         if (atomic_read(&hdev->cmd_cnt)) {
5232                 skb = skb_dequeue(&hdev->cmd_q);
5233                 if (!skb)
5234                         return;
5235
5236                 kfree_skb(hdev->sent_cmd);
5237
5238                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5239                 if (hdev->sent_cmd) {
5240                         if (hci_req_status_pend(hdev))
5241                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
5242                         atomic_dec(&hdev->cmd_cnt);
5243                         hci_send_frame(hdev, skb);
5244                         if (test_bit(HCI_RESET, &hdev->flags))
5245                                 cancel_delayed_work(&hdev->cmd_timer);
5246                         else
5247                                 schedule_delayed_work(&hdev->cmd_timer,
5248                                                       HCI_CMD_TIMEOUT);
5249                 } else {
5250                         skb_queue_head(&hdev->cmd_q, skb);
5251                         queue_work(hdev->workqueue, &hdev->cmd_work);
5252                 }
5253         }
5254 }