/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

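/* There is no HCI command to leave Device Under Test mode; disabling is
 * therefore implemented by resetting the controller (see the HCI_OP_RESET
 * branch below).
 */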
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
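/* With debugfs mounted in the usual place, this entry appears as
 * /sys/kernel/debug/bluetooth/hci<N>/dut_mode, e.g.:
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */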

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        bool enable;
        int err;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or in user channel operation, then there is no need
         * for the vendor callback. Instead just store the desired value and
         * the setting will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
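        /* 0x7d00 = 32000 baseband slots; 32000 * 0.625 ms = 20 s */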
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

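        /* Event-mask bit n lives in events[n / 8], bit (n % 8), matching
         * the little-endian layout of the Set Event Mask command
         * parameter.
         */
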
        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should be
                 * available as well. However some controllers list the
                 * max_page as 0 as long as SSP has not been enabled. To
                 * achieve proper debugging output, force max_page to a
                 * minimum of 1.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

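        /* Only send the command when it would actually change the host's
         * LE support bit; rewriting the same value is redundant.
         */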
        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If Connectionless Peripheral Broadcast central role is supported
         * enable all necessary events for it.
         */
        if (lmp_cpb_central_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Peripheral Page Response Timeout */
                events[2] |= 0x20;      /* CPB Channel Map Change */
                changed = true;
        }

        /* If Connectionless Peripheral Broadcast peripheral role is supported
         * enable all necessary events for it.
         */
        if (lmp_cpb_peripheral_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CPB Receive */
                events[2] |= 0x04;      /* CPB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for Set Event
         * Mask Page 2 command, but then actually do not support it. Since
         * the default value is all bits set to zero, the command is only
         * required if the event mask has to be changed. In case no change
         * to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        if (hdev->commands[18] & 0x04 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports LL Privacy feature, enable
                 * the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
                        events[1] |= 0x02;      /* LE Enhanced Connection
                                                 * Complete
                                                 */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports Channel Selection Algorithm #2
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;      /* LE PHY Update Complete */

                /* If the controller supports LE Set Extended Scan Parameters
                 * and LE Set Extended Scan Enable commands, enable the
                 * corresponding event.
                 */
                if (use_ext_scan(hdev))
                        events[1] |= 0x10;      /* LE Extended Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Extended Advertising
                 * command, enable the corresponding event.
                 */
                if (ext_adv_capable(hdev))
                        events[2] |= 0x02;      /* LE Advertising Set
                                                 * Terminated
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                /* Read LE Advertising Channel TX Power */
                if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
                        /* HCI TS spec forbids mixing of legacy and extended
                         * advertising commands wherein READ_ADV_TX_POWER is
                         * also included. So do not call it if extended adv
                         * is supported, otherwise the controller will return
                         * COMMAND_DISALLOWED for extended commands.
                         */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if ((hdev->commands[38] & 0x80) &&
                    !test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) {
                        /* Read LE Min/Max Tx Power */
                        hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE Accept List Size */
                        hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE Accept List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
                }

                if (hdev->commands[34] & 0x40) {
                        /* Read LE Resolving List Size */
                        hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[34] & 0x20) {
                        /* Clear LE Resolving List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
                }

                if (hdev->commands[35] & 0x04) {
                        __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);

                        /* Set RPA timeout */
                        hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
                                    &rpa_timeout);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                if (ext_adv_capable(hdev)) {
                        /* Read LE Number of Supported Advertising Sets */
                        hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
                                    0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Read local pairing options if the HCI command is supported */
        if (hdev->commands[41] & 0x08)
                hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* If supported, set erroneous data reporting to match the wideband
         * speech setting value.
         */
        if (hdev->commands[18] & 0x08 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
                bool enabled = hci_dev_test_flag(hdev,
                                                 HCI_WIDEBAND_SPEECH_ENABLED);

                if (enabled !=
                    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
                        struct hci_cp_write_def_err_data_reporting cp;

                        cp.err_data_reporting = enabled ?
                                                ERR_DATA_REPORTING_ENABLED :
                                                ERR_DATA_REPORTING_DISABLED;

                        hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
                                    sizeof(cp), &cp);
                }
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
                cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                cp.all_phys = 0x00;
                cp.tx_phys = hdev->le_tx_def_phys;
                cp.rx_phys = hdev->le_rx_def_phys;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

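/* Request builders for the legacy HCISETSCAN, HCISETAUTH, HCISETENCRYPT
 * and HCISETLINKPOL ioctls; each is expected to run synchronously via
 * hci_req_sync() from hci_dev_cmd().
 */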
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

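/* Tizen tracks LE discovery separately from BR/EDR discovery; the
 * helpers below mirror hci_discovery_active() and
 * hci_discovery_set_state() for hdev->le_discovery.
 */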
#ifdef TIZEN_BT
bool hci_le_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->le_discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_le_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name,
                        hdev->le_discovery.state, state);

        if (hdev->le_discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (hdev->le_discovery.state != DISCOVERY_STARTING)
                        mgmt_le_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_le_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->le_discovery.state = state;
}

static void hci_tx_timeout_error_evt(struct hci_dev *hdev)
{
        BT_ERR("%s H/W TX Timeout error", hdev->name);

        mgmt_tx_timeout_error(hdev);
}
#endif

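/* Every inquiry cache entry is linked on cache->all (and possibly also
 * on ->unknown or ->resolve), so freeing through ->all releases all
 * entries; the other two list heads only need to be reinitialized.
 */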
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

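/* Re-insert @ie so that the resolve list stays ordered by ascending
 * |RSSI| (i.e. strongest signal first for the usual negative dBm
 * values); entries whose name resolution is already pending keep their
 * position at the front of the list.
 */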
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

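        /* Inquiry_Length is expressed in units of 1.28 seconds; budget
         * roughly two seconds of wall time per unit for the request.
         */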
1413         timeo = ir.length * msecs_to_jiffies(2000);
1414
1415         if (do_inquiry) {
1416                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1417                                    timeo, NULL);
1418                 if (err < 0)
1419                         goto done;
1420
1421                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1422                  * cleared). If it is interrupted by a signal, return -EINTR.
1423                  */
1424                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1425                                 TASK_INTERRUPTIBLE)) {
1426                         err = -EINTR;
1427                         goto done;
1428                 }
1429         }
1430
1431         /* for unlimited number of responses we will use buffer with
1432          * 255 entries
1433          */
1434         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1435
1436         /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1437          * copy it to the user space.
1438          */
1439         buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1440         if (!buf) {
1441                 err = -ENOMEM;
1442                 goto done;
1443         }
1444
1445         hci_dev_lock(hdev);
1446         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1447         hci_dev_unlock(hdev);
1448
1449         BT_DBG("num_rsp %d", ir.num_rsp);
1450
1451         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1452                 ptr += sizeof(ir);
1453                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1454                                  ir.num_rsp))
1455                         err = -EFAULT;
1456         } else
1457                 err = -EFAULT;
1458
1459         kfree(buf);
1460
1461 done:
1462         hci_dev_put(hdev);
1463         return err;
1464 }
1465
1466 /**
1467  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1468  *                                     (BD_ADDR) for a HCI device from
1469  *                                     a firmware node property.
1470  * @hdev:       The HCI device
1471  *
1472  * Search the firmware node for 'local-bd-address'.
1473  *
1474  * All-zero BD addresses are rejected, because those could be properties
1475  * that exist in the firmware tables, but were not updated by the firmware. For
1476  * example, the DTS could define 'local-bd-address', with zero BD addresses.
1477  */
1478 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1479 {
1480         struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1481         bdaddr_t ba;
1482         int ret;
1483
1484         ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1485                                             (u8 *)&ba, sizeof(ba));
1486         if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1487                 return;
1488
1489         bacpy(&hdev->public_addr, &ba);
1490 }
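
/* Example: how a transport driver typically opts in to the property
 * lookup above. A minimal sketch with a hypothetical foo_probe(); the
 * only requirements are that the parent device is assigned (so that
 * dev_fwnode() finds the firmware node) and that the quirk is set
 * before hci_register_dev():
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct hci_dev *hdev = hci_alloc_dev();
 *
 *		if (!hdev)
 *			return -ENOMEM;
 *
 *		SET_HCIDEV_DEV(hdev, &pdev->dev);
 *		set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
 *
 *		return hci_register_dev(hdev);
 *	}
 */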
1491
1492 static int hci_dev_do_open(struct hci_dev *hdev)
1493 {
1494         int ret = 0;
1495
1496         BT_DBG("%s %p", hdev->name, hdev);
1497
1498         hci_req_sync_lock(hdev);
1499
1500         if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1501                 ret = -ENODEV;
1502                 goto done;
1503         }
1504
1505         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1506             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1507                 /* Check for rfkill but allow the HCI setup stage to
1508                  * proceed (which in itself doesn't cause any RF activity).
1509                  */
1510                 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1511                         ret = -ERFKILL;
1512                         goto done;
1513                 }
1514
1515                 /* Check for valid public address or a configured static
1516                  * random address, but let the HCI setup proceed to
1517                  * be able to determine if there is a public address
1518                  * or not.
1519                  *
1520                  * In case of user channel usage, it is not important
1521                  * if a public address or static random address is
1522                  * available.
1523                  *
1524                  * This check is only valid for BR/EDR controllers
1525                  * since AMP controllers do not have an address.
1526                  */
1527                 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1528                     hdev->dev_type == HCI_PRIMARY &&
1529                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1530                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1531                         ret = -EADDRNOTAVAIL;
1532                         goto done;
1533                 }
1534         }
1535
1536         if (test_bit(HCI_UP, &hdev->flags)) {
1537                 ret = -EALREADY;
1538                 goto done;
1539         }
1540
1541         if (hdev->open(hdev)) {
1542                 ret = -EIO;
1543                 goto done;
1544         }
1545
1546         set_bit(HCI_RUNNING, &hdev->flags);
1547         hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1548
1549         atomic_set(&hdev->cmd_cnt, 1);
1550         set_bit(HCI_INIT, &hdev->flags);
1551
1552         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1553             test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1554                 bool invalid_bdaddr;
1555
1556                 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1557
1558                 if (hdev->setup)
1559                         ret = hdev->setup(hdev);
1560
1561                 /* The transport driver can set the quirk to mark the
1562                  * BD_ADDR invalid before creating the HCI device or in
1563                  * its setup callback.
1564                  */
1565                 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1566                                           &hdev->quirks);
1567
1568                 if (ret)
1569                         goto setup_failed;
1570
1571                 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1572                         if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1573                                 hci_dev_get_bd_addr_from_property(hdev);
1574
1575                         if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1576                             hdev->set_bdaddr) {
1577                                 ret = hdev->set_bdaddr(hdev,
1578                                                        &hdev->public_addr);
1579
1580                                 /* If setting of the BD_ADDR from the device
1581                                  * property succeeds, then treat the address
1582                                  * as valid even if the invalid BD_ADDR
1583                                  * quirk indicates otherwise.
1584                                  */
1585                                 if (!ret)
1586                                         invalid_bdaddr = false;
1587                         }
1588                 }
1589
1590 setup_failed:
1591                 /* The transport driver can set these quirks before
1592                  * creating the HCI device or in its setup callback.
1593                  *
1594                  * For the invalid BD_ADDR quirk it is possible that
1595                  * it becomes a valid address if the bootloader does
1596                  * provide it (see above).
1597                  *
1598                  * In case any of them is set, the controller has to
1599                  * start up as unconfigured.
1600                  */
1601                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1602                     invalid_bdaddr)
1603                         hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1604
1605                 /* For an unconfigured controller it is required to
1606                  * read at least the version information provided by
1607                  * the Read Local Version Information command.
1608                  *
1609                  * If the set_bdaddr driver callback is provided, then
1610                  * also the original Bluetooth public device address
1611                  * will be read using the Read BD Address command.
1612                  */
1613                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1614                         ret = __hci_unconf_init(hdev);
1615         }
1616
1617         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1618                 /* If public address change is configured, ensure that
1619                  * the address gets programmed. If the driver does not
1620                  * support changing the public address, fail the power
1621                  * on procedure.
1622                  */
1623                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1624                     hdev->set_bdaddr)
1625                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1626                 else
1627                         ret = -EADDRNOTAVAIL;
1628         }
1629
1630         if (!ret) {
1631                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1632                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1633                         ret = __hci_init(hdev);
1634                         if (!ret && hdev->post_init)
1635                                 ret = hdev->post_init(hdev);
1636                 }
1637         }
1638
1639         /* If the HCI Reset command is clearing all diagnostic settings,
1640          * then they need to be reprogrammed after the init procedure
1641          * has completed.
1642          */
1643         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1644             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1645             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1646                 ret = hdev->set_diag(hdev, true);
1647
1648         msft_do_open(hdev);
1649         aosp_do_open(hdev);
1650
1651         clear_bit(HCI_INIT, &hdev->flags);
1652
1653         if (!ret) {
1654                 hci_dev_hold(hdev);
1655                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1656                 hci_adv_instances_set_rpa_expired(hdev, true);
1657                 set_bit(HCI_UP, &hdev->flags);
1658                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1659                 hci_leds_update_powered(hdev, true);
1660                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1661                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1662                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1663                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1664                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1665                     hdev->dev_type == HCI_PRIMARY) {
1666                         ret = __hci_req_hci_power_on(hdev);
1667                         mgmt_power_on(hdev, ret);
1668                 }
1669         } else {
1670                 /* Init failed, cleanup */
1671                 flush_work(&hdev->tx_work);
1672
1673                 /* Since hci_rx_work() may queue new cmd_work, it should
1674                  * be flushed first to avoid an unexpected call of
1675                  * hci_cmd_work()
1676                  */
1677                 flush_work(&hdev->rx_work);
1678                 flush_work(&hdev->cmd_work);
1679
1680                 skb_queue_purge(&hdev->cmd_q);
1681                 skb_queue_purge(&hdev->rx_q);
1682
1683                 if (hdev->flush)
1684                         hdev->flush(hdev);
1685
1686                 if (hdev->sent_cmd) {
1687                         cancel_delayed_work_sync(&hdev->cmd_timer);
1688                         kfree_skb(hdev->sent_cmd);
1689                         hdev->sent_cmd = NULL;
1690                 }
1691
1692                 clear_bit(HCI_RUNNING, &hdev->flags);
1693                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1694
1695                 hdev->close(hdev);
1696                 hdev->flags &= BIT(HCI_RAW);
1697         }
1698
1699 done:
1700         hci_req_sync_unlock(hdev);
1701         return ret;
1702 }
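
/* Example: a driver setup() callback as consumed by hci_dev_do_open()
 * above. An illustrative sketch with hypothetical foo_setup() and
 * foo_load_firmware() helpers; a real driver would only set
 * HCI_QUIRK_INVALID_BDADDR when the factory address is known to be
 * missing:
 *
 *	static int foo_setup(struct hci_dev *hdev)
 *	{
 *		int err = foo_load_firmware(hdev);
 *
 *		if (err)
 *			return err;
 *
 *		// Start unconfigured until userspace programs a valid
 *		// public address through the set_bdaddr callback.
 *		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
 *		return 0;
 *	}
 *
 * wired up before registration with hdev->setup = foo_setup.
 */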
1703
1704 /* ---- HCI ioctl helpers ---- */
1705
1706 int hci_dev_open(__u16 dev)
1707 {
1708         struct hci_dev *hdev;
1709         int err;
1710
1711         hdev = hci_dev_get(dev);
1712         if (!hdev)
1713                 return -ENODEV;
1714
1715         /* Devices that are marked as unconfigured can only be powered
1716          * up as user channel. Trying to bring them up as normal devices
1717          * will result in a failure. Only user channel operation is
1718          * possible.
1719          *
1720          * When this function is called for a user channel, the flag
1721          * HCI_USER_CHANNEL will be set first before attempting to
1722          * open the device.
1723          */
1724         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1725             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1726                 err = -EOPNOTSUPP;
1727                 goto done;
1728         }
1729
1730         /* We need to ensure that no other power on/off work is pending
1731          * before proceeding to call hci_dev_do_open. This is
1732          * particularly important if the setup procedure has not yet
1733          * completed.
1734          */
1735         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1736                 cancel_delayed_work(&hdev->power_off);
1737
1738         /* After this call it is guaranteed that the setup procedure
1739          * has finished. This means that error conditions like RFKILL
1740          * or no valid public or static random address apply.
1741          */
1742         flush_workqueue(hdev->req_workqueue);
1743
1744         /* For controllers that are not using the management interface
1745          * and are brought up using the legacy ioctl, set the HCI_BONDABLE bit
1746          * so that pairing works for them. Once the management interface
1747          * is in use this bit will be cleared again and userspace has
1748          * to explicitly enable it.
1749          */
1750         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1751             !hci_dev_test_flag(hdev, HCI_MGMT))
1752                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1753
1754         err = hci_dev_do_open(hdev);
1755
1756 done:
1757         hci_dev_put(hdev);
1758         return err;
1759 }
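
/* Example: the legacy userspace path that ends in hci_dev_open(). A
 * minimal sketch (error handling trimmed, device index 0 assumed):
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ioctl(dd, HCIDEVUP, 0);		// reaches hci_dev_open(0)
 *
 * Because this bypasses the management interface, HCI_BONDABLE gets set
 * implicitly as described above.
 */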
1760
1761 /* This function requires the caller holds hdev->lock */
1762 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1763 {
1764         struct hci_conn_params *p;
1765
1766         list_for_each_entry(p, &hdev->le_conn_params, list) {
1767                 if (p->conn) {
1768                         hci_conn_drop(p->conn);
1769                         hci_conn_put(p->conn);
1770                         p->conn = NULL;
1771                 }
1772                 list_del_init(&p->action);
1773         }
1774
1775         BT_DBG("All LE pending actions cleared");
1776 }
1777
1778 int hci_dev_do_close(struct hci_dev *hdev)
1779 {
1780         bool auto_off;
1781         int err = 0;
1782
1783         BT_DBG("%s %p", hdev->name, hdev);
1784
1785         cancel_delayed_work(&hdev->power_off);
1786         cancel_delayed_work(&hdev->ncmd_timer);
1787
1788         hci_request_cancel_all(hdev);
1789         hci_req_sync_lock(hdev);
1790
1791         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1792             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1793             test_bit(HCI_UP, &hdev->flags)) {
1794                 /* Execute vendor specific shutdown routine */
1795                 if (hdev->shutdown)
1796                         err = hdev->shutdown(hdev);
1797         }
1798
1799         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1800                 cancel_delayed_work_sync(&hdev->cmd_timer);
1801                 hci_req_sync_unlock(hdev);
1802                 return err;
1803         }
1804
1805         hci_leds_update_powered(hdev, false);
1806
1807         /* Flush RX and TX works */
1808         flush_work(&hdev->tx_work);
1809         flush_work(&hdev->rx_work);
1810
1811         if (hdev->discov_timeout > 0) {
1812                 hdev->discov_timeout = 0;
1813                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1814                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1815         }
1816
1817         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1818                 cancel_delayed_work(&hdev->service_cache);
1819
1820         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1821                 struct adv_info *adv_instance;
1822
1823                 cancel_delayed_work_sync(&hdev->rpa_expired);
1824
1825                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1826                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1827         }
1828
1829         /* Avoid potential lockdep warnings from the *_flush() calls by
1830          * ensuring the workqueue is empty up front.
1831          */
1832         drain_workqueue(hdev->workqueue);
1833
1834         hci_dev_lock(hdev);
1835
1836         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1837
1838         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1839
1840         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1841             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1842             hci_dev_test_flag(hdev, HCI_MGMT))
1843                 __mgmt_power_off(hdev);
1844
1845         hci_inquiry_cache_flush(hdev);
1846         hci_pend_le_actions_clear(hdev);
1847         hci_conn_hash_flush(hdev);
1848         hci_dev_unlock(hdev);
1849
1850         smp_unregister(hdev);
1851
1852         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1853
1854         aosp_do_close(hdev);
1855         msft_do_close(hdev);
1856
1857         if (hdev->flush)
1858                 hdev->flush(hdev);
1859
1860         /* Reset device */
1861         skb_queue_purge(&hdev->cmd_q);
1862         atomic_set(&hdev->cmd_cnt, 1);
1863         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1864             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1865                 set_bit(HCI_INIT, &hdev->flags);
1866                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1867                 clear_bit(HCI_INIT, &hdev->flags);
1868         }
1869
1870         /* Flush cmd work */
1871         flush_work(&hdev->cmd_work);
1872
1873         /* Drop queues */
1874         skb_queue_purge(&hdev->rx_q);
1875         skb_queue_purge(&hdev->cmd_q);
1876         skb_queue_purge(&hdev->raw_q);
1877
1878         /* Drop last sent command */
1879         if (hdev->sent_cmd) {
1880                 cancel_delayed_work_sync(&hdev->cmd_timer);
1881                 kfree_skb(hdev->sent_cmd);
1882                 hdev->sent_cmd = NULL;
1883         }
1884
1885         clear_bit(HCI_RUNNING, &hdev->flags);
1886         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1887
1888         if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1889                 wake_up(&hdev->suspend_wait_q);
1890
1891         /* After this point our queues are empty
1892          * and no tasks are scheduled. */
1893         hdev->close(hdev);
1894
1895         /* Clear flags */
1896         hdev->flags &= BIT(HCI_RAW);
1897         hci_dev_clear_volatile_flags(hdev);
1898
1899         /* Controller radio is available but is currently powered down */
1900         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1901
1902         memset(hdev->eir, 0, sizeof(hdev->eir));
1903         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1904         bacpy(&hdev->random_addr, BDADDR_ANY);
1905
1906         hci_req_sync_unlock(hdev);
1907
1908         hci_dev_put(hdev);
1909         return err;
1910 }
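
/* Example: the vendor-specific shutdown hook invoked near the top of
 * hci_dev_do_close(). A sketch with a hypothetical foo_shutdown() and a
 * made-up vendor opcode (0xfc17); typical drivers use this to power the
 * radio down cleanly while HCI_UP is still set:
 *
 *	static int foo_shutdown(struct hci_dev *hdev)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = __hci_cmd_sync(hdev, 0xfc17, 0, NULL,
 *				     HCI_CMD_TIMEOUT);
 *		if (IS_ERR(skb))
 *			return PTR_ERR(skb);
 *
 *		kfree_skb(skb);
 *		return 0;
 *	}
 */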
1911
1912 int hci_dev_close(__u16 dev)
1913 {
1914         struct hci_dev *hdev;
1915         int err;
1916
1917         hdev = hci_dev_get(dev);
1918         if (!hdev)
1919                 return -ENODEV;
1920
1921         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1922                 err = -EBUSY;
1923                 goto done;
1924         }
1925
1926         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1927                 cancel_delayed_work(&hdev->power_off);
1928
1929         err = hci_dev_do_close(hdev);
1930
1931 done:
1932         hci_dev_put(hdev);
1933         return err;
1934 }
1935
1936 static int hci_dev_do_reset(struct hci_dev *hdev)
1937 {
1938         int ret;
1939
1940         BT_DBG("%s %p", hdev->name, hdev);
1941
1942         hci_req_sync_lock(hdev);
1943
1944         /* Drop queues */
1945         skb_queue_purge(&hdev->rx_q);
1946         skb_queue_purge(&hdev->cmd_q);
1947
1948         /* Avoid potential lockdep warnings from the *_flush() calls by
1949          * ensuring the workqueue is empty up front.
1950          */
1951         drain_workqueue(hdev->workqueue);
1952
1953         hci_dev_lock(hdev);
1954         hci_inquiry_cache_flush(hdev);
1955         hci_conn_hash_flush(hdev);
1956         hci_dev_unlock(hdev);
1957
1958         if (hdev->flush)
1959                 hdev->flush(hdev);
1960
1961         atomic_set(&hdev->cmd_cnt, 1);
1962         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1963
1964         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1965
1966         hci_req_sync_unlock(hdev);
1967         return ret;
1968 }
1969
1970 int hci_dev_reset(__u16 dev)
1971 {
1972         struct hci_dev *hdev;
1973         int err;
1974
1975         hdev = hci_dev_get(dev);
1976         if (!hdev)
1977                 return -ENODEV;
1978
1979         if (!test_bit(HCI_UP, &hdev->flags)) {
1980                 err = -ENETDOWN;
1981                 goto done;
1982         }
1983
1984         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1985                 err = -EBUSY;
1986                 goto done;
1987         }
1988
1989         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1990                 err = -EOPNOTSUPP;
1991                 goto done;
1992         }
1993
1994         err = hci_dev_do_reset(hdev);
1995
1996 done:
1997         hci_dev_put(hdev);
1998         return err;
1999 }
2000
2001 int hci_dev_reset_stat(__u16 dev)
2002 {
2003         struct hci_dev *hdev;
2004         int ret = 0;
2005
2006         hdev = hci_dev_get(dev);
2007         if (!hdev)
2008                 return -ENODEV;
2009
2010         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2011                 ret = -EBUSY;
2012                 goto done;
2013         }
2014
2015         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2016                 ret = -EOPNOTSUPP;
2017                 goto done;
2018         }
2019
2020         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2021
2022 done:
2023         hci_dev_put(hdev);
2024         return ret;
2025 }
2026
2027 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2028 {
2029         bool conn_changed, discov_changed;
2030
2031         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2032
2033         if ((scan & SCAN_PAGE))
2034                 conn_changed = !hci_dev_test_and_set_flag(hdev,
2035                                                           HCI_CONNECTABLE);
2036         else
2037                 conn_changed = hci_dev_test_and_clear_flag(hdev,
2038                                                            HCI_CONNECTABLE);
2039
2040         if ((scan & SCAN_INQUIRY)) {
2041                 discov_changed = !hci_dev_test_and_set_flag(hdev,
2042                                                             HCI_DISCOVERABLE);
2043         } else {
2044                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2045                 discov_changed = hci_dev_test_and_clear_flag(hdev,
2046                                                              HCI_DISCOVERABLE);
2047         }
2048
2049         if (!hci_dev_test_flag(hdev, HCI_MGMT))
2050                 return;
2051
2052         if (conn_changed || discov_changed) {
2053                 /* In case this was disabled through mgmt */
2054                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2055
2056                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2057                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
2058
2059                 mgmt_new_settings(hdev);
2060         }
2061 }
2062
2063 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2064 {
2065         struct hci_dev *hdev;
2066         struct hci_dev_req dr;
2067         int err = 0;
2068
2069         if (copy_from_user(&dr, arg, sizeof(dr)))
2070                 return -EFAULT;
2071
2072         hdev = hci_dev_get(dr.dev_id);
2073         if (!hdev)
2074                 return -ENODEV;
2075
2076         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2077                 err = -EBUSY;
2078                 goto done;
2079         }
2080
2081         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2082                 err = -EOPNOTSUPP;
2083                 goto done;
2084         }
2085
2086         if (hdev->dev_type != HCI_PRIMARY) {
2087                 err = -EOPNOTSUPP;
2088                 goto done;
2089         }
2090
2091         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2092                 err = -EOPNOTSUPP;
2093                 goto done;
2094         }
2095
2096         switch (cmd) {
2097         case HCISETAUTH:
2098                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2099                                    HCI_INIT_TIMEOUT, NULL);
2100                 break;
2101
2102         case HCISETENCRYPT:
2103                 if (!lmp_encrypt_capable(hdev)) {
2104                         err = -EOPNOTSUPP;
2105                         break;
2106                 }
2107
2108                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2109                         /* Auth must be enabled first */
2110                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2111                                            HCI_INIT_TIMEOUT, NULL);
2112                         if (err)
2113                                 break;
2114                 }
2115
2116                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2117                                    HCI_INIT_TIMEOUT, NULL);
2118                 break;
2119
2120         case HCISETSCAN:
2121                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2122                                    HCI_INIT_TIMEOUT, NULL);
2123
2124                 /* Ensure that the connectable and discoverable states
2125                  * get correctly modified as this was a non-mgmt change.
2126                  */
2127                 if (!err)
2128                         hci_update_scan_state(hdev, dr.dev_opt);
2129                 break;
2130
2131         case HCISETLINKPOL:
2132                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2133                                    HCI_INIT_TIMEOUT, NULL);
2134                 break;
2135
2136         case HCISETLINKMODE:
2137                 hdev->link_mode = ((__u16) dr.dev_opt) &
2138                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2139                 break;
2140
2141         case HCISETPTYPE:
2142                 if (hdev->pkt_type == (__u16) dr.dev_opt)
2143                         break;
2144
2145                 hdev->pkt_type = (__u16) dr.dev_opt;
2146                 mgmt_phy_configuration_changed(hdev, NULL);
2147                 break;
2148
2149         case HCISETACLMTU:
2150                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2151                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2152                 break;
2153
2154         case HCISETSCOMTU:
2155                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2156                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2157                 break;
2158
2159         default:
2160                 err = -EINVAL;
2161                 break;
2162         }
2163
2164 done:
2165         hci_dev_put(hdev);
2166         return err;
2167 }
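
/* Example: driving one of the ioctls above from userspace. A sketch for
 * HCISETSCAN (device index 0 assumed, dd as in the HCIDEVUP example);
 * SCAN_PAGE | SCAN_INQUIRY makes the adapter connectable and
 * discoverable, and hci_update_scan_state() keeps the mgmt flags in
 * sync afterwards:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	ioctl(dd, HCISETSCAN, (unsigned long) &dr);
 */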
2168
2169 int hci_get_dev_list(void __user *arg)
2170 {
2171         struct hci_dev *hdev;
2172         struct hci_dev_list_req *dl;
2173         struct hci_dev_req *dr;
2174         int n = 0, size, err;
2175         __u16 dev_num;
2176
2177         if (get_user(dev_num, (__u16 __user *) arg))
2178                 return -EFAULT;
2179
2180         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2181                 return -EINVAL;
2182
2183         size = sizeof(*dl) + dev_num * sizeof(*dr);
2184
2185         dl = kzalloc(size, GFP_KERNEL);
2186         if (!dl)
2187                 return -ENOMEM;
2188
2189         dr = dl->dev_req;
2190
2191         read_lock(&hci_dev_list_lock);
2192         list_for_each_entry(hdev, &hci_dev_list, list) {
2193                 unsigned long flags = hdev->flags;
2194
2195                 /* When auto-off is configured the transport is still
2196                  * running, but in that case indicate that the
2197                  * device is actually down.
2198                  */
2199                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2200                         flags &= ~BIT(HCI_UP);
2201
2202                 (dr + n)->dev_id  = hdev->id;
2203                 (dr + n)->dev_opt = flags;
2204
2205                 if (++n >= dev_num)
2206                         break;
2207         }
2208         read_unlock(&hci_dev_list_lock);
2209
2210         dl->dev_num = n;
2211         size = sizeof(*dl) + n * sizeof(*dr);
2212
2213         err = copy_to_user(arg, dl, size);
2214         kfree(dl);
2215
2216         return err ? -EFAULT : 0;
2217 }
2218
2219 int hci_get_dev_info(void __user *arg)
2220 {
2221         struct hci_dev *hdev;
2222         struct hci_dev_info di;
2223         unsigned long flags;
2224         int err = 0;
2225
2226         if (copy_from_user(&di, arg, sizeof(di)))
2227                 return -EFAULT;
2228
2229         hdev = hci_dev_get(di.dev_id);
2230         if (!hdev)
2231                 return -ENODEV;
2232
2233         /* When auto-off is configured the transport is still
2234          * running, but in that case indicate that the
2235          * device is actually down.
2236          */
2237         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2238                 flags = hdev->flags & ~BIT(HCI_UP);
2239         else
2240                 flags = hdev->flags;
2241
2242         strscpy(di.name, hdev->name, sizeof(di.name));
2243         di.bdaddr   = hdev->bdaddr;
2244         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2245         di.flags    = flags;
2246         di.pkt_type = hdev->pkt_type;
2247         if (lmp_bredr_capable(hdev)) {
2248                 di.acl_mtu  = hdev->acl_mtu;
2249                 di.acl_pkts = hdev->acl_pkts;
2250                 di.sco_mtu  = hdev->sco_mtu;
2251                 di.sco_pkts = hdev->sco_pkts;
2252         } else {
2253                 di.acl_mtu  = hdev->le_mtu;
2254                 di.acl_pkts = hdev->le_pkts;
2255                 di.sco_mtu  = 0;
2256                 di.sco_pkts = 0;
2257         }
2258         di.link_policy = hdev->link_policy;
2259         di.link_mode   = hdev->link_mode;
2260
2261         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2262         memcpy(&di.features, &hdev->features, sizeof(di.features));
2263
2264         if (copy_to_user(arg, &di, sizeof(di)))
2265                 err = -EFAULT;
2266
2267         hci_dev_put(hdev);
2268
2269         return err;
2270 }
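
/* Example: reading hci_dev_info from userspace. A sketch (error
 * handling trimmed; ba2str() is the libbluetooth address formatter):
 *
 *	struct hci_dev_info di = { .dev_id = 0 };
 *	char addr[18];
 *
 *	if (!ioctl(dd, HCIGETDEVINFO, (void *) &di)) {
 *		ba2str(&di.bdaddr, addr);
 *		printf("%s %s acl_mtu %d\n", di.name, addr, di.acl_mtu);
 *	}
 */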
2271
2272 /* ---- Interface to HCI drivers ---- */
2273
2274 static int hci_rfkill_set_block(void *data, bool blocked)
2275 {
2276         struct hci_dev *hdev = data;
2277
2278         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2279
2280         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2281                 return -EBUSY;
2282
2283         if (blocked) {
2284                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2285                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2286                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2287                         hci_dev_do_close(hdev);
2288         } else {
2289                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2290         }
2291
2292         return 0;
2293 }
2294
2295 static const struct rfkill_ops hci_rfkill_ops = {
2296         .set_block = hci_rfkill_set_block,
2297 };
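
/* Example: how these ops get wired up. Roughly what hci_register_dev()
 * does later in this file (sketch, error handling trimmed):
 *
 *	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *				    RFKILL_TYPE_BLUETOOTH,
 *				    &hci_rfkill_ops, hdev);
 *	if (hdev->rfkill && rfkill_register(hdev->rfkill) < 0) {
 *		rfkill_destroy(hdev->rfkill);
 *		hdev->rfkill = NULL;
 *	}
 */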
2298
2299 static void hci_power_on(struct work_struct *work)
2300 {
2301         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2302         int err;
2303
2304         BT_DBG("%s", hdev->name);
2305
2306         if (test_bit(HCI_UP, &hdev->flags) &&
2307             hci_dev_test_flag(hdev, HCI_MGMT) &&
2308             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2309                 cancel_delayed_work(&hdev->power_off);
2310                 hci_req_sync_lock(hdev);
2311                 err = __hci_req_hci_power_on(hdev);
2312                 hci_req_sync_unlock(hdev);
2313                 mgmt_power_on(hdev, err);
2314                 return;
2315         }
2316
2317         err = hci_dev_do_open(hdev);
2318         if (err < 0) {
2319                 hci_dev_lock(hdev);
2320                 mgmt_set_powered_failed(hdev, err);
2321                 hci_dev_unlock(hdev);
2322                 return;
2323         }
2324
2325         /* During the HCI setup phase, a few error conditions are
2326          * ignored and they need to be checked now. If they are still
2327          * valid, it is important to turn the device back off.
2328          */
2329         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2330             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2331             (hdev->dev_type == HCI_PRIMARY &&
2332              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2333              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2334                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2335                 hci_dev_do_close(hdev);
2336         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2337                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2338                                    HCI_AUTO_OFF_TIMEOUT);
2339         }
2340
2341         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2342                 /* For unconfigured devices, set the HCI_RAW flag
2343                  * so that userspace can easily identify them.
2344                  */
2345                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2346                         set_bit(HCI_RAW, &hdev->flags);
2347
2348                 /* For fully configured devices, this will send
2349                  * the Index Added event. For unconfigured devices,
2350                  * it will send the Unconfigured Index Added event.
2351                  *
2352                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2353                  * and no event will be sent.
2354                  */
2355                 mgmt_index_added(hdev);
2356         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2357                 /* When the controller is now configured, then it
2358                  * is important to clear the HCI_RAW flag.
2359                  */
2360                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2361                         clear_bit(HCI_RAW, &hdev->flags);
2362
2363                 /* Powering on the controller with HCI_CONFIG set only
2364                  * happens with the transition from unconfigured to
2365                  * configured. This will send the Index Added event.
2366                  */
2367                 mgmt_index_added(hdev);
2368         }
2369 }
2370
2371 static void hci_power_off(struct work_struct *work)
2372 {
2373         struct hci_dev *hdev = container_of(work, struct hci_dev,
2374                                             power_off.work);
2375
2376         BT_DBG("%s", hdev->name);
2377
2378         hci_dev_do_close(hdev);
2379 }
2380
2381 static void hci_error_reset(struct work_struct *work)
2382 {
2383         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2384
2385         BT_DBG("%s", hdev->name);
2386
2387         if (hdev->hw_error)
2388                 hdev->hw_error(hdev, hdev->hw_error_code);
2389         else
2390                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2391
2392         if (hci_dev_do_close(hdev))
2393                 return;
2394
2395         hci_dev_do_open(hdev);
2396 }
2397
2398 void hci_uuids_clear(struct hci_dev *hdev)
2399 {
2400         struct bt_uuid *uuid, *tmp;
2401
2402         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2403                 list_del(&uuid->list);
2404                 kfree(uuid);
2405         }
2406 }
2407
2408 void hci_link_keys_clear(struct hci_dev *hdev)
2409 {
2410         struct link_key *key;
2411
2412         list_for_each_entry(key, &hdev->link_keys, list) {
2413                 list_del_rcu(&key->list);
2414                 kfree_rcu(key, rcu);
2415         }
2416 }
2417
2418 void hci_smp_ltks_clear(struct hci_dev *hdev)
2419 {
2420         struct smp_ltk *k;
2421
2422         list_for_each_entry(k, &hdev->long_term_keys, list) {
2423                 list_del_rcu(&k->list);
2424                 kfree_rcu(k, rcu);
2425         }
2426 }
2427
2428 void hci_smp_irks_clear(struct hci_dev *hdev)
2429 {
2430         struct smp_irk *k;
2431
2432         list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2433                 list_del_rcu(&k->list);
2434                 kfree_rcu(k, rcu);
2435         }
2436 }
2437
2438 void hci_blocked_keys_clear(struct hci_dev *hdev)
2439 {
2440         struct blocked_key *b;
2441
2442         list_for_each_entry(b, &hdev->blocked_keys, list) {
2443                 list_del_rcu(&b->list);
2444                 kfree_rcu(b, rcu);
2445         }
2446 }
2447
2448 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2449 {
2450         bool blocked = false;
2451         struct blocked_key *b;
2452
2453         rcu_read_lock();
2454         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2455                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2456                         blocked = true;
2457                         break;
2458                 }
2459         }
2460
2461         rcu_read_unlock();
2462         return blocked;
2463 }
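
/* Example: how the key lookup helpers below use this check. A sketch
 * with arbitrary key material; the blocked_keys list itself is
 * populated from userspace via the mgmt Set Blocked Keys command:
 *
 *	u8 val[16] = { 0 };
 *
 *	if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK, val))
 *		bt_dev_warn(hdev, "blocked LTK, rejecting");
 */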
2464
2465 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2466 {
2467         struct link_key *k;
2468
2469         rcu_read_lock();
2470         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2471                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2472                         rcu_read_unlock();
2473
2474                         if (hci_is_blocked_key(hdev,
2475                                                HCI_BLOCKED_KEY_TYPE_LINKKEY,
2476                                                k->val)) {
2477                                 bt_dev_warn_ratelimited(hdev,
2478                                                         "Link key blocked for %pMR",
2479                                                         &k->bdaddr);
2480                                 return NULL;
2481                         }
2482
2483                         return k;
2484                 }
2485         }
2486         rcu_read_unlock();
2487
2488         return NULL;
2489 }
2490
2491 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2492                                u8 key_type, u8 old_key_type)
2493 {
2494         /* Legacy key */
2495         if (key_type < 0x03)
2496                 return true;
2497
2498         /* Debug keys are insecure so don't store them persistently */
2499         if (key_type == HCI_LK_DEBUG_COMBINATION)
2500                 return false;
2501
2502         /* Changed combination key and there's no previous one */
2503         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2504                 return false;
2505
2506         /* Security mode 3 case */
2507         if (!conn)
2508                 return true;
2509
2510         /* BR/EDR key derived using SC from an LE link */
2511         if (conn->type == LE_LINK)
2512                 return true;
2513
2514         /* Neither local nor remote side specified no-bonding as a requirement */
2515         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2516                 return true;
2517
2518         /* Local side had dedicated bonding as requirement */
2519         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2520                 return true;
2521
2522         /* Remote side had dedicated bonding as requirement */
2523         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2524                 return true;
2525
2526 #ifdef TIZEN_BT
2527         /* In case of auth_type '0x01', the link is still authenticated
2528          * by MITM, so store the key.
2529          */
2530         if (key_type == HCI_LK_AUTH_COMBINATION_P192)
2531                 return true;
2532 #endif
2533
2534         /* If none of the above criteria match, then don't store the key
2535          * persistently */
2536         return false;
2537 }
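
/* Example: decisions made by hci_persistent_key() for a few sample
 * inputs (old_key_type 0xff means there was no previous key, and
 * conn == NULL is the security mode 3 case):
 *
 *	hci_persistent_key(hdev, conn, HCI_LK_COMBINATION, 0xff)
 *		-> true  (legacy key, type < 0x03)
 *	hci_persistent_key(hdev, conn, HCI_LK_DEBUG_COMBINATION, 0xff)
 *		-> false (debug keys are never stored)
 *	hci_persistent_key(hdev, NULL, HCI_LK_UNAUTH_COMBINATION_P192, 0xff)
 *		-> true  (security mode 3)
 */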
2538
2539 static u8 ltk_role(u8 type)
2540 {
2541         if (type == SMP_LTK)
2542                 return HCI_ROLE_MASTER;
2543
2544         return HCI_ROLE_SLAVE;
2545 }
2546
2547 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2548                              u8 addr_type, u8 role)
2549 {
2550         struct smp_ltk *k;
2551
2552         rcu_read_lock();
2553         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2554                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2555                         continue;
2556
2557                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2558                         rcu_read_unlock();
2559
2560                         if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2561                                                k->val)) {
2562                                 bt_dev_warn_ratelimited(hdev,
2563                                                         "LTK blocked for %pMR",
2564                                                         &k->bdaddr);
2565                                 return NULL;
2566                         }
2567
2568                         return k;
2569                 }
2570         }
2571         rcu_read_unlock();
2572
2573         return NULL;
2574 }
2575
2576 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2577 {
2578         struct smp_irk *irk_to_return = NULL;
2579         struct smp_irk *irk;
2580
2581         rcu_read_lock();
2582         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2583                 if (!bacmp(&irk->rpa, rpa)) {
2584                         irk_to_return = irk;
2585                         goto done;
2586                 }
2587         }
2588
2589         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2590                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2591                         bacpy(&irk->rpa, rpa);
2592                         irk_to_return = irk;
2593                         goto done;
2594                 }
2595         }
2596
2597 done:
2598         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2599                                                 irk_to_return->val)) {
2600                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2601                                         &irk_to_return->bdaddr);
2602                 irk_to_return = NULL;
2603         }
2604
2605         rcu_read_unlock();
2606
2607         return irk_to_return;
2608 }
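
/* Example: resolving a resolvable private address seen in an
 * advertising report. A sketch where 'rpa' would come from the report;
 * note that the first loop above is a cheap cache hit on irk->rpa, and
 * only on a miss is the AES-based smp_irk_matches() tried against every
 * stored IRK:
 *
 *	struct smp_irk *irk = hci_find_irk_by_rpa(hdev, &rpa);
 *
 *	if (irk)
 *		// irk->bdaddr/irk->addr_type give the identity address
 *		bt_dev_dbg(hdev, "RPA resolved to %pMR", &irk->bdaddr);
 */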
2609
2610 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2611                                      u8 addr_type)
2612 {
2613         struct smp_irk *irk_to_return = NULL;
2614         struct smp_irk *irk;
2615
2616         /* Identity Address must be public or static random */
2617         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2618                 return NULL;
2619
2620         rcu_read_lock();
2621         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2622                 if (addr_type == irk->addr_type &&
2623                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2624                         irk_to_return = irk;
2625                         goto done;
2626                 }
2627         }
2628
2629 done:
2630
2631         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2632                                                 irk_to_return->val)) {
2633                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2634                                         &irk_to_return->bdaddr);
2635                 irk_to_return = NULL;
2636         }
2637
2638         rcu_read_unlock();
2639
2640         return irk_to_return;
2641 }
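
/* Example: the address check above in isolation. A static random
 * address must have its two most significant bits set to 1, i.e. a top
 * byte in the range 0xc0-0xff (bdaddr_t is stored little endian, so
 * b[5] is the most significant byte). Sketch:
 *
 *	static bool bdaddr_is_static_random(const bdaddr_t *a)
 *	{
 *		return (a->b[5] & 0xc0) == 0xc0;
 *	}
 */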
2642
2643 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2644                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2645                                   u8 pin_len, bool *persistent)
2646 {
2647         struct link_key *key, *old_key;
2648         u8 old_key_type;
2649
2650         old_key = hci_find_link_key(hdev, bdaddr);
2651         if (old_key) {
2652                 old_key_type = old_key->type;
2653                 key = old_key;
2654         } else {
2655                 old_key_type = conn ? conn->key_type : 0xff;
2656                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2657                 if (!key)
2658                         return NULL;
2659                 list_add_rcu(&key->list, &hdev->link_keys);
2660         }
2661
2662         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2663
2664         /* Some buggy controller combinations generate a changed
2665          * combination key for legacy pairing even when there's no
2666          * previous key */
2667         if (type == HCI_LK_CHANGED_COMBINATION &&
2668             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2669                 type = HCI_LK_COMBINATION;
2670                 if (conn)
2671                         conn->key_type = type;
2672         }
2673
2674         bacpy(&key->bdaddr, bdaddr);
2675         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2676         key->pin_len = pin_len;
2677
2678         if (type == HCI_LK_CHANGED_COMBINATION)
2679                 key->type = old_key_type;
2680         else
2681                 key->type = type;
2682
2683         if (persistent)
2684                 *persistent = hci_persistent_key(hdev, conn, type,
2685                                                  old_key_type);
2686
2687         return key;
2688 }
2689
2690 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2691                             u8 addr_type, u8 type, u8 authenticated,
2692                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2693 {
2694         struct smp_ltk *key, *old_key;
2695         u8 role = ltk_role(type);
2696
2697         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2698         if (old_key)
2699                 key = old_key;
2700         else {
2701                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2702                 if (!key)
2703                         return NULL;
2704                 list_add_rcu(&key->list, &hdev->long_term_keys);
2705         }
2706
2707         bacpy(&key->bdaddr, bdaddr);
2708         key->bdaddr_type = addr_type;
2709         memcpy(key->val, tk, sizeof(key->val));
2710         key->authenticated = authenticated;
2711         key->ediv = ediv;
2712         key->rand = rand;
2713         key->enc_size = enc_size;
2714         key->type = type;
2715
2716         return key;
2717 }
2718
2719 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2720                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2721 {
2722         struct smp_irk *irk;
2723
2724         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2725         if (!irk) {
2726                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2727                 if (!irk)
2728                         return NULL;
2729
2730                 bacpy(&irk->bdaddr, bdaddr);
2731                 irk->addr_type = addr_type;
2732
2733                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2734         }
2735
2736         memcpy(irk->val, val, 16);
2737         bacpy(&irk->rpa, rpa);
2738
2739         return irk;
2740 }
2741
2742 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2743 {
2744         struct link_key *key;
2745
2746         key = hci_find_link_key(hdev, bdaddr);
2747         if (!key)
2748                 return -ENOENT;
2749
2750         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2751
2752         list_del_rcu(&key->list);
2753         kfree_rcu(key, rcu);
2754
2755         return 0;
2756 }
2757
2758 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2759 {
2760         struct smp_ltk *k;
2761         int removed = 0;
2762
2763         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2764                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2765                         continue;
2766
2767                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2768
2769                 list_del_rcu(&k->list);
2770                 kfree_rcu(k, rcu);
2771                 removed++;
2772         }
2773
2774         return removed ? 0 : -ENOENT;
2775 }
2776
2777 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2778 {
2779         struct smp_irk *k;
2780
2781         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2782                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2783                         continue;
2784
2785                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2786
2787                 list_del_rcu(&k->list);
2788                 kfree_rcu(k, rcu);
2789         }
2790 }
2791
2792 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2793 {
2794         struct smp_ltk *k;
2795         struct smp_irk *irk;
2796         u8 addr_type;
2797
2798         if (type == BDADDR_BREDR) {
2799                 if (hci_find_link_key(hdev, bdaddr))
2800                         return true;
2801                 return false;
2802         }
2803
2804         /* Convert to HCI addr type which struct smp_ltk uses */
2805         if (type == BDADDR_LE_PUBLIC)
2806                 addr_type = ADDR_LE_DEV_PUBLIC;
2807         else
2808                 addr_type = ADDR_LE_DEV_RANDOM;
2809
2810         irk = hci_get_irk(hdev, bdaddr, addr_type);
2811         if (irk) {
2812                 bdaddr = &irk->bdaddr;
2813                 addr_type = irk->addr_type;
2814         }
2815
2816         rcu_read_lock();
2817         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2818                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2819                         rcu_read_unlock();
2820                         return true;
2821                 }
2822         }
2823         rcu_read_unlock();
2824
2825         return false;
2826 }
2827
2828 /* HCI command timer function */
2829 static void hci_cmd_timeout(struct work_struct *work)
2830 {
2831         struct hci_dev *hdev = container_of(work, struct hci_dev,
2832                                             cmd_timer.work);
2833
2834         if (hdev->sent_cmd) {
2835                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2836                 u16 opcode = __le16_to_cpu(sent->opcode);
2837
2838                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2839         } else {
2840                 bt_dev_err(hdev, "command tx timeout");
2841         }
2842
2843         if (hdev->cmd_timeout)
2844                 hdev->cmd_timeout(hdev);
2845
2846 #ifdef TIZEN_BT
2847         hci_tx_timeout_error_evt(hdev);
2848 #endif
2849
2850         atomic_set(&hdev->cmd_cnt, 1);
2851         queue_work(hdev->workqueue, &hdev->cmd_work);
2852 }
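
/* Example: the optional hdev->cmd_timeout hook called above. A sketch
 * of a hypothetical driver (foo_data and reset_gpio are assumptions)
 * that toggles a reset line when the controller stops responding,
 * similar in spirit to what btusb does for some controllers:
 *
 *	static void foo_cmd_timeout(struct hci_dev *hdev)
 *	{
 *		struct foo_data *data = hci_get_drvdata(hdev);
 *
 *		bt_dev_err(hdev, "resetting unresponsive controller");
 *		gpiod_set_value_cansleep(data->reset_gpio, 1);
 *		msleep(100);
 *		gpiod_set_value_cansleep(data->reset_gpio, 0);
 *	}
 */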
2853
2854 /* HCI ncmd timer function */
2855 static void hci_ncmd_timeout(struct work_struct *work)
2856 {
2857         struct hci_dev *hdev = container_of(work, struct hci_dev,
2858                                             ncmd_timer.work);
2859
2860         bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
2861
2862         /* During the HCI_INIT phase no events can be injected if the ncmd
2863          * timer triggers, since the procedure has its own timeout handling.
2864          */
2865         if (test_bit(HCI_INIT, &hdev->flags))
2866                 return;
2867
2868         /* This is an irrecoverable state, inject hardware error event */
2869         hci_reset_dev(hdev);
2870 }
2871
2872 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2873                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2874 {
2875         struct oob_data *data;
2876
2877         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2878                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2879                         continue;
2880                 if (data->bdaddr_type != bdaddr_type)
2881                         continue;
2882                 return data;
2883         }
2884
2885         return NULL;
2886 }
2887
2888 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2889                                u8 bdaddr_type)
2890 {
2891         struct oob_data *data;
2892
2893         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2894         if (!data)
2895                 return -ENOENT;
2896
2897         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2898
2899         list_del(&data->list);
2900         kfree(data);
2901
2902         return 0;
2903 }
2904
2905 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2906 {
2907         struct oob_data *data, *n;
2908
2909         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2910                 list_del(&data->list);
2911                 kfree(data);
2912         }
2913 }
2914
2915 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2916                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2917                             u8 *hash256, u8 *rand256)
2918 {
2919         struct oob_data *data;
2920
2921         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2922         if (!data) {
2923                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2924                 if (!data)
2925                         return -ENOMEM;
2926
2927                 bacpy(&data->bdaddr, bdaddr);
2928                 data->bdaddr_type = bdaddr_type;
2929                 list_add(&data->list, &hdev->remote_oob_data);
2930         }
2931
2932         if (hash192 && rand192) {
2933                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2934                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2935                 if (hash256 && rand256)
2936                         data->present = 0x03;
2937         } else {
2938                 memset(data->hash192, 0, sizeof(data->hash192));
2939                 memset(data->rand192, 0, sizeof(data->rand192));
2940                 if (hash256 && rand256)
2941                         data->present = 0x02;
2942                 else
2943                         data->present = 0x00;
2944         }
2945
2946         if (hash256 && rand256) {
2947                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2948                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2949         } else {
2950                 memset(data->hash256, 0, sizeof(data->hash256));
2951                 memset(data->rand256, 0, sizeof(data->rand256));
2952                 if (hash192 && rand192)
2953                         data->present = 0x01;
2954         }
2955
2956         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2957
2958         return 0;
2959 }
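
/* Summary of the resulting data->present encoding: it is a bitmask of
 * which OOB value pairs are valid:
 *
 *	0x00	neither pair valid
 *	0x01	only hash192/rand192 valid (P-192)
 *	0x02	only hash256/rand256 valid (P-256)
 *	0x03	both pairs valid
 *
 * e.g. passing NULL for hash192/rand192 but valid hash256/rand256
 * leaves the P-192 slots zeroed and sets present to 0x02.
 */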
2960
2961 /* This function requires the caller holds hdev->lock */
2962 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2963 {
2964         struct adv_info *adv_instance;
2965
2966         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2967                 if (adv_instance->instance == instance)
2968                         return adv_instance;
2969         }
2970
2971         return NULL;
2972 }
2973
2974 /* This function requires the caller holds hdev->lock */
2975 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2976 {
2977         struct adv_info *cur_instance;
2978
2979         cur_instance = hci_find_adv_instance(hdev, instance);
2980         if (!cur_instance)
2981                 return NULL;
2982
2983         if (cur_instance == list_last_entry(&hdev->adv_instances,
2984                                             struct adv_info, list))
2985                 return list_first_entry(&hdev->adv_instances,
2986                                                  struct adv_info, list);
2987         else
2988                 return list_next_entry(cur_instance, list);
2989 }
2990
2991 /* This function requires the caller holds hdev->lock */
2992 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2993 {
2994         struct adv_info *adv_instance;
2995
2996         adv_instance = hci_find_adv_instance(hdev, instance);
2997         if (!adv_instance)
2998                 return -ENOENT;
2999
3000         BT_DBG("%s removing instance %d", hdev->name, instance);
3001
3002         if (hdev->cur_adv_instance == instance) {
3003                 if (hdev->adv_instance_timeout) {
3004                         cancel_delayed_work(&hdev->adv_instance_expire);
3005                         hdev->adv_instance_timeout = 0;
3006                 }
3007                 hdev->cur_adv_instance = 0x00;
3008         }
3009
3010         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
3011
3012         list_del(&adv_instance->list);
3013         kfree(adv_instance);
3014
3015         hdev->adv_instance_cnt--;
3016
3017         return 0;
3018 }
3019
3020 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
3021 {
3022         struct adv_info *adv_instance, *n;
3023
3024         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
3025                 adv_instance->rpa_expired = rpa_expired;
3026 }
3027
3028 /* This function requires the caller holds hdev->lock */
3029 void hci_adv_instances_clear(struct hci_dev *hdev)
3030 {
3031         struct adv_info *adv_instance, *n;
3032
3033         if (hdev->adv_instance_timeout) {
3034                 cancel_delayed_work(&hdev->adv_instance_expire);
3035                 hdev->adv_instance_timeout = 0;
3036         }
3037
3038         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
3039                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
3040                 list_del(&adv_instance->list);
3041                 kfree(adv_instance);
3042         }
3043
3044         hdev->adv_instance_cnt = 0;
3045         hdev->cur_adv_instance = 0x00;
3046 }
3047
3048 static void adv_instance_rpa_expired(struct work_struct *work)
3049 {
3050         struct adv_info *adv_instance = container_of(work, struct adv_info,
3051                                                      rpa_expired_cb.work);
3052
3053         BT_DBG("");
3054
3055         adv_instance->rpa_expired = true;
3056 }
3057
3058 /* This function requires the caller holds hdev->lock */
3059 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
3060                          u16 adv_data_len, u8 *adv_data,
3061                          u16 scan_rsp_len, u8 *scan_rsp_data,
3062                          u16 timeout, u16 duration, s8 tx_power,
3063                          u32 min_interval, u32 max_interval)
3064 {
3065         struct adv_info *adv_instance;
3066
3067         adv_instance = hci_find_adv_instance(hdev, instance);
3068         if (adv_instance) {
3069                 memset(adv_instance->adv_data, 0,
3070                        sizeof(adv_instance->adv_data));
3071                 memset(adv_instance->scan_rsp_data, 0,
3072                        sizeof(adv_instance->scan_rsp_data));
3073         } else {
3074                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
3075                     instance < 1 || instance > hdev->le_num_of_adv_sets)
3076                         return -EOVERFLOW;
3077
3078                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
3079                 if (!adv_instance)
3080                         return -ENOMEM;
3081
3082                 adv_instance->pending = true;
3083                 adv_instance->instance = instance;
3084                 list_add(&adv_instance->list, &hdev->adv_instances);
3085                 hdev->adv_instance_cnt++;
3086         }
3087
3088         adv_instance->flags = flags;
3089         adv_instance->adv_data_len = adv_data_len;
3090         adv_instance->scan_rsp_len = scan_rsp_len;
3091         adv_instance->min_interval = min_interval;
3092         adv_instance->max_interval = max_interval;
3093         adv_instance->tx_power = tx_power;
3094
3095         if (adv_data_len)
3096                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3097
3098         if (scan_rsp_len)
3099                 memcpy(adv_instance->scan_rsp_data,
3100                        scan_rsp_data, scan_rsp_len);
3101
3102         adv_instance->timeout = timeout;
3103         adv_instance->remaining_time = timeout;
3104
3105         if (duration == 0)
3106                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3107         else
3108                 adv_instance->duration = duration;
3109
3110         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3111                           adv_instance_rpa_expired);
3112
3113         BT_DBG("%s for %dMR", hdev->name, instance);
3114
3115         return 0;
3116 }
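
/* Usage sketch (hypothetical caller, not part of this file): register
 * advertising instance 1 with a minimal flags AD field, no scan response,
 * no timeout, the default rotation duration (duration == 0) and an
 * unspecified TX power. hdev->lock must be held, as required above.
 */
static int example_register_adv(struct hci_dev *hdev)
{
        u8 adv_data[] = { 0x02, 0x01, 0x06 }; /* AD: Flags, LE General Disc. */

        return hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv_data), adv_data,
                                    0, NULL, 0, 0, HCI_TX_POWER_INVALID,
                                    hdev->le_adv_min_interval,
                                    hdev->le_adv_max_interval);
}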
3117
3118 /* This function requires the caller holds hdev->lock */
3119 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3120                               u16 adv_data_len, u8 *adv_data,
3121                               u16 scan_rsp_len, u8 *scan_rsp_data)
3122 {
3123         struct adv_info *adv_instance;
3124
3125         adv_instance = hci_find_adv_instance(hdev, instance);
3126
3127         /* If advertisement doesn't exist, we can't modify its data */
3128         if (!adv_instance)
3129                 return -ENOENT;
3130
3131         if (adv_data_len) {
3132                 memset(adv_instance->adv_data, 0,
3133                        sizeof(adv_instance->adv_data));
3134                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3135                 adv_instance->adv_data_len = adv_data_len;
3136         }
3137
3138         if (scan_rsp_len) {
3139                 memset(adv_instance->scan_rsp_data, 0,
3140                        sizeof(adv_instance->scan_rsp_data));
3141                 memcpy(adv_instance->scan_rsp_data,
3142                        scan_rsp_data, scan_rsp_len);
3143                 adv_instance->scan_rsp_len = scan_rsp_len;
3144         }
3145
3146         return 0;
3147 }
3148
3149 /* This function requires the caller holds hdev->lock */
3150 void hci_adv_monitors_clear(struct hci_dev *hdev)
3151 {
3152         struct adv_monitor *monitor;
3153         int handle;
3154
3155         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3156                 hci_free_adv_monitor(hdev, monitor);
3157
3158         idr_destroy(&hdev->adv_monitors_idr);
3159 }
3160
3161 /* Frees the monitor structure and does the related bookkeeping.
3162  * This function requires the caller holds hdev->lock.
3163  */
3164 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3165 {
3166         struct adv_pattern *pattern;
3167         struct adv_pattern *tmp;
3168
3169         if (!monitor)
3170                 return;
3171
3172         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3173                 list_del(&pattern->list);
3174                 kfree(pattern);
3175         }
3176
3177         if (monitor->handle)
3178                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3179
3180         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3181                 hdev->adv_monitors_cnt--;
3182                 mgmt_adv_monitor_removed(hdev, monitor->handle);
3183         }
3184
3185         kfree(monitor);
3186 }
3187
3188 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3189 {
3190         return mgmt_add_adv_patterns_monitor_complete(hdev, status);
3191 }
3192
3193 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3194 {
3195         return mgmt_remove_adv_monitor_complete(hdev, status);
3196 }
3197
3198 /* Assigns a handle to the monitor and, if offloading is supported and the
3199  * controller is powered, also attempts to forward the request to it.
3200  * Returns true if request is forwarded (result is pending), false otherwise.
3201  * This function requires the caller holds hdev->lock.
3202  */
3203 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3204                          int *err)
3205 {
3206         int min, max, handle;
3207
3208         *err = 0;
3209
3210         if (!monitor) {
3211                 *err = -EINVAL;
3212                 return false;
3213         }
3214
3215         min = HCI_MIN_ADV_MONITOR_HANDLE;
3216         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3217         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3218                            GFP_KERNEL);
3219         if (handle < 0) {
3220                 *err = handle;
3221                 return false;
3222         }
3223
3224         monitor->handle = handle;
3225
3226         if (!hdev_is_powered(hdev))
3227                 return false;
3228
3229         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3230         case HCI_ADV_MONITOR_EXT_NONE:
3231                 hci_update_background_scan(hdev);
3232                 bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
3233                 /* Message was not forwarded to controller - not an error */
3234                 return false;
3235         case HCI_ADV_MONITOR_EXT_MSFT:
3236                 *err = msft_add_monitor_pattern(hdev, monitor);
3237                 bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
3238                            *err);
3239                 break;
3240         }
3241
3242         return (*err == 0);
3243 }
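
/* Sketch of the calling convention documented above (hypothetical caller,
 * hdev->lock held): a true return means the result arrives later through
 * the add-complete path, while false with *err == 0 means the request was
 * already handled on the host.
 */
static void example_add_monitor(struct hci_dev *hdev,
                                struct adv_monitor *monitor)
{
        int err;

        if (hci_add_adv_monitor(hdev, monitor, &err))
                return; /* forwarded; wait for the completion event */

        if (err)
                bt_dev_err(hdev, "failed to add monitor (%d)", err);
        /* else: handled entirely on the host, nothing left pending */
}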
3244
3245 /* Attempts to tell the controller to remove the monitor and then frees it.
3246  * If the controller has no corresponding handle, the monitor is removed anyway.
3247  * Returns true if request is forwarded (result is pending), false otherwise.
3248  * This function requires the caller holds hdev->lock.
3249  */
3250 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3251                                    struct adv_monitor *monitor,
3252                                    u16 handle, int *err)
3253 {
3254         *err = 0;
3255
3256         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3257         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
3258                 goto free_monitor;
3259         case HCI_ADV_MONITOR_EXT_MSFT:
3260                 *err = msft_remove_monitor(hdev, monitor, handle);
3261                 break;
3262         }
3263
3264         /* If no matching handle is registered, just free the monitor */
3265         if (*err == -ENOENT)
3266                 goto free_monitor;
3267
3268         return (*err == 0);
3269
3270 free_monitor:
3271         if (*err == -ENOENT)
3272                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3273                             monitor->handle);
3274         hci_free_adv_monitor(hdev, monitor);
3275
3276         *err = 0;
3277         return false;
3278 }
3279
3280 /* Returns true if request is forwarded (result is pending), false otherwise.
3281  * This function requires the caller holds hdev->lock.
3282  */
3283 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3284 {
3285         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3286         bool pending;
3287
3288         if (!monitor) {
3289                 *err = -EINVAL;
3290                 return false;
3291         }
3292
3293         pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3294         if (!*err && !pending)
3295                 hci_update_background_scan(hdev);
3296
3297         bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
3298                    hdev->name, handle, *err, pending ? "" : "not ");
3299
3300         return pending;
3301 }
3302
3303 /* Returns true if request is forwarded (result is pending), false otherwise.
3304  * This function requires the caller holds hdev->lock.
3305  */
3306 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
3307 {
3308         struct adv_monitor *monitor;
3309         int idr_next_id = 0;
3310         bool pending = false;
3311         bool update = false;
3312
3313         *err = 0;
3314
3315         while (!*err && !pending) {
3316                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
3317                 if (!monitor)
3318                         break;
3319
3320                 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3321
3322                 if (!*err && !pending)
3323                         update = true;
3324         }
3325
3326         if (update)
3327                 hci_update_background_scan(hdev);
3328
3329         bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
3330                    hdev->name, *err, pending ? "" : "not ");
3331
3332         return pending;
3333 }
3334
3335 /* This function requires the caller holds hdev->lock */
3336 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3337 {
3338         return !idr_is_empty(&hdev->adv_monitors_idr);
3339 }
3340
3341 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3342 {
3343         if (msft_monitor_supported(hdev))
3344                 return HCI_ADV_MONITOR_EXT_MSFT;
3345
3346         return HCI_ADV_MONITOR_EXT_NONE;
3347 }
3348
3349 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3350                                          bdaddr_t *bdaddr, u8 type)
3351 {
3352         struct bdaddr_list *b;
3353
3354         list_for_each_entry(b, bdaddr_list, list) {
3355                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3356                         return b;
3357         }
3358
3359         return NULL;
3360 }
3361
3362 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3363                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3364                                 u8 type)
3365 {
3366         struct bdaddr_list_with_irk *b;
3367
3368         list_for_each_entry(b, bdaddr_list, list) {
3369                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3370                         return b;
3371         }
3372
3373         return NULL;
3374 }
3375
3376 struct bdaddr_list_with_flags *
3377 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3378                                   bdaddr_t *bdaddr, u8 type)
3379 {
3380         struct bdaddr_list_with_flags *b;
3381
3382         list_for_each_entry(b, bdaddr_list, list) {
3383                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3384                         return b;
3385         }
3386
3387         return NULL;
3388 }
3389
3390 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3391 {
3392         struct bdaddr_list *b, *n;
3393
3394         list_for_each_entry_safe(b, n, bdaddr_list, list) {
3395                 list_del(&b->list);
3396                 kfree(b);
3397         }
3398 }
3399
3400 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3401 {
3402         struct bdaddr_list *entry;
3403
3404         if (!bacmp(bdaddr, BDADDR_ANY))
3405                 return -EBADF;
3406
3407         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3408                 return -EEXIST;
3409
3410         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3411         if (!entry)
3412                 return -ENOMEM;
3413
3414         bacpy(&entry->bdaddr, bdaddr);
3415         entry->bdaddr_type = type;
3416
3417         list_add(&entry->list, list);
3418
3419         return 0;
3420 }
3421
3422 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3423                                         u8 type, u8 *peer_irk, u8 *local_irk)
3424 {
3425         struct bdaddr_list_with_irk *entry;
3426
3427         if (!bacmp(bdaddr, BDADDR_ANY))
3428                 return -EBADF;
3429
3430         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3431                 return -EEXIST;
3432
3433         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3434         if (!entry)
3435                 return -ENOMEM;
3436
3437         bacpy(&entry->bdaddr, bdaddr);
3438         entry->bdaddr_type = type;
3439
3440         if (peer_irk)
3441                 memcpy(entry->peer_irk, peer_irk, 16);
3442
3443         if (local_irk)
3444                 memcpy(entry->local_irk, local_irk, 16);
3445
3446         list_add(&entry->list, list);
3447
3448         return 0;
3449 }
3450
3451 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3452                                    u8 type, u32 flags)
3453 {
3454         struct bdaddr_list_with_flags *entry;
3455
3456         if (!bacmp(bdaddr, BDADDR_ANY))
3457                 return -EBADF;
3458
3459         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3460                 return -EEXIST;
3461
3462         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3463         if (!entry)
3464                 return -ENOMEM;
3465
3466         bacpy(&entry->bdaddr, bdaddr);
3467         entry->bdaddr_type = type;
3468         entry->current_flags = flags;
3469
3470         list_add(&entry->list, list);
3471
3472         return 0;
3473 }
3474
3475 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3476 {
3477         struct bdaddr_list *entry;
3478
3479         if (!bacmp(bdaddr, BDADDR_ANY)) {
3480                 hci_bdaddr_list_clear(list);
3481                 return 0;
3482         }
3483
3484         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3485         if (!entry)
3486                 return -ENOENT;
3487
3488         list_del(&entry->list);
3489         kfree(entry);
3490
3491         return 0;
3492 }
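
/* Sketch of the error contract shared by the bdaddr list helpers above
 * (hypothetical caller): add rejects BDADDR_ANY with -EBADF and duplicates
 * with -EEXIST, while del on BDADDR_ANY clears the whole list.
 */
static int example_track_device(struct list_head *list, bdaddr_t *bdaddr)
{
        int err = hci_bdaddr_list_add(list, bdaddr, BDADDR_LE_PUBLIC);

        if (err == -EEXIST)
                return 0; /* already tracked, nothing to do */

        return err;
}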
3493
3494 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3495                                                         u8 type)
3496 {
3497         struct bdaddr_list_with_irk *entry;
3498
3499         if (!bacmp(bdaddr, BDADDR_ANY)) {
3500                 hci_bdaddr_list_clear(list);
3501                 return 0;
3502         }
3503
3504         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3505         if (!entry)
3506                 return -ENOENT;
3507
3508         list_del(&entry->list);
3509         kfree(entry);
3510
3511         return 0;
3512 }
3513
3514 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3515                                    u8 type)
3516 {
3517         struct bdaddr_list_with_flags *entry;
3518
3519         if (!bacmp(bdaddr, BDADDR_ANY)) {
3520                 hci_bdaddr_list_clear(list);
3521                 return 0;
3522         }
3523
3524         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3525         if (!entry)
3526                 return -ENOENT;
3527
3528         list_del(&entry->list);
3529         kfree(entry);
3530
3531         return 0;
3532 }
3533
3534 /* This function requires the caller holds hdev->lock */
3535 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3536                                                bdaddr_t *addr, u8 addr_type)
3537 {
3538         struct hci_conn_params *params;
3539
3540         list_for_each_entry(params, &hdev->le_conn_params, list) {
3541                 if (bacmp(&params->addr, addr) == 0 &&
3542                     params->addr_type == addr_type) {
3543                         return params;
3544                 }
3545         }
3546
3547         return NULL;
3548 }
3549
3550 /* This function requires the caller holds hdev->lock */
3551 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3552                                                   bdaddr_t *addr, u8 addr_type)
3553 {
3554         struct hci_conn_params *param;
3555
3556         switch (addr_type) {
3557         case ADDR_LE_DEV_PUBLIC_RESOLVED:
3558                 addr_type = ADDR_LE_DEV_PUBLIC;
3559                 break;
3560         case ADDR_LE_DEV_RANDOM_RESOLVED:
3561                 addr_type = ADDR_LE_DEV_RANDOM;
3562                 break;
3563         }
3564
3565         list_for_each_entry(param, list, action) {
3566                 if (bacmp(&param->addr, addr) == 0 &&
3567                     param->addr_type == addr_type)
3568                         return param;
3569         }
3570
3571         return NULL;
3572 }
3573
3574 /* This function requires the caller holds hdev->lock */
3575 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3576                                             bdaddr_t *addr, u8 addr_type)
3577 {
3578         struct hci_conn_params *params;
3579
3580         params = hci_conn_params_lookup(hdev, addr, addr_type);
3581         if (params)
3582                 return params;
3583
3584         params = kzalloc(sizeof(*params), GFP_KERNEL);
3585         if (!params) {
3586                 bt_dev_err(hdev, "out of memory");
3587                 return NULL;
3588         }
3589
3590         bacpy(&params->addr, addr);
3591         params->addr_type = addr_type;
3592
3593         list_add(&params->list, &hdev->le_conn_params);
3594         INIT_LIST_HEAD(&params->action);
3595
3596         params->conn_min_interval = hdev->le_conn_min_interval;
3597         params->conn_max_interval = hdev->le_conn_max_interval;
3598         params->conn_latency = hdev->le_conn_latency;
3599         params->supervision_timeout = hdev->le_supv_timeout;
3600         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3601
3602         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3603
3604         return params;
3605 }
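
/* Hypothetical sketch of how the helper above is typically used: since it
 * is idempotent, callers can treat it as "lookup or create" and then
 * adjust the returned defaults. hdev->lock must be held.
 */
static int example_enable_autoconnect(struct hci_dev *hdev, bdaddr_t *addr)
{
        struct hci_conn_params *params;

        params = hci_conn_params_add(hdev, addr, ADDR_LE_DEV_PUBLIC);
        if (!params)
                return -ENOMEM;

        params->auto_connect = HCI_AUTO_CONN_ALWAYS;

        return 0;
}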
3606
3607 static void hci_conn_params_free(struct hci_conn_params *params)
3608 {
3609         if (params->conn) {
3610                 hci_conn_drop(params->conn);
3611                 hci_conn_put(params->conn);
3612         }
3613
3614         list_del(&params->action);
3615         list_del(&params->list);
3616         kfree(params);
3617 }
3618
3619 /* This function requires the caller holds hdev->lock */
3620 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3621 {
3622         struct hci_conn_params *params;
3623
3624         params = hci_conn_params_lookup(hdev, addr, addr_type);
3625         if (!params)
3626                 return;
3627
3628         hci_conn_params_free(params);
3629
3630         hci_update_background_scan(hdev);
3631
3632         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3633 }
3634
3635 /* This function requires the caller holds hdev->lock */
3636 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3637 {
3638         struct hci_conn_params *params, *tmp;
3639
3640         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3641                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3642                         continue;
3643
3644                 /* If we are trying to establish a one-time connection to a
3645                  * disabled device, keep the params but mark them explicit-only.
3646                  */
3647                 if (params->explicit_connect) {
3648                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3649                         continue;
3650                 }
3651
3652                 list_del(&params->list);
3653                 kfree(params);
3654         }
3655
3656         BT_DBG("All LE disabled connection parameters were removed");
3657 }
3658
3659 /* This function requires the caller holds hdev->lock */
3660 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3661 {
3662         struct hci_conn_params *params, *tmp;
3663
3664         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3665                 hci_conn_params_free(params);
3666
3667         BT_DBG("All LE connection parameters were removed");
3668 }
3669
3670 /* Copy the Identity Address of the controller.
3671  *
3672  * If the controller has a public BD_ADDR, then by default use that one.
3673  * If this is an LE-only controller without a public address, default to
3674  * the static random address.
3675  *
3676  * For debugging purposes it is possible to force controllers with a
3677  * public address to use the static random address instead.
3678  *
3679  * In case BR/EDR has been disabled on a dual-mode controller and
3680  * userspace has configured a static address, then that address
3681  * becomes the identity address instead of the public BR/EDR address.
3682  */
3683 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3684                                u8 *bdaddr_type)
3685 {
3686         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3687             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3688             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3689              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3690                 bacpy(bdaddr, &hdev->static_addr);
3691                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3692         } else {
3693                 bacpy(bdaddr, &hdev->bdaddr);
3694                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3695         }
3696 }
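
/* Example (hypothetical) of consuming the identity address selected by the
 * rules above: callers receive either the public address with
 * ADDR_LE_DEV_PUBLIC or the static random address with ADDR_LE_DEV_RANDOM
 * and never re-implement the precedence themselves.
 */
static void example_log_identity(struct hci_dev *hdev)
{
        bdaddr_t bdaddr;
        u8 bdaddr_type;

        hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
        BT_DBG("identity %pMR (type %u)", &bdaddr, bdaddr_type);
}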
3697
3698 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3699 {
3700         int i;
3701
3702         for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3703                 clear_bit(i, hdev->suspend_tasks);
3704
3705         wake_up(&hdev->suspend_wait_q);
3706 }
3707
3708 static int hci_suspend_wait_event(struct hci_dev *hdev)
3709 {
3710 #define WAKE_COND                                                              \
3711         (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
3712          __SUSPEND_NUM_TASKS)
3713
3714         int i;
3715         int ret = wait_event_timeout(hdev->suspend_wait_q,
3716                                      WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3717
3718         if (ret == 0) {
3719                 bt_dev_err(hdev, "Timed out waiting for suspend events");
3720                 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3721                         if (test_bit(i, hdev->suspend_tasks))
3722                                 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3723                         clear_bit(i, hdev->suspend_tasks);
3724                 }
3725
3726                 ret = -ETIMEDOUT;
3727         } else {
3728                 ret = 0;
3729         }
3730
3731         return ret;
3732 }
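
/* The wait above completes once every bit in hdev->suspend_tasks is clear:
 * find_first_bit() returning __SUSPEND_NUM_TASKS means the bitmap is
 * empty. A hypothetical producer signalling completion of one task would
 * look like this:
 */
static void example_complete_suspend_task(struct hci_dev *hdev, int task)
{
        if (test_and_clear_bit(task, hdev->suspend_tasks))
                wake_up(&hdev->suspend_wait_q);
}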
3733
3734 static void hci_prepare_suspend(struct work_struct *work)
3735 {
3736         struct hci_dev *hdev =
3737                 container_of(work, struct hci_dev, suspend_prepare);
3738
3739         hci_dev_lock(hdev);
3740         hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3741         hci_dev_unlock(hdev);
3742 }
3743
3744 static int hci_change_suspend_state(struct hci_dev *hdev,
3745                                     enum suspended_state next)
3746 {
3747         hdev->suspend_state_next = next;
3748         set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3749         queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3750         return hci_suspend_wait_event(hdev);
3751 }
3752
3753 static void hci_clear_wake_reason(struct hci_dev *hdev)
3754 {
3755         hci_dev_lock(hdev);
3756
3757         hdev->wake_reason = 0;
3758         bacpy(&hdev->wake_addr, BDADDR_ANY);
3759         hdev->wake_addr_type = 0;
3760
3761         hci_dev_unlock(hdev);
3762 }
3763
3764 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3765                                 void *data)
3766 {
3767         struct hci_dev *hdev =
3768                 container_of(nb, struct hci_dev, suspend_notifier);
3769         int ret = 0;
3770         u8 state = BT_RUNNING;
3771
3772         /* If powering down, wait for completion. */
3773         if (mgmt_powering_down(hdev)) {
3774                 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3775                 ret = hci_suspend_wait_event(hdev);
3776                 if (ret)
3777                         goto done;
3778         }
3779
3780         /* Suspend notifier should only act on events when powered. */
3781         if (!hdev_is_powered(hdev) ||
3782             hci_dev_test_flag(hdev, HCI_UNREGISTER))
3783                 goto done;
3784
3785         if (action == PM_SUSPEND_PREPARE) {
3786                 /* Suspend consists of two actions:
3787                  *  - First, disconnect everything and make the controller not
3788                  *    connectable (disabling scanning)
3789                  *  - Second, program event filter/accept list and enable scan
3790                  */
3791                 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3792                 if (!ret)
3793                         state = BT_SUSPEND_DISCONNECT;
3794
3795                 /* Only configure accept list if disconnect succeeded and wake
3796                  * isn't being prevented.
3797                  */
3798                 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3799                         ret = hci_change_suspend_state(hdev,
3800                                                 BT_SUSPEND_CONFIGURE_WAKE);
3801                         if (!ret)
3802                                 state = BT_SUSPEND_CONFIGURE_WAKE;
3803                 }
3804
3805                 hci_clear_wake_reason(hdev);
3806                 mgmt_suspending(hdev, state);
3807
3808         } else if (action == PM_POST_SUSPEND) {
3809                 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3810
3811                 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3812                               hdev->wake_addr_type);
3813         }
3814
3815 done:
3816         /* We always allow suspend even if suspend preparation failed, and
3817          * attempt to recover in resume.
3818          */
3819         if (ret)
3820                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3821                            action, ret);
3822
3823         return NOTIFY_DONE;
3824 }
3825
3826 /* Alloc HCI device */
3827 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
3828 {
3829         struct hci_dev *hdev;
3830         unsigned int alloc_size;
3831
3832         alloc_size = sizeof(*hdev);
3833         if (sizeof_priv) {
3834                 /* FIXME: may need alignment? */
3835                 alloc_size += sizeof_priv;
3836         }
3837
3838         hdev = kzalloc(alloc_size, GFP_KERNEL);
3839         if (!hdev)
3840                 return NULL;
3841
3842         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3843         hdev->esco_type = (ESCO_HV1);
3844         hdev->link_mode = (HCI_LM_ACCEPT);
3845         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3846         hdev->io_capability = 0x03;     /* No Input No Output */
3847         hdev->manufacturer = 0xffff;    /* Default to internal use */
3848         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3849         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3850         hdev->adv_instance_cnt = 0;
3851         hdev->cur_adv_instance = 0x00;
3852         hdev->adv_instance_timeout = 0;
3853
3854         hdev->advmon_allowlist_duration = 300;
3855         hdev->advmon_no_filter_duration = 500;
3856         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */
3857
3858         hdev->sniff_max_interval = 800;
3859 #ifdef TIZEN_BT
3860         hdev->sniff_min_interval = 400;
3861 #else
3862         hdev->sniff_min_interval = 80;
3863 #endif
3864         hdev->le_adv_channel_map = 0x07;
3865         hdev->le_adv_min_interval = 0x0800;
3866         hdev->le_adv_max_interval = 0x0800;
3867 #ifdef TIZEN_BT
3868         /* automatically enable sniff mode for connection */
3869         hdev->idle_timeout = TIZEN_SNIFF_TIMEOUT * 1000;
3870
3871         hdev->adv_filter_policy = 0x00;
3872         hdev->adv_type = 0x00;
3873 #endif
3874         hdev->le_scan_interval = 0x0060;
3875         hdev->le_scan_window = 0x0030;
3876         hdev->le_scan_int_suspend = 0x0400;
3877         hdev->le_scan_window_suspend = 0x0012;
3878         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3879         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3880         hdev->le_scan_int_adv_monitor = 0x0060;
3881         hdev->le_scan_window_adv_monitor = 0x0030;
3882         hdev->le_scan_int_connect = 0x0060;
3883         hdev->le_scan_window_connect = 0x0060;
3884         hdev->le_conn_min_interval = 0x0018;
3885         hdev->le_conn_max_interval = 0x0028;
3886         hdev->le_conn_latency = 0x0000;
3887         hdev->le_supv_timeout = 0x002a;
3888         hdev->le_def_tx_len = 0x001b;
3889         hdev->le_def_tx_time = 0x0148;
3890         hdev->le_max_tx_len = 0x001b;
3891         hdev->le_max_tx_time = 0x0148;
3892         hdev->le_max_rx_len = 0x001b;
3893         hdev->le_max_rx_time = 0x0148;
3894         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3895         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3896         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3897         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3898         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3899         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3900         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3901         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3902         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3903
3904         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3905         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3906         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3907         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3908         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3909         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3910
3911         /* default 1.28 sec page scan */
3912         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3913         hdev->def_page_scan_int = 0x0800;
3914         hdev->def_page_scan_window = 0x0012;
3915
3916         mutex_init(&hdev->lock);
3917         mutex_init(&hdev->req_lock);
3918
3919         INIT_LIST_HEAD(&hdev->mgmt_pending);
3920         INIT_LIST_HEAD(&hdev->reject_list);
3921         INIT_LIST_HEAD(&hdev->accept_list);
3922         INIT_LIST_HEAD(&hdev->uuids);
3923         INIT_LIST_HEAD(&hdev->link_keys);
3924         INIT_LIST_HEAD(&hdev->long_term_keys);
3925         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3926         INIT_LIST_HEAD(&hdev->remote_oob_data);
3927         INIT_LIST_HEAD(&hdev->le_accept_list);
3928         INIT_LIST_HEAD(&hdev->le_resolv_list);
3929         INIT_LIST_HEAD(&hdev->le_conn_params);
3930         INIT_LIST_HEAD(&hdev->pend_le_conns);
3931         INIT_LIST_HEAD(&hdev->pend_le_reports);
3932         INIT_LIST_HEAD(&hdev->conn_hash.list);
3933         INIT_LIST_HEAD(&hdev->adv_instances);
3934         INIT_LIST_HEAD(&hdev->blocked_keys);
3935
3936         INIT_WORK(&hdev->rx_work, hci_rx_work);
3937         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3938         INIT_WORK(&hdev->tx_work, hci_tx_work);
3939         INIT_WORK(&hdev->power_on, hci_power_on);
3940         INIT_WORK(&hdev->error_reset, hci_error_reset);
3941         INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3942
3943         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3944
3945         skb_queue_head_init(&hdev->rx_q);
3946         skb_queue_head_init(&hdev->cmd_q);
3947         skb_queue_head_init(&hdev->raw_q);
3948
3949         init_waitqueue_head(&hdev->req_wait_q);
3950         init_waitqueue_head(&hdev->suspend_wait_q);
3951
3952         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3953         INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
3954
3955         hci_request_setup(hdev);
3956
3957         hci_init_sysfs(hdev);
3958         discovery_init(hdev);
3959
3960         return hdev;
3961 }
3962 EXPORT_SYMBOL(hci_alloc_dev_priv);
3963
3964 /* Free HCI device */
3965 void hci_free_dev(struct hci_dev *hdev)
3966 {
3967         /* will free via device release */
3968         put_device(&hdev->dev);
3969 }
3970 EXPORT_SYMBOL(hci_free_dev);
3971
3972 /* Register HCI device */
3973 int hci_register_dev(struct hci_dev *hdev)
3974 {
3975         int id, error;
3976
3977         if (!hdev->open || !hdev->close || !hdev->send)
3978                 return -EINVAL;
3979
3980         /* Do not allow HCI_AMP devices to register at index 0,
3981          * so the index can be used as the AMP controller ID.
3982          */
3983         switch (hdev->dev_type) {
3984         case HCI_PRIMARY:
3985                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
3986                 break;
3987         case HCI_AMP:
3988                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
3989                 break;
3990         default:
3991                 return -EINVAL;
3992         }
3993
3994         if (id < 0)
3995                 return id;
3996
3997         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3998         hdev->id = id;
3999
4000         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4001
4002         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
4003         if (!hdev->workqueue) {
4004                 error = -ENOMEM;
4005                 goto err;
4006         }
4007
4008         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
4009                                                       hdev->name);
4010         if (!hdev->req_workqueue) {
4011                 destroy_workqueue(hdev->workqueue);
4012                 error = -ENOMEM;
4013                 goto err;
4014         }
4015
4016         if (!IS_ERR_OR_NULL(bt_debugfs))
4017                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4018
4019         dev_set_name(&hdev->dev, "%s", hdev->name);
4020
4021         error = device_add(&hdev->dev);
4022         if (error < 0)
4023                 goto err_wqueue;
4024
4025         hci_leds_init(hdev);
4026
4027         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4028                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4029                                     hdev);
4030         if (hdev->rfkill) {
4031                 if (rfkill_register(hdev->rfkill) < 0) {
4032                         rfkill_destroy(hdev->rfkill);
4033                         hdev->rfkill = NULL;
4034                 }
4035         }
4036
4037         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4038                 hci_dev_set_flag(hdev, HCI_RFKILLED);
4039
4040         hci_dev_set_flag(hdev, HCI_SETUP);
4041         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
4042
4043         if (hdev->dev_type == HCI_PRIMARY) {
4044                 /* Assume BR/EDR support until proven otherwise (such as
4045                  * through reading supported features during init).
4046                  */
4047                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
4048         }
4049
4050         write_lock(&hci_dev_list_lock);
4051         list_add(&hdev->list, &hci_dev_list);
4052         write_unlock(&hci_dev_list_lock);
4053
4054         /* Devices that are marked for raw-only usage are unconfigured
4055          * and should not be included in normal operation.
4056          */
4057         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4058                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
4059
4060         hci_sock_dev_event(hdev, HCI_DEV_REG);
4061         hci_dev_hold(hdev);
4062
4063         if (!hdev->suspend_notifier.notifier_call &&
4064             !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
4065                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
4066                 error = register_pm_notifier(&hdev->suspend_notifier);
4067                 if (error)
4068                         goto err_wqueue;
4069         }
4070
4071         queue_work(hdev->req_workqueue, &hdev->power_on);
4072
4073         idr_init(&hdev->adv_monitors_idr);
4074
4075         return id;
4076
4077 err_wqueue:
4078         debugfs_remove_recursive(hdev->debugfs);
4079         destroy_workqueue(hdev->workqueue);
4080         destroy_workqueue(hdev->req_workqueue);
4081 err:
4082         ida_simple_remove(&hci_index_ida, hdev->id);
4083
4084         return error;
4085 }
4086 EXPORT_SYMBOL(hci_register_dev);
4087
4088 /* Unregister HCI device */
4089 void hci_unregister_dev(struct hci_dev *hdev)
4090 {
4091         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4092
4093         hci_dev_set_flag(hdev, HCI_UNREGISTER);
4094
4095         write_lock(&hci_dev_list_lock);
4096         list_del(&hdev->list);
4097         write_unlock(&hci_dev_list_lock);
4098
4099         cancel_work_sync(&hdev->power_on);
4100
4101         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
4102                 hci_suspend_clear_tasks(hdev);
4103                 unregister_pm_notifier(&hdev->suspend_notifier);
4104                 cancel_work_sync(&hdev->suspend_prepare);
4105         }
4106
4107         hci_dev_do_close(hdev);
4108
4109         if (!test_bit(HCI_INIT, &hdev->flags) &&
4110             !hci_dev_test_flag(hdev, HCI_SETUP) &&
4111             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
4112                 hci_dev_lock(hdev);
4113                 mgmt_index_removed(hdev);
4114                 hci_dev_unlock(hdev);
4115         }
4116
4117         /* mgmt_index_removed should take care of emptying the
4118          * pending list */
4119         BUG_ON(!list_empty(&hdev->mgmt_pending));
4120
4121         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
4122
4123         if (hdev->rfkill) {
4124                 rfkill_unregister(hdev->rfkill);
4125                 rfkill_destroy(hdev->rfkill);
4126         }
4127
4128         device_del(&hdev->dev);
4129         /* Actual cleanup is deferred until hci_release_dev(). */
4130         hci_dev_put(hdev);
4131 }
4132 EXPORT_SYMBOL(hci_unregister_dev);
4133
4134 /* Release HCI device */
4135 void hci_release_dev(struct hci_dev *hdev)
4136 {
4137         debugfs_remove_recursive(hdev->debugfs);
4138         kfree_const(hdev->hw_info);
4139         kfree_const(hdev->fw_info);
4140
4141         destroy_workqueue(hdev->workqueue);
4142         destroy_workqueue(hdev->req_workqueue);
4143
4144         hci_dev_lock(hdev);
4145         hci_bdaddr_list_clear(&hdev->reject_list);
4146         hci_bdaddr_list_clear(&hdev->accept_list);
4147         hci_uuids_clear(hdev);
4148         hci_link_keys_clear(hdev);
4149         hci_smp_ltks_clear(hdev);
4150         hci_smp_irks_clear(hdev);
4151         hci_remote_oob_data_clear(hdev);
4152         hci_adv_instances_clear(hdev);
4153         hci_adv_monitors_clear(hdev);
4154         hci_bdaddr_list_clear(&hdev->le_accept_list);
4155         hci_bdaddr_list_clear(&hdev->le_resolv_list);
4156         hci_conn_params_clear_all(hdev);
4157         hci_discovery_filter_clear(hdev);
4158         hci_blocked_keys_clear(hdev);
4159         hci_dev_unlock(hdev);
4160
4161         ida_simple_remove(&hci_index_ida, hdev->id);
4162         kfree_skb(hdev->sent_cmd);
4163         kfree(hdev);
4164 }
4165 EXPORT_SYMBOL(hci_release_dev);
4166
4167 /* Suspend HCI device */
4168 int hci_suspend_dev(struct hci_dev *hdev)
4169 {
4170         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
4171         return 0;
4172 }
4173 EXPORT_SYMBOL(hci_suspend_dev);
4174
4175 /* Resume HCI device */
4176 int hci_resume_dev(struct hci_dev *hdev)
4177 {
4178         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
4179         return 0;
4180 }
4181 EXPORT_SYMBOL(hci_resume_dev);
4182
4183 /* Reset HCI device */
4184 int hci_reset_dev(struct hci_dev *hdev)
4185 {
4186         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4187         struct sk_buff *skb;
4188
4189         skb = bt_skb_alloc(3, GFP_ATOMIC);
4190         if (!skb)
4191                 return -ENOMEM;
4192
4193         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4194         skb_put_data(skb, hw_err, 3);
4195
4196         bt_dev_err(hdev, "Injecting HCI hardware error event");
4197
4198         /* Send Hardware Error to upper stack */
4199         return hci_recv_frame(hdev, skb);
4200 }
4201 EXPORT_SYMBOL(hci_reset_dev);
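
/* For reference, the injected frame above follows the standard HCI event
 * packet layout:
 *
 *      hw_err[0] = HCI_EV_HARDWARE_ERROR;   event code (0x10)
 *      hw_err[1] = 0x01;                    parameter total length
 *      hw_err[2] = 0x00;                    hardware code parameter
 *
 * so the normal hardware-error handling (hdev->error_reset) takes over as
 * if the controller itself had reported the error.
 */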
4202
4203 /* Receive frame from HCI drivers */
4204 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4205 {
4206         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4207                       && !test_bit(HCI_INIT, &hdev->flags))) {
4208                 kfree_skb(skb);
4209                 return -ENXIO;
4210         }
4211
4212         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4213             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4214             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4215             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
4216                 kfree_skb(skb);
4217                 return -EINVAL;
4218         }
4219
4220         /* Incoming skb */
4221         bt_cb(skb)->incoming = 1;
4222
4223         /* Time stamp */
4224         __net_timestamp(skb);
4225
4226         skb_queue_tail(&hdev->rx_q, skb);
4227         queue_work(hdev->workqueue, &hdev->rx_work);
4228
4229         return 0;
4230 }
4231 EXPORT_SYMBOL(hci_recv_frame);
4232
4233 /* Receive diagnostic message from HCI drivers */
4234 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4235 {
4236         /* Mark as diagnostic packet */
4237         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4238
4239         /* Time stamp */
4240         __net_timestamp(skb);
4241
4242         skb_queue_tail(&hdev->rx_q, skb);
4243         queue_work(hdev->workqueue, &hdev->rx_work);
4244
4245         return 0;
4246 }
4247 EXPORT_SYMBOL(hci_recv_diag);
4248
4249 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4250 {
4251         va_list vargs;
4252
4253         va_start(vargs, fmt);
4254         kfree_const(hdev->hw_info);
4255         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4256         va_end(vargs);
4257 }
4258 EXPORT_SYMBOL(hci_set_hw_info);
4259
4260 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4261 {
4262         va_list vargs;
4263
4264         va_start(vargs, fmt);
4265         kfree_const(hdev->fw_info);
4266         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4267         va_end(vargs);
4268 }
4269 EXPORT_SYMBOL(hci_set_fw_info);
4270
4271 /* ---- Interface to upper protocols ---- */
4272
4273 int hci_register_cb(struct hci_cb *cb)
4274 {
4275         BT_DBG("%p name %s", cb, cb->name);
4276
4277         mutex_lock(&hci_cb_list_lock);
4278         list_add_tail(&cb->list, &hci_cb_list);
4279         mutex_unlock(&hci_cb_list_lock);
4280
4281         return 0;
4282 }
4283 EXPORT_SYMBOL(hci_register_cb);
4284
4285 int hci_unregister_cb(struct hci_cb *cb)
4286 {
4287         BT_DBG("%p name %s", cb, cb->name);
4288
4289         mutex_lock(&hci_cb_list_lock);
4290         list_del(&cb->list);
4291         mutex_unlock(&hci_cb_list_lock);
4292
4293         return 0;
4294 }
4295 EXPORT_SYMBOL(hci_unregister_cb);
4296
4297 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4298 {
4299         int err;
4300
4301         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4302                skb->len);
4303
4304         /* Time stamp */
4305         __net_timestamp(skb);
4306
4307         /* Send copy to monitor */
4308         hci_send_to_monitor(hdev, skb);
4309
4310         if (atomic_read(&hdev->promisc)) {
4311                 /* Send copy to the sockets */
4312                 hci_send_to_sock(hdev, skb);
4313         }
4314
4315         /* Get rid of skb owner, prior to sending to the driver. */
4316         skb_orphan(skb);
4317
4318         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4319                 kfree_skb(skb);
4320                 return;
4321         }
4322
4323         err = hdev->send(hdev, skb);
4324         if (err < 0) {
4325                 bt_dev_err(hdev, "sending frame failed (%d)", err);
4326                 kfree_skb(skb);
4327         }
4328 }
4329
4330 /* Send HCI command */
4331 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4332                  const void *param)
4333 {
4334         struct sk_buff *skb;
4335
4336         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4337
4338         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4339         if (!skb) {
4340                 bt_dev_err(hdev, "no memory for command");
4341                 return -ENOMEM;
4342         }
4343
4344         /* Stand-alone HCI commands must be flagged as
4345          * single-command requests.
4346          */
4347         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4348
4349         skb_queue_tail(&hdev->cmd_q, skb);
4350         queue_work(hdev->workqueue, &hdev->cmd_work);
4351
4352         return 0;
4353 }
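
/* Usage sketch (hypothetical): queue a stand-alone command. The result is
 * delivered asynchronously through the matching Command Complete/Status
 * event, so a zero return only means the command was queued successfully.
 */
static int example_queue_reset(struct hci_dev *hdev)
{
        /* HCI_OP_RESET carries no parameters */
        return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}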
4354
4355 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4356                    const void *param)
4357 {
4358         struct sk_buff *skb;
4359
4360         if (hci_opcode_ogf(opcode) != 0x3f) {
4361                 /* A controller receiving a command shall respond with either
4362                  * a Command Status Event or a Command Complete Event.
4363                  * Therefore, all standard HCI commands must be sent via the
4364                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4365                  * Some vendors do not comply with this rule for vendor-specific
4366                  * commands and do not return any event. We want to support
4367                  * unresponded commands for such cases only.
4368                  */
4369                 bt_dev_err(hdev, "unresponded command not supported");
4370                 return -EINVAL;
4371         }
4372
4373         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4374         if (!skb) {
4375                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4376                            opcode);
4377                 return -ENOMEM;
4378         }
4379
4380         hci_send_frame(hdev, skb);
4381
4382         return 0;
4383 }
4384 EXPORT_SYMBOL(__hci_cmd_send);
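
/* Sketch of the one legitimate use of __hci_cmd_send described above: a
 * vendor-specific command (OGF 0x3f) for which the controller is known not
 * to generate any event. The OCF and payload below are made up purely for
 * illustration.
 */
static int example_send_vendor_cmd(struct hci_dev *hdev)
{
        u8 param[] = { 0x01 };

        /* hci_opcode_pack() builds the 16-bit opcode from OGF and OCF */
        return __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
                              sizeof(param), param);
}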
4385
4386 /* Get data from the previously sent command */
4387 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4388 {
4389         struct hci_command_hdr *hdr;
4390
4391         if (!hdev->sent_cmd)
4392                 return NULL;
4393
4394         hdr = (void *) hdev->sent_cmd->data;
4395
4396         if (hdr->opcode != cpu_to_le16(opcode))
4397                 return NULL;
4398
4399         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4400
4401         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4402 }
4403
4404 /* Send HCI command and wait for command complete event */
4405 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4406                              const void *param, u32 timeout)
4407 {
4408         struct sk_buff *skb;
4409
4410         if (!test_bit(HCI_UP, &hdev->flags))
4411                 return ERR_PTR(-ENETDOWN);
4412
4413         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4414
4415         hci_req_sync_lock(hdev);
4416         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4417         hci_req_sync_unlock(hdev);
4418
4419         return skb;
4420 }
4421 EXPORT_SYMBOL(hci_cmd_sync);
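
/* Usage sketch (hypothetical): synchronous commands return either the
 * response skb or an ERR_PTR, so callers check with IS_ERR() rather than
 * for NULL. HCI_CMD_TIMEOUT is the timeout conventionally used elsewhere
 * in the stack.
 */
static int example_read_voice_setting(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = hci_cmd_sync(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL,
                           HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        return 0;
}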
4422
4423 /* Send ACL data */
4424 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4425 {
4426         struct hci_acl_hdr *hdr;
4427         int len = skb->len;
4428
4429         skb_push(skb, HCI_ACL_HDR_SIZE);
4430         skb_reset_transport_header(skb);
4431         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4432         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4433         hdr->dlen   = cpu_to_le16(len);
4434 }
4435
4436 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4437                           struct sk_buff *skb, __u16 flags)
4438 {
4439         struct hci_conn *conn = chan->conn;
4440         struct hci_dev *hdev = conn->hdev;
4441         struct sk_buff *list;
4442
4443         skb->len = skb_headlen(skb);
4444         skb->data_len = 0;
4445
4446         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4447
4448         switch (hdev->dev_type) {
4449         case HCI_PRIMARY:
4450                 hci_add_acl_hdr(skb, conn->handle, flags);
4451                 break;
4452         case HCI_AMP:
4453                 hci_add_acl_hdr(skb, chan->handle, flags);
4454                 break;
4455         default:
4456                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4457                 return;
4458         }
4459
4460         list = skb_shinfo(skb)->frag_list;
4461         if (!list) {
4462                 /* Non-fragmented */
4463                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4464
4465                 skb_queue_tail(queue, skb);
4466         } else {
4467                 /* Fragmented */
4468                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4469
4470                 skb_shinfo(skb)->frag_list = NULL;
4471
4472                 /* Queue all fragments atomically. We need to use spin_lock_bh
4473                  * here because of 6LoWPAN links, as there this function is
4474                  * called from softirq and using normal spin lock could cause
4475                  * deadlocks.
4476                  */
4477                 spin_lock_bh(&queue->lock);
4478
4479                 __skb_queue_tail(queue, skb);
4480
4481                 flags &= ~ACL_START;
4482                 flags |= ACL_CONT;
4483                 do {
4484                         skb = list; list = list->next;
4485
4486                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4487                         hci_add_acl_hdr(skb, conn->handle, flags);
4488
4489                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4490
4491                         __skb_queue_tail(queue, skb);
4492                 } while (list);
4493
4494                 spin_unlock_bh(&queue->lock);
4495         }
4496 }
4497
4498 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4499 {
4500         struct hci_dev *hdev = chan->conn->hdev;
4501
4502         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4503
4504         hci_queue_acl(chan, &chan->data_q, skb, flags);
4505
4506         queue_work(hdev->workqueue, &hdev->tx_work);
4507 }
4508
4509 /* Send SCO data */
4510 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4511 {
4512         struct hci_dev *hdev = conn->hdev;
4513         struct hci_sco_hdr hdr;
4514
4515         BT_DBG("%s len %d", hdev->name, skb->len);
4516
4517         hdr.handle = cpu_to_le16(conn->handle);
4518         hdr.dlen   = skb->len;
4519
4520         skb_push(skb, HCI_SCO_HDR_SIZE);
4521         skb_reset_transport_header(skb);
4522         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4523
4524         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4525
4526         skb_queue_tail(&conn->data_q, skb);
4527         queue_work(hdev->workqueue, &hdev->tx_work);
4528 }
4529
4530 /* ---- HCI TX task (outgoing data) ---- */
4531
4532 /* HCI Connection scheduler */
4533 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4534                                      int *quote)
4535 {
4536         struct hci_conn_hash *h = &hdev->conn_hash;
4537         struct hci_conn *conn = NULL, *c;
4538         unsigned int num = 0, min = ~0;
4539
4540         /* We don't have to lock device here. Connections are always
4541          * added and removed with TX task disabled. */
4542
4543         rcu_read_lock();
4544
4545         list_for_each_entry_rcu(c, &h->list, list) {
4546                 if (c->type != type || skb_queue_empty(&c->data_q))
4547                         continue;
4548
4549                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4550                         continue;
4551
4552                 num++;
4553
4554                 if (c->sent < min) {
4555                         min  = c->sent;
4556                         conn = c;
4557                 }
4558
4559                 if (hci_conn_num(hdev, type) == num)
4560                         break;
4561         }
4562
4563         rcu_read_unlock();
4564
4565         if (conn) {
4566                 int cnt, q;
4567
4568                 switch (conn->type) {
4569                 case ACL_LINK:
4570                         cnt = hdev->acl_cnt;
4571                         break;
4572                 case SCO_LINK:
4573                 case ESCO_LINK:
4574                         cnt = hdev->sco_cnt;
4575                         break;
4576                 case LE_LINK:
4577                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4578                         break;
4579                 default:
4580                         cnt = 0;
4581                         bt_dev_err(hdev, "unknown link type %d", conn->type);
4582                 }
4583
4584                 q = cnt / num;
4585                 *quote = q ? q : 1;
4586         } else
4587                 *quote = 0;
4588
4589         BT_DBG("conn %p quote %d", conn, *quote);
4590         return conn;
4591 }
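
/* Worked example of the fair quota computed above: with hdev->acl_cnt == 8
 * free ACL buffers and num == 3 busy ACL connections, the least-sent
 * connection is picked and granted quote = 8 / 3 = 2 packets; a zero
 * quotient is rounded up to 1 so the scheduler always makes progress.
 */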
4592
4593 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4594 {
4595         struct hci_conn_hash *h = &hdev->conn_hash;
4596         struct hci_conn *c;
4597
4598         bt_dev_err(hdev, "link tx timeout");
4599
4600         rcu_read_lock();
4601
4602         /* Kill stalled connections */
4603         list_for_each_entry_rcu(c, &h->list, list) {
4604                 if (c->type == type && c->sent) {
4605                         bt_dev_err(hdev, "killing stalled connection %pMR",
4606                                    &c->dst);
4607                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4608                 }
4609         }
4610
4611         rcu_read_unlock();
4612 }
4613
4614 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4615                                       int *quote)
4616 {
4617         struct hci_conn_hash *h = &hdev->conn_hash;
4618         struct hci_chan *chan = NULL;
4619         unsigned int num = 0, min = ~0, cur_prio = 0;
4620         struct hci_conn *conn;
4621         int cnt, q, conn_num = 0;
4622
4623         BT_DBG("%s", hdev->name);
4624
4625         rcu_read_lock();
4626
4627         list_for_each_entry_rcu(conn, &h->list, list) {
4628                 struct hci_chan *tmp;
4629
4630                 if (conn->type != type)
4631                         continue;
4632
4633                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4634                         continue;
4635
4636                 conn_num++;
4637
4638                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4639                         struct sk_buff *skb;
4640
4641                         if (skb_queue_empty(&tmp->data_q))
4642                                 continue;
4643
4644                         skb = skb_peek(&tmp->data_q);
4645                         if (skb->priority < cur_prio)
4646                                 continue;
4647
4648                         if (skb->priority > cur_prio) {
4649                                 num = 0;
4650                                 min = ~0;
4651                                 cur_prio = skb->priority;
4652                         }
4653
4654                         num++;
4655
4656                         if (conn->sent < min) {
4657                                 min  = conn->sent;
4658                                 chan = tmp;
4659                         }
4660                 }
4661
4662                 if (hci_conn_num(hdev, type) == conn_num)
4663                         break;
4664         }
4665
4666         rcu_read_unlock();
4667
4668         if (!chan)
4669                 return NULL;
4670
4671         switch (chan->conn->type) {
4672         case ACL_LINK:
4673                 cnt = hdev->acl_cnt;
4674                 break;
4675         case AMP_LINK:
4676                 cnt = hdev->block_cnt;
4677                 break;
4678         case SCO_LINK:
4679         case ESCO_LINK:
4680                 cnt = hdev->sco_cnt;
4681                 break;
4682         case LE_LINK:
4683                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4684                 break;
4685         default:
4686                 cnt = 0;
4687                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4688         }
4689
4690         q = cnt / num;
4691         *quote = q ? q : 1;
4692         BT_DBG("chan %p quote %d", chan, *quote);
4693         return chan;
4694 }
4695
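/* Priority inversion avoidance: any channel that was skipped on the
 * last scheduling pass (sent == 0 but data still queued) gets the head
 * of its queue promoted to HCI_PRIO_MAX - 1 so it cannot be starved
 * indefinitely by higher-priority traffic. Channels that did send
 * simply have their per-pass 'sent' counter reset.
 */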
4696 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4697 {
4698         struct hci_conn_hash *h = &hdev->conn_hash;
4699         struct hci_conn *conn;
4700         int num = 0;
4701
4702         BT_DBG("%s", hdev->name);
4703
4704         rcu_read_lock();
4705
4706         list_for_each_entry_rcu(conn, &h->list, list) {
4707                 struct hci_chan *chan;
4708
4709                 if (conn->type != type)
4710                         continue;
4711
4712                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4713                         continue;
4714
4715                 num++;
4716
4717                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4718                         struct sk_buff *skb;
4719
4720                         if (chan->sent) {
4721                                 chan->sent = 0;
4722                                 continue;
4723                         }
4724
4725                         if (skb_queue_empty(&chan->data_q))
4726                                 continue;
4727
4728                         skb = skb_peek(&chan->data_q);
4729                         if (skb->priority >= HCI_PRIO_MAX - 1)
4730                                 continue;
4731
4732                         skb->priority = HCI_PRIO_MAX - 1;
4733
4734                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4735                                skb->priority);
4736                 }
4737
4738                 if (hci_conn_num(hdev, type) == num)
4739                         break;
4740         }
4741
4742         rcu_read_unlock();
4743
4744 }
4745
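/* Number of controller buffer blocks consumed by one ACL packet when
 * block-based flow control is in use (payload only, rounded up).
 */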
4746 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4747 {
4748         /* Calculate the number of data blocks used by this packet */
4749         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4750 }
4751
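/* Detect a stalled link: if no buffer credits are left and nothing has
 * been transmitted for HCI_ACL_TX_TIMEOUT, assume the controller lost
 * our packets and tear the stalled connections down via
 * hci_link_tx_to(). Skipped for unconfigured controllers.
 */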
4752 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
4753 {
4754         unsigned long last_tx;
4755
4756         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4757                 return;
4758
4759         switch (type) {
4760         case LE_LINK:
4761                 last_tx = hdev->le_last_tx;
4762                 break;
4763         default:
4764                 last_tx = hdev->acl_last_tx;
4765                 break;
4766         }
4767
4768         /* The TX timeout must be longer than the maximum link supervision
4769          * timeout (40.9 seconds).
4770          */
4771         if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
4772                 hci_link_tx_to(hdev, type);
4773 }
4774
4775 /* Schedule SCO */
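/* Drain the per-connection SCO queues in the fair order chosen by
 * hci_low_sent(), resetting the per-connection 'sent' counter just
 * before it would wrap.
 */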
4776 static void hci_sched_sco(struct hci_dev *hdev)
4777 {
4778         struct hci_conn *conn;
4779         struct sk_buff *skb;
4780         int quote;
4781
4782         BT_DBG("%s", hdev->name);
4783
4784         if (!hci_conn_num(hdev, SCO_LINK))
4785                 return;
4786
4787         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4788                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4789                         BT_DBG("skb %p len %d", skb, skb->len);
4790                         hci_send_frame(hdev, skb);
4791
4792                         conn->sent++;
4793                         if (conn->sent == ~0)
4794                                 conn->sent = 0;
4795                 }
4796         }
4797 }
4798
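/* Same as hci_sched_sco(), but for eSCO links; both link types are
 * gated by the controller's shared sco_cnt buffer pool.
 */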
4799 static void hci_sched_esco(struct hci_dev *hdev)
4800 {
4801         struct hci_conn *conn;
4802         struct sk_buff *skb;
4803         int quote;
4804
4805         BT_DBG("%s", hdev->name);
4806
4807         if (!hci_conn_num(hdev, ESCO_LINK))
4808                 return;
4809
4810         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4811                                                      &quote))) {
4812                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4813                         BT_DBG("skb %p len %d", skb, skb->len);
4814                         hci_send_frame(hdev, skb);
4815
4816                         conn->sent++;
4817                         if (conn->sent == ~0)
4818                                 conn->sent = 0;
4819                 }
4820         }
4821 }
4822
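/* Packet-based ACL scheduling: each quote-limited burst stops early if
 * the priority at the head of the channel queue drops, and SCO/eSCO
 * traffic is given a chance after every ACL frame to keep isochronous
 * links fed. If any packets were sent, starved channels get their
 * priority bumped via hci_prio_recalculate().
 */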
4823 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4824 {
4825         unsigned int cnt = hdev->acl_cnt;
4826         struct hci_chan *chan;
4827         struct sk_buff *skb;
4828         int quote;
4829
4830         __check_timeout(hdev, cnt, ACL_LINK);
4831
4832         while (hdev->acl_cnt &&
4833                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4834                 u32 priority = (skb_peek(&chan->data_q))->priority;
4835                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4836                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4837                                skb->len, skb->priority);
4838
4839                         /* Stop if priority has changed */
4840                         if (skb->priority < priority)
4841                                 break;
4842
4843                         skb = skb_dequeue(&chan->data_q);
4844
4845                         hci_conn_enter_active_mode(chan->conn,
4846                                                    bt_cb(skb)->force_active);
4847
4848                         hci_send_frame(hdev, skb);
4849                         hdev->acl_last_tx = jiffies;
4850
4851                         hdev->acl_cnt--;
4852                         chan->sent++;
4853                         chan->conn->sent++;
4854
4855                         /* Send pending SCO packets right away */
4856                         hci_sched_sco(hdev);
4857                         hci_sched_esco(hdev);
4858                 }
4859         }
4860
4861         if (cnt != hdev->acl_cnt)
4862                 hci_prio_recalculate(hdev, ACL_LINK);
4863 }
4864
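/* Block-based ACL scheduling (data block flow control): credits are
 * accounted in controller buffer blocks rather than packets.
 * Scheduling stops as soon as a packet would need more blocks than
 * are free, and both the quote and the counters are decremented by
 * the block count of each sent packet.
 */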
4865 static void hci_sched_acl_blk(struct hci_dev *hdev)
4866 {
4867         unsigned int cnt = hdev->block_cnt;
4868         struct hci_chan *chan;
4869         struct sk_buff *skb;
4870         int quote;
4871         u8 type;
4872
4873         BT_DBG("%s", hdev->name);
4874
4875         if (hdev->dev_type == HCI_AMP)
4876                 type = AMP_LINK;
4877         else
4878                 type = ACL_LINK;
4879
4880         __check_timeout(hdev, cnt, type);
4881
4882         while (hdev->block_cnt > 0 &&
4883                (chan = hci_chan_sent(hdev, type, &quote))) {
4884                 u32 priority = (skb_peek(&chan->data_q))->priority;
4885                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4886                         int blocks;
4887
4888                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4889                                skb->len, skb->priority);
4890
4891                         /* Stop if priority has changed */
4892                         if (skb->priority < priority)
4893                                 break;
4894
4895                         skb = skb_dequeue(&chan->data_q);
4896
4897                         blocks = __get_blocks(hdev, skb);
4898                         if (blocks > hdev->block_cnt)
4899                                 return;
4900
4901                         hci_conn_enter_active_mode(chan->conn,
4902                                                    bt_cb(skb)->force_active);
4903
4904                         hci_send_frame(hdev, skb);
4905                         hdev->acl_last_tx = jiffies;
4906
4907                         hdev->block_cnt -= blocks;
4908                         quote -= blocks;
4909
4910                         chan->sent += blocks;
4911                         chan->conn->sent += blocks;
4912                 }
4913         }
4914
4915         if (cnt != hdev->block_cnt)
4916                 hci_prio_recalculate(hdev, type);
4917 }
4918
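/* Dispatch ACL scheduling according to the flow control mode the
 * controller uses: packet-based (the common case) or block-based.
 */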
4919 static void hci_sched_acl(struct hci_dev *hdev)
4920 {
4921         BT_DBG("%s", hdev->name);
4922
4923         /* No ACL links on a BR/EDR (primary) controller, nothing to do */
4924         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4925                 return;
4926
4927         /* No AMP links on an AMP controller, nothing to do */
4928         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4929                 return;
4930
4931         switch (hdev->flow_ctl_mode) {
4932         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4933                 hci_sched_acl_pkt(hdev);
4934                 break;
4935
4936         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4937                 hci_sched_acl_blk(hdev);
4938                 break;
4939         }
4940 }
4941
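/* LE scheduling: LE has its own buffer pool (le_cnt) when the
 * controller reports separate LE buffers (le_pkts), otherwise LE
 * traffic shares the ACL pool. The remaining credits are written
 * back to whichever pool was used.
 */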
4942 static void hci_sched_le(struct hci_dev *hdev)
4943 {
4944         struct hci_chan *chan;
4945         struct sk_buff *skb;
4946         int quote, cnt, tmp;
4947
4948         BT_DBG("%s", hdev->name);
4949
4950         if (!hci_conn_num(hdev, LE_LINK))
4951                 return;
4952
4953         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4954
4955         __check_timeout(hdev, cnt, LE_LINK);
4956
4957         tmp = cnt;
4958         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4959                 u32 priority = (skb_peek(&chan->data_q))->priority;
4960                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4961                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4962                                skb->len, skb->priority);
4963
4964                         /* Stop if priority has changed */
4965                         if (skb->priority < priority)
4966                                 break;
4967
4968                         skb = skb_dequeue(&chan->data_q);
4969
4970                         hci_send_frame(hdev, skb);
4971                         hdev->le_last_tx = jiffies;
4972
4973                         cnt--;
4974                         chan->sent++;
4975                         chan->conn->sent++;
4976
4977                         /* Send pending SCO packets right away */
4978                         hci_sched_sco(hdev);
4979                         hci_sched_esco(hdev);
4980                 }
4981         }
4982
4983         if (hdev->le_pkts)
4984                 hdev->le_cnt = cnt;
4985         else
4986                 hdev->acl_cnt = cnt;
4987
4988         if (cnt != tmp)
4989                 hci_prio_recalculate(hdev, LE_LINK);
4990 }
4991
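/* TX work: run the per-type schedulers in priority order (SCO/eSCO
 * first, then ACL, then LE) unless userspace has exclusive access via
 * the user channel, then flush any queued raw packets.
 */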
4992 static void hci_tx_work(struct work_struct *work)
4993 {
4994         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4995         struct sk_buff *skb;
4996
4997         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4998                hdev->sco_cnt, hdev->le_cnt);
4999
5000         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5001                 /* Schedule queues and send stuff to HCI driver */
5002                 hci_sched_sco(hdev);
5003                 hci_sched_esco(hdev);
5004                 hci_sched_acl(hdev);
5005                 hci_sched_le(hdev);
5006         }
5007
5008         /* Send any queued raw (unknown type) packets */
5009         while ((skb = skb_dequeue(&hdev->raw_q)))
5010                 hci_send_frame(hdev, skb);
5011 }
5012
5013 /* ----- HCI RX task (incoming data processing) ----- */
5014
5015 /* ACL data packet */
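/* Strip the ACL header, look the connection up by handle and pass the
 * payload to L2CAP; packets for unknown handles are logged and dropped.
 */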
5016 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5017 {
5018         struct hci_acl_hdr *hdr = (void *) skb->data;
5019         struct hci_conn *conn;
5020         __u16 handle, flags;
5021
5022         skb_pull(skb, HCI_ACL_HDR_SIZE);
5023
5024         handle = __le16_to_cpu(hdr->handle);
5025         flags  = hci_flags(handle);
5026         handle = hci_handle(handle);
5027
5028         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5029                handle, flags);
5030
5031         hdev->stat.acl_rx++;
5032
5033         hci_dev_lock(hdev);
5034         conn = hci_conn_hash_lookup_handle(hdev, handle);
5035         hci_dev_unlock(hdev);
5036
5037         if (conn) {
5038                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5039
5040                 /* Send to upper protocol */
5041                 l2cap_recv_acldata(conn, skb, flags);
5042                 return;
5043         } else {
5044                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
5045                            handle);
5046         }
5047
5048         kfree_skb(skb);
5049 }
5050
5051 /* SCO data packet */
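/* Strip the SCO header, look the connection up by handle, record the
 * packet status flags and pass the payload to the SCO layer; packets
 * for unknown handles are logged and dropped.
 */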
5052 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5053 {
5054         struct hci_sco_hdr *hdr = (void *) skb->data;
5055         struct hci_conn *conn;
5056         __u16 handle, flags;
5057
5058         skb_pull(skb, HCI_SCO_HDR_SIZE);
5059
5060         handle = __le16_to_cpu(hdr->handle);
5061         flags  = hci_flags(handle);
5062         handle = hci_handle(handle);
5063
5064         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5065                handle, flags);
5066
5067         hdev->stat.sco_rx++;
5068
5069         hci_dev_lock(hdev);
5070         conn = hci_conn_hash_lookup_handle(hdev, handle);
5071         hci_dev_unlock(hdev);
5072
5073         if (conn) {
5074                 /* Send to upper protocol */
5075                 bt_cb(skb)->sco.pkt_status = flags & 0x03;
5076                 sco_recv_scodata(conn, skb);
5077                 return;
5078         } else {
5079                 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
5080                            handle);
5081         }
5082
5083         kfree_skb(skb);
5084 }
5085
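/* A request is complete when the head of the command queue carries
 * HCI_REQ_START, i.e. the next queued command begins a new request.
 * An empty queue also means the current request has finished.
 */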
5086 static bool hci_req_is_complete(struct hci_dev *hdev)
5087 {
5088         struct sk_buff *skb;
5089
5090         skb = skb_peek(&hdev->cmd_q);
5091         if (!skb)
5092                 return true;
5093
5094         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
5095 }
5096
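/* Re-queue a clone of the last sent command at the head of the command
 * queue. Used when a spontaneous reset complete event swallowed the
 * response to the command that was actually pending. HCI_OP_RESET
 * itself is never resent.
 */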
5097 static void hci_resend_last(struct hci_dev *hdev)
5098 {
5099         struct hci_command_hdr *sent;
5100         struct sk_buff *skb;
5101         u16 opcode;
5102
5103         if (!hdev->sent_cmd)
5104                 return;
5105
5106         sent = (void *) hdev->sent_cmd->data;
5107         opcode = __le16_to_cpu(sent->opcode);
5108         if (opcode == HCI_OP_RESET)
5109                 return;
5110
5111         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5112         if (!skb)
5113                 return;
5114
5115         skb_queue_head(&hdev->cmd_q, skb);
5116         queue_work(hdev->workqueue, &hdev->cmd_work);
5117 }
5118
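/* Match a command completion against the current request. If the
 * request has finished (or a command failed), return the completion
 * callback to invoke: either the one stored in hdev->sent_cmd for a
 * fully completed request, or one found while flushing the remaining
 * commands of an aborted request from cmd_q.
 */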
5119 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
5120                           hci_req_complete_t *req_complete,
5121                           hci_req_complete_skb_t *req_complete_skb)
5122 {
5123         struct sk_buff *skb;
5124         unsigned long flags;
5125
5126         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5127
5128         /* If the completed command doesn't match the last one that was
5129          * sent, it needs special handling.
5130          */
5131         if (!hci_sent_cmd_data(hdev, opcode)) {
5132                 /* Some CSR-based controllers generate a spontaneous
5133                  * reset complete event during init, and any pending
5134                  * command will then never be completed. In that case
5135                  * we need to resend whatever was the last sent
5136                  * command.
5137                  */
5138                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5139                         hci_resend_last(hdev);
5140
5141                 return;
5142         }
5143
5144         /* If we reach this point this event matches the last command sent */
5145         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
5146
5147         /* If the command succeeded and there are still more commands in
5148          * this request, the request is not yet complete.
5149          */
5150         if (!status && !hci_req_is_complete(hdev))
5151                 return;
5152
5153         /* If this was the last command in a request, the complete
5154          * callback is found in hdev->sent_cmd instead of the
5155          * command queue (hdev->cmd_q).
5156          */
5157         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
5158                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
5159                 return;
5160         }
5161
5162         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
5163                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
5164                 return;
5165         }
5166
5167         /* Remove all pending commands belonging to this request */
5168         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5169         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5170                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
5171                         __skb_queue_head(&hdev->cmd_q, skb);
5172                         break;
5173                 }
5174
5175                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
5176                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
5177                 else
5178                         *req_complete = bt_cb(skb)->hci.req_complete;
5179                 dev_kfree_skb_irq(skb);
5180         }
5181         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5182 }
5183
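/* RX work: give every received frame to the monitor and, in
 * promiscuous mode, to the HCI sockets, then dispatch it by packet
 * type. Frames are dropped while userspace owns the device through a
 * user channel (except during setup), and data packets are dropped
 * while the controller is still initializing.
 */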
5184 static void hci_rx_work(struct work_struct *work)
5185 {
5186         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5187         struct sk_buff *skb;
5188
5189         BT_DBG("%s", hdev->name);
5190
5191         while ((skb = skb_dequeue(&hdev->rx_q))) {
5192                 /* Send copy to monitor */
5193                 hci_send_to_monitor(hdev, skb);
5194
5195                 if (atomic_read(&hdev->promisc)) {
5196                         /* Send copy to the sockets */
5197                         hci_send_to_sock(hdev, skb);
5198                 }
5199
5200                 /* If the device has been opened in HCI_USER_CHANNEL,
5201                  * userspace has exclusive access to the device.
5202                  * While the device is in HCI_INIT, we still need to
5203                  * pass the packets to the driver so that it can
5204                  * complete its setup().
5205                  */
5206                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5207                     !test_bit(HCI_INIT, &hdev->flags)) {
5208                         kfree_skb(skb);
5209                         continue;
5210                 }
5211
5212                 if (test_bit(HCI_INIT, &hdev->flags)) {
5213                         /* Don't process data packets in these states. */
5214                         switch (hci_skb_pkt_type(skb)) {
5215                         case HCI_ACLDATA_PKT:
5216                         case HCI_SCODATA_PKT:
5217                         case HCI_ISODATA_PKT:
5218                                 kfree_skb(skb);
5219                                 continue;
5220                         }
5221                 }
5222
5223                 /* Process frame */
5224                 switch (hci_skb_pkt_type(skb)) {
5225                 case HCI_EVENT_PKT:
5226                         BT_DBG("%s Event packet", hdev->name);
5227                         hci_event_packet(hdev, skb);
5228                         break;
5229
5230                 case HCI_ACLDATA_PKT:
5231                         BT_DBG("%s ACL data packet", hdev->name);
5232                         hci_acldata_packet(hdev, skb);
5233                         break;
5234
5235                 case HCI_SCODATA_PKT:
5236                         BT_DBG("%s SCO data packet", hdev->name);
5237                         hci_scodata_packet(hdev, skb);
5238                         break;
5239
5240                 default:
5241                         kfree_skb(skb);
5242                         break;
5243                 }
5244         }
5245 }
5246
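/* Command work: if the controller has a free command slot (cmd_cnt),
 * send the next queued command and keep a clone in hdev->sent_cmd for
 * completion matching. The command timer is armed unless a reset is
 * in progress; if cloning fails, the command is re-queued and the
 * work rescheduled.
 */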
5247 static void hci_cmd_work(struct work_struct *work)
5248 {
5249         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5250         struct sk_buff *skb;
5251
5252         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5253                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5254
5255         /* Send queued commands */
5256         if (atomic_read(&hdev->cmd_cnt)) {
5257                 skb = skb_dequeue(&hdev->cmd_q);
5258                 if (!skb)
5259                         return;
5260
5261                 kfree_skb(hdev->sent_cmd);
5262
5263                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5264                 if (hdev->sent_cmd) {
5265                         if (hci_req_status_pend(hdev))
5266                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
5267                         atomic_dec(&hdev->cmd_cnt);
5268                         hci_send_frame(hdev, skb);
5269                         if (test_bit(HCI_RESET, &hdev->flags))
5270                                 cancel_delayed_work(&hdev->cmd_timer);
5271                         else
5272                                 schedule_delayed_work(&hdev->cmd_timer,
5273                                                       HCI_CMD_TIMEOUT);
5274                 } else {
5275                         skb_queue_head(&hdev->cmd_q, skb);
5276                         queue_work(hdev->workqueue, &hdev->cmd_work);
5277                 }
5278         }
5279 }