Bluetooth: Add H/W TX timeout error MGMT event
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
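
/* Usage sketch for the node created by hci_debugfs_create_basic() below.
 * The paths assume debugfs is mounted at /sys/kernel/debug and that an
 * hci0 controller exists; both are illustrative assumptions, not
 * guaranteed by this file:
 *
 *   cat  /sys/kernel/debug/bluetooth/hci0/dut_mode     # reads "Y" or "N"
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode # sends Enable DUT Mode
 *   echo N > /sys/kernel/debug/bluetooth/hci0/dut_mode # sends HCI Reset
 */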

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        bool enable;
        int err;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or in user channel operation, then there is no need
         * for the vendor callback. Instead just store the desired value and
         * the setting will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}
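
/* The request functions in this file queue HCI commands on a struct
 * hci_request and then run the whole batch synchronously. A minimal
 * sketch of the pattern (example_req is a hypothetical name used only
 * for illustration):
 *
 *   static int example_req(struct hci_request *req, unsigned long opt)
 *   {
 *           hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
 *           return 0;
 *   }
 *
 *   err = hci_req_sync(hdev, example_req, 0, HCI_INIT_TIMEOUT, NULL);
 */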

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
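
/* The Connection Accept Timeout written above is expressed in 0.625 ms
 * baseband slots: 0x7d00 = 32000 slots, and 32000 * 0.625 ms = 20000 ms,
 * which is the "~20 secs" noted in bredr_setup().
 */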

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
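
/* Event mask layout used above: the mask is transmitted little-endian,
 * so event mask bit N lives in events[N / 8] as (1 << (N % 8)). Worked
 * example: Disconnection Complete is event mask bit 4, hence byte 0,
 * value 0x10 -- matching events[0] |= 0x10 in the LE-only branch above.
 */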

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * also be available. However some controllers list the
                 * max_page as 0 as long as SSP has not been enabled. To
                 * achieve proper debugging output, force max_page to at
                 * least 1.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If the Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
                changed = true;
        }

        /* If the Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for the Set
         * Event Mask Page 2 command, but then actually do not support it.
         * Since the default value is all bits set to zero, the command is
         * only required if the event mask has to be changed. In case no
         * change to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If the controller supports the Connection Parameters
                 * Request Link Layer Procedure, enable the corresponding
                 * event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the Channel Selection
                 * Algorithm #2 feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;      /* LE PHY Update Complete */

                /* If the controller supports LE Set Extended Scan Parameters
                 * and LE Set Extended Scan Enable commands, enable the
                 * corresponding event.
                 */
                if (use_ext_scan(hdev))
                        events[1] |= 0x10;      /* LE Extended Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Extended Create Connection
                 * command, enable the corresponding event.
                 */
                if (use_ext_conn(hdev))
                        events[1] |= 0x02;      /* LE Enhanced Connection
                                                 * Complete
                                                 */

                /* If the controller supports the LE Extended Advertising
                 * command, enable the corresponding event.
                 */
                if (ext_adv_capable(hdev))
                        events[2] |= 0x02;      /* LE Advertising Set
                                                 * Terminated
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                /* Read LE Advertising Channel TX Power */
                if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
                        /* The HCI spec forbids mixing legacy and extended
                         * advertising commands, and READ_ADV_TX_POWER is
                         * counted among the legacy ones. So do not call it
                         * if extended advertising is supported; otherwise
                         * the controller will return COMMAND_DISALLOWED for
                         * extended commands.
                         */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->commands[34] & 0x40) {
                        /* Read LE Resolving List Size */
                        hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[34] & 0x20) {
                        /* Clear LE Resolving List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                if (ext_adv_capable(hdev)) {
                        /* Read LE Number of Supported Advertising Sets */
                        hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
                                    0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands, and send the command only if it
         * is marked as supported. If not supported, assume that the
         * controller does not have actual support for stored link keys,
         * which makes this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
                cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                cp.all_phys = 0x00;
                cp.tx_phys = hdev->le_tx_def_phys;
                cp.rx_phys = hdev->le_rx_def_phys;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}
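
/* Summary of the staged bring-up driven by __hci_init() above:
 *   init1 - reset (unless quirked) plus transport basics (features,
 *           version, BD address; AMP info for AMP controllers)
 *   init2 - BR/EDR and LE setup, local supported commands, SSP/EIR
 *   init3 - event masks, link policy, LE feature-dependent reads
 *   init4 - stored-link-key cleanup, codecs, secure connections,
 *           default data length and default PHYs
 * AMP controllers stop after init2, as checked above.
 */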

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
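
/* Callers must balance the reference taken by hci_dev_get() with
 * hci_dev_put(), e.g. (index 0 is illustrative):
 *
 *   struct hci_dev *hdev = hci_dev_get(0);
 *   if (hdev) {
 *           ... use hdev ...
 *           hci_dev_put(hdev);
 *   }
 */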

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

#ifdef TIZEN_BT
bool hci_le_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->le_discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_le_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name,
                        hdev->le_discovery.state, state);

        if (hdev->le_discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (hdev->le_discovery.state != DISCOVERY_STARTING)
                        mgmt_le_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_le_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->le_discovery.state = state;
}

static void hci_tx_timeout_error_evt(struct hci_dev *hdev)
{
        BT_ERR("%s H/W TX Timeout error", hdev->name);

        mgmt_tx_timeout_error(hdev);
}
#endif
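
/* hci_tx_timeout_error_evt() above is the core of this change: when the
 * driver reports a hardware TX timeout, the condition is logged and then
 * forwarded to userspace as a MGMT event via mgmt_tx_timeout_error(), a
 * Tizen-specific MGMT helper (expected to live in mgmt.c under TIZEN_BT).
 */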

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}
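
/* Note: every inquiry_entry is linked on cache->all, and the entries on
 * the unknown/resolve lists are the same objects, so freeing via
 * cache->all covers them all; the two secondary lists only need to be
 * re-initialised afterwards.
 */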

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
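
/* The resolve list is kept sorted by ascending abs(rssi), so entries
 * with the strongest signal are name-resolved first; e.g. an entry at
 * -40 dBm (abs 40) sorts before one at -70 dBm (abs 70). Entries already
 * in NAME_PENDING state are skipped over when picking the insertion
 * point.
 */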

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep. Therefore allocate a temporary
         * buffer and then copy it to user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
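
/* Userspace reaches hci_inquiry() through the HCIINQUIRY ioctl on a raw
 * HCI socket. A hedged usage sketch, with error handling omitted; the
 * 8-unit length, the 255-response cap and the back-to-back buffer layout
 * (request header followed by the result array, as written back above)
 * are illustrative assumptions:
 *
 *   struct {
 *           struct hci_inquiry_req ir;
 *           struct inquiry_info    info[255];
 *   } buf = { .ir = { .dev_id  = 0,
 *                     .flags   = IREQ_CACHE_FLUSH,
 *                     .lap     = { 0x33, 0x8b, 0x9e },  // GIAC
 *                     .length  = 8,
 *                     .num_rsp = 255 } };
 *
 *   ioctl(hci_socket_fd, HCIINQUIRY, &buf);
 */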
1409
1410 static int hci_dev_do_open(struct hci_dev *hdev)
1411 {
1412         int ret = 0;
1413
1414         BT_DBG("%s %p", hdev->name, hdev);
1415
1416         hci_req_sync_lock(hdev);
1417
1418         if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1419                 ret = -ENODEV;
1420                 goto done;
1421         }
1422
1423         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1424             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1425                 /* Check for rfkill but allow the HCI setup stage to
1426                  * proceed (which in itself doesn't cause any RF activity).
1427                  */
1428                 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1429                         ret = -ERFKILL;
1430                         goto done;
1431                 }
1432
1433                 /* Check for valid public address or a configured static
1434                  * random adddress, but let the HCI setup proceed to
1435                  * be able to determine if there is a public address
1436                  * or not.
1437                  *
1438                  * In case of user channel usage, it is not important
1439                  * if a public address or static random address is
1440                  * available.
1441                  *
1442                  * This check is only valid for BR/EDR controllers
1443                  * since AMP controllers do not have an address.
1444                  */
1445                 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1446                     hdev->dev_type == HCI_PRIMARY &&
1447                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1448                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1449                         ret = -EADDRNOTAVAIL;
1450                         goto done;
1451                 }
1452         }
1453
1454         if (test_bit(HCI_UP, &hdev->flags)) {
1455                 ret = -EALREADY;
1456                 goto done;
1457         }
1458
1459         if (hdev->open(hdev)) {
1460                 ret = -EIO;
1461                 goto done;
1462         }
1463
1464         set_bit(HCI_RUNNING, &hdev->flags);
1465         hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1466
1467         atomic_set(&hdev->cmd_cnt, 1);
1468         set_bit(HCI_INIT, &hdev->flags);
1469
1470         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1471             test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1472                 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1473
1474                 if (hdev->setup)
1475                         ret = hdev->setup(hdev);
1476
1477                 /* The transport driver can set these quirks before
1478                  * creating the HCI device or in its setup callback.
1479                  *
1480                  * In case any of them is set, the controller has to
1481                  * start up as unconfigured.
1482                  */
1483                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1484                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1485                         hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1486
1487                 /* For an unconfigured controller it is required to
1488                  * read at least the version information provided by
1489                  * the Read Local Version Information command.
1490                  *
1491                  * If the set_bdaddr driver callback is provided, then
1492                  * also the original Bluetooth public device address
1493                  * will be read using the Read BD Address command.
1494                  */
1495                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1496                         ret = __hci_unconf_init(hdev);
1497         }
1498
1499         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1500                 /* If public address change is configured, ensure that
1501                  * the address gets programmed. If the driver does not
1502                  * support changing the public address, fail the power
1503                  * on procedure.
1504                  */
1505                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1506                     hdev->set_bdaddr)
1507                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1508                 else
1509                         ret = -EADDRNOTAVAIL;
1510         }
1511
1512         if (!ret) {
1513                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1514                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1515                         ret = __hci_init(hdev);
1516                         if (!ret && hdev->post_init)
1517                                 ret = hdev->post_init(hdev);
1518                 }
1519         }
1520
1521         /* If the HCI Reset command is clearing all diagnostic settings,
1522          * then they need to be reprogrammed after the init procedure
1523          * has completed.
1524          */
1525         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1526             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1527             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1528                 ret = hdev->set_diag(hdev, true);
1529
1530         clear_bit(HCI_INIT, &hdev->flags);
1531
1532         if (!ret) {
1533                 hci_dev_hold(hdev);
1534                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1535                 hci_adv_instances_set_rpa_expired(hdev, true);
1536                 set_bit(HCI_UP, &hdev->flags);
1537                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1538                 hci_leds_update_powered(hdev, true);
1539                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1540                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1541                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1542                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1543                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1544                     hdev->dev_type == HCI_PRIMARY) {
1545                         ret = __hci_req_hci_power_on(hdev);
1546                         mgmt_power_on(hdev, ret);
1547                 }
1548         } else {
1549                 /* Init failed, cleanup */
1550                 flush_work(&hdev->tx_work);
1551                 flush_work(&hdev->cmd_work);
1552                 flush_work(&hdev->rx_work);
1553
1554                 skb_queue_purge(&hdev->cmd_q);
1555                 skb_queue_purge(&hdev->rx_q);
1556
1557                 if (hdev->flush)
1558                         hdev->flush(hdev);
1559
1560                 if (hdev->sent_cmd) {
1561                         kfree_skb(hdev->sent_cmd);
1562                         hdev->sent_cmd = NULL;
1563                 }
1564
1565                 clear_bit(HCI_RUNNING, &hdev->flags);
1566                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1567
1568                 hdev->close(hdev);
1569                 hdev->flags &= BIT(HCI_RAW);
1570         }
1571
1572 done:
1573         hci_req_sync_unlock(hdev);
1574         return ret;
1575 }
1576
1577 /* ---- HCI ioctl helpers ---- */
1578
1579 int hci_dev_open(__u16 dev)
1580 {
1581         struct hci_dev *hdev;
1582         int err;
1583
1584         hdev = hci_dev_get(dev);
1585         if (!hdev)
1586                 return -ENODEV;
1587
1588         /* Devices that are marked as unconfigured can only be powered
1589          * up as user channel. Trying to bring them up as normal devices
1590          * will result in a failure. Only user channel operation is
1591          * possible.
1592          *
1593          * When this function is called for a user channel, the flag
1594          * HCI_USER_CHANNEL will be set first before attempting to
1595          * open the device.
1596          */
1597         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1598             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1599                 err = -EOPNOTSUPP;
1600                 goto done;
1601         }
1602
1603         /* We need to ensure that no other power on/off work is pending
1604          * before proceeding to call hci_dev_do_open. This is
1605          * particularly important if the setup procedure has not yet
1606          * completed.
1607          */
1608         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1609                 cancel_delayed_work(&hdev->power_off);
1610
1611         /* After this call it is guaranteed that the setup procedure
1612          * has finished. This means that error conditions like RFKILL
1613          * or the lack of a valid public or static random address apply.
1614          */
1615         flush_workqueue(hdev->req_workqueue);
1616
1617         /* For controllers not using the management interface and that
1618          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1619          * so that pairing works for them. Once the management interface
1620          * is in use this bit will be cleared again and userspace has
1621          * to explicitly enable it.
1622          */
1623         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1624             !hci_dev_test_flag(hdev, HCI_MGMT))
1625                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1626
1627         err = hci_dev_do_open(hdev);
1628
1629 done:
1630         hci_dev_put(hdev);
1631         return err;
1632 }
1633
1634 /* This function requires the caller holds hdev->lock */
1635 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1636 {
1637         struct hci_conn_params *p;
1638
1639         list_for_each_entry(p, &hdev->le_conn_params, list) {
1640                 if (p->conn) {
1641                         hci_conn_drop(p->conn);
1642                         hci_conn_put(p->conn);
1643                         p->conn = NULL;
1644                 }
1645                 list_del_init(&p->action);
1646         }
1647
1648         BT_DBG("All LE pending actions cleared");
1649 }
1650
1651 int hci_dev_do_close(struct hci_dev *hdev)
1652 {
1653         bool auto_off;
1654
1655         BT_DBG("%s %p", hdev->name, hdev);
1656
1657         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1658             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1659             test_bit(HCI_UP, &hdev->flags)) {
1660                 /* Execute vendor specific shutdown routine */
1661                 if (hdev->shutdown)
1662                         hdev->shutdown(hdev);
1663         }
1664
1665         cancel_delayed_work(&hdev->power_off);
1666
1667         hci_request_cancel_all(hdev);
1668         hci_req_sync_lock(hdev);
1669
1670         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1671                 cancel_delayed_work_sync(&hdev->cmd_timer);
1672                 hci_req_sync_unlock(hdev);
1673                 return 0;
1674         }
1675
1676         hci_leds_update_powered(hdev, false);
1677
1678         /* Flush RX and TX works */
1679         flush_work(&hdev->tx_work);
1680         flush_work(&hdev->rx_work);
1681
1682         if (hdev->discov_timeout > 0) {
1683                 hdev->discov_timeout = 0;
1684                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1685                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1686         }
1687
1688         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1689                 cancel_delayed_work(&hdev->service_cache);
1690
1691         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1692                 struct adv_info *adv_instance;
1693
1694                 cancel_delayed_work_sync(&hdev->rpa_expired);
1695
1696                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1697                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1698         }
1699
1700         /* Avoid potential lockdep warnings from the *_flush() calls by
1701          * ensuring the workqueue is empty up front.
1702          */
1703         drain_workqueue(hdev->workqueue);
1704
1705         hci_dev_lock(hdev);
1706
1707         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1708
1709         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1710
1711         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1712             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1713             hci_dev_test_flag(hdev, HCI_MGMT))
1714                 __mgmt_power_off(hdev);
1715
1716         hci_inquiry_cache_flush(hdev);
1717         hci_pend_le_actions_clear(hdev);
1718         hci_conn_hash_flush(hdev);
1719         hci_dev_unlock(hdev);
1720
1721         smp_unregister(hdev);
1722
1723         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1724
1725         if (hdev->flush)
1726                 hdev->flush(hdev);
1727
1728         /* Reset device */
1729         skb_queue_purge(&hdev->cmd_q);
1730         atomic_set(&hdev->cmd_cnt, 1);
1731         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1732             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1733                 set_bit(HCI_INIT, &hdev->flags);
1734                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1735                 clear_bit(HCI_INIT, &hdev->flags);
1736         }
1737
1738         /* Flush cmd work */
1739         flush_work(&hdev->cmd_work);
1740
1741         /* Drop queues */
1742         skb_queue_purge(&hdev->rx_q);
1743         skb_queue_purge(&hdev->cmd_q);
1744         skb_queue_purge(&hdev->raw_q);
1745
1746         /* Drop last sent command */
1747         if (hdev->sent_cmd) {
1748                 cancel_delayed_work_sync(&hdev->cmd_timer);
1749                 kfree_skb(hdev->sent_cmd);
1750                 hdev->sent_cmd = NULL;
1751         }
1752
1753         clear_bit(HCI_RUNNING, &hdev->flags);
1754         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1755
1756         /* After this point our queues are empty
1757          * and no tasks are scheduled. */
1758         hdev->close(hdev);
1759
1760         /* Clear flags */
1761         hdev->flags &= BIT(HCI_RAW);
1762         hci_dev_clear_volatile_flags(hdev);
1763
1764         /* Controller radio is available but is currently powered down */
1765         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1766
1767         memset(hdev->eir, 0, sizeof(hdev->eir));
1768         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1769         bacpy(&hdev->random_addr, BDADDR_ANY);
1770
1771         hci_req_sync_unlock(hdev);
1772
1773         hci_dev_put(hdev);
1774         return 0;
1775 }
1776
1777 int hci_dev_close(__u16 dev)
1778 {
1779         struct hci_dev *hdev;
1780         int err;
1781
1782         hdev = hci_dev_get(dev);
1783         if (!hdev)
1784                 return -ENODEV;
1785
1786         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1787                 err = -EBUSY;
1788                 goto done;
1789         }
1790
1791         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1792                 cancel_delayed_work(&hdev->power_off);
1793
1794         err = hci_dev_do_close(hdev);
1795
1796 done:
1797         hci_dev_put(hdev);
1798         return err;
1799 }
1800
1801 static int hci_dev_do_reset(struct hci_dev *hdev)
1802 {
1803         int ret;
1804
1805         BT_DBG("%s %p", hdev->name, hdev);
1806
1807         hci_req_sync_lock(hdev);
1808
1809         /* Drop queues */
1810         skb_queue_purge(&hdev->rx_q);
1811         skb_queue_purge(&hdev->cmd_q);
1812
1813         /* Avoid potential lockdep warnings from the *_flush() calls by
1814          * ensuring the workqueue is empty up front.
1815          */
1816         drain_workqueue(hdev->workqueue);
1817
1818         hci_dev_lock(hdev);
1819         hci_inquiry_cache_flush(hdev);
1820         hci_conn_hash_flush(hdev);
1821         hci_dev_unlock(hdev);
1822
1823         if (hdev->flush)
1824                 hdev->flush(hdev);
1825
1826         atomic_set(&hdev->cmd_cnt, 1);
1827         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1828
1829         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1830
1831         hci_req_sync_unlock(hdev);
1832         return ret;
1833 }
1834
1835 int hci_dev_reset(__u16 dev)
1836 {
1837         struct hci_dev *hdev;
1838         int err;
1839
1840         hdev = hci_dev_get(dev);
1841         if (!hdev)
1842                 return -ENODEV;
1843
1844         if (!test_bit(HCI_UP, &hdev->flags)) {
1845                 err = -ENETDOWN;
1846                 goto done;
1847         }
1848
1849         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1850                 err = -EBUSY;
1851                 goto done;
1852         }
1853
1854         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1855                 err = -EOPNOTSUPP;
1856                 goto done;
1857         }
1858
1859         err = hci_dev_do_reset(hdev);
1860
1861 done:
1862         hci_dev_put(hdev);
1863         return err;
1864 }
1865
1866 int hci_dev_reset_stat(__u16 dev)
1867 {
1868         struct hci_dev *hdev;
1869         int ret = 0;
1870
1871         hdev = hci_dev_get(dev);
1872         if (!hdev)
1873                 return -ENODEV;
1874
1875         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1876                 ret = -EBUSY;
1877                 goto done;
1878         }
1879
1880         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1881                 ret = -EOPNOTSUPP;
1882                 goto done;
1883         }
1884
1885         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1886
1887 done:
1888         hci_dev_put(hdev);
1889         return ret;
1890 }
1891
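/* Keep the mgmt connectable/discoverable flags in sync after a legacy
 * HCISETSCAN ioctl. The scan value mirrors the HCI Write Scan Enable
 * parameter:
 *
 *   0x00  no scans                (neither connectable nor discoverable)
 *   0x01  inquiry scan only       (discoverable)
 *   0x02  page scan only          (connectable)
 *   0x03  inquiry and page scan   (connectable and discoverable)
 */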
1892 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1893 {
1894         bool conn_changed, discov_changed;
1895
1896         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1897
1898         if ((scan & SCAN_PAGE))
1899                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1900                                                           HCI_CONNECTABLE);
1901         else
1902                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1903                                                            HCI_CONNECTABLE);
1904
1905         if ((scan & SCAN_INQUIRY)) {
1906                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1907                                                             HCI_DISCOVERABLE);
1908         } else {
1909                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1910                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1911                                                              HCI_DISCOVERABLE);
1912         }
1913
1914         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1915                 return;
1916
1917         if (conn_changed || discov_changed) {
1918                 /* In case this was disabled through mgmt */
1919                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1920
1921                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1922                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1923
1924                 mgmt_new_settings(hdev);
1925         }
1926 }
1927
1928 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1929 {
1930         struct hci_dev *hdev;
1931         struct hci_dev_req dr;
1932         int err = 0;
1933
1934         if (copy_from_user(&dr, arg, sizeof(dr)))
1935                 return -EFAULT;
1936
1937         hdev = hci_dev_get(dr.dev_id);
1938         if (!hdev)
1939                 return -ENODEV;
1940
1941         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1942                 err = -EBUSY;
1943                 goto done;
1944         }
1945
1946         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1947                 err = -EOPNOTSUPP;
1948                 goto done;
1949         }
1950
1951         if (hdev->dev_type != HCI_PRIMARY) {
1952                 err = -EOPNOTSUPP;
1953                 goto done;
1954         }
1955
1956         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1957                 err = -EOPNOTSUPP;
1958                 goto done;
1959         }
1960
1961         switch (cmd) {
1962         case HCISETAUTH:
1963                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1964                                    HCI_INIT_TIMEOUT, NULL);
1965                 break;
1966
1967         case HCISETENCRYPT:
1968                 if (!lmp_encrypt_capable(hdev)) {
1969                         err = -EOPNOTSUPP;
1970                         break;
1971                 }
1972
1973                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1974                         /* Auth must be enabled first */
1975                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1976                                            HCI_INIT_TIMEOUT, NULL);
1977                         if (err)
1978                                 break;
1979                 }
1980
1981                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1982                                    HCI_INIT_TIMEOUT, NULL);
1983                 break;
1984
1985         case HCISETSCAN:
1986                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1987                                    HCI_INIT_TIMEOUT, NULL);
1988
1989                 /* Ensure that the connectable and discoverable states
1990                  * get correctly modified as this was a non-mgmt change.
1991                  */
1992                 if (!err)
1993                         hci_update_scan_state(hdev, dr.dev_opt);
1994                 break;
1995
1996         case HCISETLINKPOL:
1997                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1998                                    HCI_INIT_TIMEOUT, NULL);
1999                 break;
2000
2001         case HCISETLINKMODE:
2002                 hdev->link_mode = ((__u16) dr.dev_opt) &
2003                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2004                 break;
2005
2006         case HCISETPTYPE:
2007                 if (hdev->pkt_type == (__u16) dr.dev_opt)
2008                         break;
2009
2010                 hdev->pkt_type = (__u16) dr.dev_opt;
2011                 mgmt_phy_configuration_changed(hdev, NULL);
2012                 break;
2013
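        /* The two MTU ioctls below pack a pair of 16-bit values into the
         * 32-bit dev_opt word: the first half-word in memory is the packet
         * count and the second is the MTU. On a little-endian system a
         * (hypothetical) userspace caller would therefore do:
         *
         *   struct hci_dev_req dr = { .dev_id = 0 };
         *   dr.dev_opt = (acl_mtu << 16) | acl_pkts;
         *   ioctl(hci_sk, HCISETACLMTU, &dr);
         */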
2014         case HCISETACLMTU:
2015                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2016                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2017                 break;
2018
2019         case HCISETSCOMTU:
2020                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2021                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2022                 break;
2023
2024         default:
2025                 err = -EINVAL;
2026                 break;
2027         }
2028
2029 done:
2030         hci_dev_put(hdev);
2031         return err;
2032 }
2033
2034 int hci_get_dev_list(void __user *arg)
2035 {
2036         struct hci_dev *hdev;
2037         struct hci_dev_list_req *dl;
2038         struct hci_dev_req *dr;
2039         int n = 0, size, err;
2040         __u16 dev_num;
2041
2042         if (get_user(dev_num, (__u16 __user *) arg))
2043                 return -EFAULT;
2044
2045         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2046                 return -EINVAL;
2047
2048         size = sizeof(*dl) + dev_num * sizeof(*dr);
2049
2050         dl = kzalloc(size, GFP_KERNEL);
2051         if (!dl)
2052                 return -ENOMEM;
2053
2054         dr = dl->dev_req;
2055
2056         read_lock(&hci_dev_list_lock);
2057         list_for_each_entry(hdev, &hci_dev_list, list) {
2058                 unsigned long flags = hdev->flags;
2059
2060                 /* When auto-off is configured the transport is still
2061                  * running, but the device should nevertheless be
2062                  * reported as down.
2063                  */
2064                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2065                         flags &= ~BIT(HCI_UP);
2066
2067                 (dr + n)->dev_id  = hdev->id;
2068                 (dr + n)->dev_opt = flags;
2069
2070                 if (++n >= dev_num)
2071                         break;
2072         }
2073         read_unlock(&hci_dev_list_lock);
2074
2075         dl->dev_num = n;
2076         size = sizeof(*dl) + n * sizeof(*dr);
2077
2078         err = copy_to_user(arg, dl, size);
2079         kfree(dl);
2080
2081         return err ? -EFAULT : 0;
2082 }
2083
2084 int hci_get_dev_info(void __user *arg)
2085 {
2086         struct hci_dev *hdev;
2087         struct hci_dev_info di;
2088         unsigned long flags;
2089         int err = 0;
2090
2091         if (copy_from_user(&di, arg, sizeof(di)))
2092                 return -EFAULT;
2093
2094         hdev = hci_dev_get(di.dev_id);
2095         if (!hdev)
2096                 return -ENODEV;
2097
2098         /* When auto-off is configured the transport is still
2099          * running, but the device should nevertheless be
2100          * reported as down.
2101          */
2102         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2103                 flags = hdev->flags & ~BIT(HCI_UP);
2104         else
2105                 flags = hdev->flags;
2106
2107         strcpy(di.name, hdev->name);
2108         di.bdaddr   = hdev->bdaddr;
2109         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2110         di.flags    = flags;
2111         di.pkt_type = hdev->pkt_type;
2112         if (lmp_bredr_capable(hdev)) {
2113                 di.acl_mtu  = hdev->acl_mtu;
2114                 di.acl_pkts = hdev->acl_pkts;
2115                 di.sco_mtu  = hdev->sco_mtu;
2116                 di.sco_pkts = hdev->sco_pkts;
2117         } else {
2118                 di.acl_mtu  = hdev->le_mtu;
2119                 di.acl_pkts = hdev->le_pkts;
2120                 di.sco_mtu  = 0;
2121                 di.sco_pkts = 0;
2122         }
2123         di.link_policy = hdev->link_policy;
2124         di.link_mode   = hdev->link_mode;
2125
2126         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2127         memcpy(&di.features, &hdev->features, sizeof(di.features));
2128
2129         if (copy_to_user(arg, &di, sizeof(di)))
2130                 err = -EFAULT;
2131
2132         hci_dev_put(hdev);
2133
2134         return err;
2135 }
2136
2137 /* ---- Interface to HCI drivers ---- */
2138
2139 static int hci_rfkill_set_block(void *data, bool blocked)
2140 {
2141         struct hci_dev *hdev = data;
2142
2143         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2144
2145         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2146                 return -EBUSY;
2147
2148         if (blocked) {
2149                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2150                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2151                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2152                         hci_dev_do_close(hdev);
2153         } else {
2154                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2155         }
2156
2157         return 0;
2158 }
2159
2160 static const struct rfkill_ops hci_rfkill_ops = {
2161         .set_block = hci_rfkill_set_block,
2162 };
2163
2164 static void hci_power_on(struct work_struct *work)
2165 {
2166         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2167         int err;
2168
2169         BT_DBG("%s", hdev->name);
2170
2171         if (test_bit(HCI_UP, &hdev->flags) &&
2172             hci_dev_test_flag(hdev, HCI_MGMT) &&
2173             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2174                 cancel_delayed_work(&hdev->power_off);
2175                 hci_req_sync_lock(hdev);
2176                 err = __hci_req_hci_power_on(hdev);
2177                 hci_req_sync_unlock(hdev);
2178                 mgmt_power_on(hdev, err);
2179                 return;
2180         }
2181
2182         err = hci_dev_do_open(hdev);
2183         if (err < 0) {
2184                 hci_dev_lock(hdev);
2185                 mgmt_set_powered_failed(hdev, err);
2186                 hci_dev_unlock(hdev);
2187                 return;
2188         }
2189
2190         /* During the HCI setup phase, a few error conditions are
2191          * ignored and they need to be checked now. If they are still
2192          * valid, it is important to turn the device back off.
2193          */
2194         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2195             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2196             (hdev->dev_type == HCI_PRIMARY &&
2197              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2198              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2199                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2200                 hci_dev_do_close(hdev);
2201         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2202                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2203                                    HCI_AUTO_OFF_TIMEOUT);
2204         }
2205
2206         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2207                 /* For unconfigured devices, set the HCI_RAW flag
2208                  * so that userspace can easily identify them.
2209                  */
2210                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2211                         set_bit(HCI_RAW, &hdev->flags);
2212
2213                 /* For fully configured devices, this will send
2214                  * the Index Added event. For unconfigured devices,
2215                  * it will send the Unconfigured Index Added event.
2216                  *
2217                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2218                  * and no event will be sent.
2219                  */
2220                 mgmt_index_added(hdev);
2221         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2222                 /* Once the controller is configured, it is
2223                  * important to clear the HCI_RAW flag.
2224                  */
2225                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2226                         clear_bit(HCI_RAW, &hdev->flags);
2227
2228                 /* Powering on the controller with HCI_CONFIG set only
2229                  * happens with the transition from unconfigured to
2230                  * configured. This will send the Index Added event.
2231                  */
2232                 mgmt_index_added(hdev);
2233         }
2234 }
2235
2236 static void hci_power_off(struct work_struct *work)
2237 {
2238         struct hci_dev *hdev = container_of(work, struct hci_dev,
2239                                             power_off.work);
2240
2241         BT_DBG("%s", hdev->name);
2242
2243         hci_dev_do_close(hdev);
2244 }
2245
2246 static void hci_error_reset(struct work_struct *work)
2247 {
2248         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2249
2250         BT_DBG("%s", hdev->name);
2251
2252         if (hdev->hw_error)
2253                 hdev->hw_error(hdev, hdev->hw_error_code);
2254         else
2255                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2256
2257         if (hci_dev_do_close(hdev))
2258                 return;
2259
2260         hci_dev_do_open(hdev);
2261 }
2262
2263 void hci_uuids_clear(struct hci_dev *hdev)
2264 {
2265         struct bt_uuid *uuid, *tmp;
2266
2267         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2268                 list_del(&uuid->list);
2269                 kfree(uuid);
2270         }
2271 }
2272
2273 void hci_link_keys_clear(struct hci_dev *hdev)
2274 {
2275         struct link_key *key;
2276
2277         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2278                 list_del_rcu(&key->list);
2279                 kfree_rcu(key, rcu);
2280         }
2281 }
2282
2283 void hci_smp_ltks_clear(struct hci_dev *hdev)
2284 {
2285         struct smp_ltk *k;
2286
2287         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2288                 list_del_rcu(&k->list);
2289                 kfree_rcu(k, rcu);
2290         }
2291 }
2292
2293 void hci_smp_irks_clear(struct hci_dev *hdev)
2294 {
2295         struct smp_irk *k;
2296
2297         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2298                 list_del_rcu(&k->list);
2299                 kfree_rcu(k, rcu);
2300         }
2301 }
2302
2303 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2304 {
2305         struct link_key *k;
2306
2307         rcu_read_lock();
2308         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2309                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2310                         rcu_read_unlock();
2311                         return k;
2312                 }
2313         }
2314         rcu_read_unlock();
2315
2316         return NULL;
2317 }
2318
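/* Decide whether a BR/EDR link key is safe to store persistently. The
 * key_type values follow the HCI Link Key Notification event: 0x00-0x02
 * are legacy keys, 0x03 is the debug combination key, 0x06 the changed
 * combination key, and 0x04/0x05/0x07/0x08 are the SSP combination keys.
 */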
2319 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2320                                u8 key_type, u8 old_key_type)
2321 {
2322         /* Legacy key */
2323         if (key_type < 0x03)
2324                 return true;
2325
2326         /* Debug keys are insecure so don't store them persistently */
2327         if (key_type == HCI_LK_DEBUG_COMBINATION)
2328                 return false;
2329
2330         /* Changed combination key and there's no previous one */
2331         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2332                 return false;
2333
2334         /* Security mode 3 case */
2335         if (!conn)
2336                 return true;
2337
2338         /* BR/EDR key derived using SC from an LE link */
2339         if (conn->type == LE_LINK)
2340                 return true;
2341
2342         /* Neither local nor remote side set no-bonding as a requirement */
2343         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2344                 return true;
2345
2346         /* Local side had dedicated bonding as requirement */
2347         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2348                 return true;
2349
2350         /* Remote side had dedicated bonding as requirement */
2351         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2352                 return true;
2353
2354         /* If none of the above criteria match, then don't store the key
2355          * persistently */
2356         return false;
2357 }
2358
2359 static u8 ltk_role(u8 type)
2360 {
2361         if (type == SMP_LTK)
2362                 return HCI_ROLE_MASTER;
2363
2364         return HCI_ROLE_SLAVE;
2365 }
2366
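/* Look up an LTK by identity address and role. Legacy SMP pairing
 * distributes a separate LTK per role, while a key generated with LE
 * Secure Connections is symmetric and valid for either role, hence the
 * smp_ltk_is_sc() short-circuit in the role comparison below.
 */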
2367 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2368                              u8 addr_type, u8 role)
2369 {
2370         struct smp_ltk *k;
2371
2372         rcu_read_lock();
2373         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2374                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2375                         continue;
2376
2377                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2378                         rcu_read_unlock();
2379                         return k;
2380                 }
2381         }
2382         rcu_read_unlock();
2383
2384         return NULL;
2385 }
2386
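/* Resolve a Resolvable Private Address to its IRK. An RPA consists of
 * 24 bits of random data (prand) and a 24-bit hash computed as
 * hash = ah(irk, prand). The first pass below is a cheap comparison
 * against the last RPA cached for each IRK; only when that fails is the
 * ah() computation performed (via smp_irk_matches) and the cache updated.
 */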
2387 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2388 {
2389         struct smp_irk *irk;
2390
2391         rcu_read_lock();
2392         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2393                 if (!bacmp(&irk->rpa, rpa)) {
2394                         rcu_read_unlock();
2395                         return irk;
2396                 }
2397         }
2398
2399         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2400                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2401                         bacpy(&irk->rpa, rpa);
2402                         rcu_read_unlock();
2403                         return irk;
2404                 }
2405         }
2406         rcu_read_unlock();
2407
2408         return NULL;
2409 }
2410
2411 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2412                                      u8 addr_type)
2413 {
2414         struct smp_irk *irk;
2415
2416         /* Identity Address must be public or static random */
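        /* A static random address has its two most significant bits set
         * to 0b11. bdaddr_t is stored in little-endian order, so those
         * bits live in b[5], hence the 0xc0 mask.
         */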
2417         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2418                 return NULL;
2419
2420         rcu_read_lock();
2421         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2422                 if (addr_type == irk->addr_type &&
2423                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2424                         rcu_read_unlock();
2425                         return irk;
2426                 }
2427         }
2428         rcu_read_unlock();
2429
2430         return NULL;
2431 }
2432
2433 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2434                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2435                                   u8 pin_len, bool *persistent)
2436 {
2437         struct link_key *key, *old_key;
2438         u8 old_key_type;
2439
2440         old_key = hci_find_link_key(hdev, bdaddr);
2441         if (old_key) {
2442                 old_key_type = old_key->type;
2443                 key = old_key;
2444         } else {
2445                 old_key_type = conn ? conn->key_type : 0xff;
2446                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2447                 if (!key)
2448                         return NULL;
2449                 list_add_rcu(&key->list, &hdev->link_keys);
2450         }
2451
2452         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2453
2454         /* Some buggy controller combinations generate a changed
2455          * combination key for legacy pairing even when there's no
2456          * previous key */
2457         if (type == HCI_LK_CHANGED_COMBINATION &&
2458             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2459                 type = HCI_LK_COMBINATION;
2460                 if (conn)
2461                         conn->key_type = type;
2462         }
2463
2464         bacpy(&key->bdaddr, bdaddr);
2465         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2466         key->pin_len = pin_len;
2467
2468         if (type == HCI_LK_CHANGED_COMBINATION)
2469                 key->type = old_key_type;
2470         else
2471                 key->type = type;
2472
2473         if (persistent)
2474                 *persistent = hci_persistent_key(hdev, conn, type,
2475                                                  old_key_type);
2476
2477         return key;
2478 }
2479
2480 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2481                             u8 addr_type, u8 type, u8 authenticated,
2482                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2483 {
2484         struct smp_ltk *key, *old_key;
2485         u8 role = ltk_role(type);
2486
2487         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2488         if (old_key)
2489                 key = old_key;
2490         else {
2491                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2492                 if (!key)
2493                         return NULL;
2494                 list_add_rcu(&key->list, &hdev->long_term_keys);
2495         }
2496
2497         bacpy(&key->bdaddr, bdaddr);
2498         key->bdaddr_type = addr_type;
2499         memcpy(key->val, tk, sizeof(key->val));
2500         key->authenticated = authenticated;
2501         key->ediv = ediv;
2502         key->rand = rand;
2503         key->enc_size = enc_size;
2504         key->type = type;
2505
2506         return key;
2507 }
2508
2509 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2510                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2511 {
2512         struct smp_irk *irk;
2513
2514         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2515         if (!irk) {
2516                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2517                 if (!irk)
2518                         return NULL;
2519
2520                 bacpy(&irk->bdaddr, bdaddr);
2521                 irk->addr_type = addr_type;
2522
2523                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2524         }
2525
2526         memcpy(irk->val, val, 16);
2527         bacpy(&irk->rpa, rpa);
2528
2529         return irk;
2530 }
2531
2532 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2533 {
2534         struct link_key *key;
2535
2536         key = hci_find_link_key(hdev, bdaddr);
2537         if (!key)
2538                 return -ENOENT;
2539
2540         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2541
2542         list_del_rcu(&key->list);
2543         kfree_rcu(key, rcu);
2544
2545         return 0;
2546 }
2547
2548 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2549 {
2550         struct smp_ltk *k;
2551         int removed = 0;
2552
2553         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2554                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2555                         continue;
2556
2557                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2558
2559                 list_del_rcu(&k->list);
2560                 kfree_rcu(k, rcu);
2561                 removed++;
2562         }
2563
2564         return removed ? 0 : -ENOENT;
2565 }
2566
2567 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2568 {
2569         struct smp_irk *k;
2570
2571         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2572                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2573                         continue;
2574
2575                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2576
2577                 list_del_rcu(&k->list);
2578                 kfree_rcu(k, rcu);
2579         }
2580 }
2581
2582 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2583 {
2584         struct smp_ltk *k;
2585         struct smp_irk *irk;
2586         u8 addr_type;
2587
2588         if (type == BDADDR_BREDR) {
2589                 if (hci_find_link_key(hdev, bdaddr))
2590                         return true;
2591                 return false;
2592         }
2593
2594         /* Convert to HCI addr type which struct smp_ltk uses */
2595         if (type == BDADDR_LE_PUBLIC)
2596                 addr_type = ADDR_LE_DEV_PUBLIC;
2597         else
2598                 addr_type = ADDR_LE_DEV_RANDOM;
2599
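        /* If the address resolves to a known identity, match the LTK
         * list against the identity address instead of the RPA.
         */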
2600         irk = hci_get_irk(hdev, bdaddr, addr_type);
2601         if (irk) {
2602                 bdaddr = &irk->bdaddr;
2603                 addr_type = irk->addr_type;
2604         }
2605
2606         rcu_read_lock();
2607         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2608                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2609                         rcu_read_unlock();
2610                         return true;
2611                 }
2612         }
2613         rcu_read_unlock();
2614
2615         return false;
2616 }
2617
2618 /* HCI command timer function */
2619 static void hci_cmd_timeout(struct work_struct *work)
2620 {
2621         struct hci_dev *hdev = container_of(work, struct hci_dev,
2622                                             cmd_timer.work);
2623
2624         if (hdev->sent_cmd) {
2625                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2626                 u16 opcode = __le16_to_cpu(sent->opcode);
2627
2628                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2629         } else {
2630                 bt_dev_err(hdev, "command tx timeout");
2631         }
2632
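        /* Notify userspace about the stalled controller through the
         * Tizen-specific H/W TX timeout error MGMT event so that it can
         * trigger recovery.
         */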
2633 #ifdef TIZEN_BT
2634         hci_tx_timeout_error_evt(hdev);
2635 #endif
2636
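        /* Assume the command and its completion event were lost: restore
         * the command credit so the next queued command can be sent.
         */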
2637         atomic_set(&hdev->cmd_cnt, 1);
2638         queue_work(hdev->workqueue, &hdev->cmd_work);
2639 }
2640
2641 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2642                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2643 {
2644         struct oob_data *data;
2645
2646         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2647                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2648                         continue;
2649                 if (data->bdaddr_type != bdaddr_type)
2650                         continue;
2651                 return data;
2652         }
2653
2654         return NULL;
2655 }
2656
2657 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2658                                u8 bdaddr_type)
2659 {
2660         struct oob_data *data;
2661
2662         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2663         if (!data)
2664                 return -ENOENT;
2665
2666         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2667
2668         list_del(&data->list);
2669         kfree(data);
2670
2671         return 0;
2672 }
2673
2674 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2675 {
2676         struct oob_data *data, *n;
2677
2678         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2679                 list_del(&data->list);
2680                 kfree(data);
2681         }
2682 }
2683
2684 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2685                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2686                             u8 *hash256, u8 *rand256)
2687 {
2688         struct oob_data *data;
2689
2690         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2691         if (!data) {
2692                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2693                 if (!data)
2694                         return -ENOMEM;
2695
2696                 bacpy(&data->bdaddr, bdaddr);
2697                 data->bdaddr_type = bdaddr_type;
2698                 list_add(&data->list, &hdev->remote_oob_data);
2699         }
2700
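        /* data->present is a bitmask of the valid OOB value pairs: bit 0
         * for the P-192 hash/randomizer pair and bit 1 for the P-256
         * pair, so 0x03 means both are available.
         */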
2701         if (hash192 && rand192) {
2702                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2703                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2704                 if (hash256 && rand256)
2705                         data->present = 0x03;
2706         } else {
2707                 memset(data->hash192, 0, sizeof(data->hash192));
2708                 memset(data->rand192, 0, sizeof(data->rand192));
2709                 if (hash256 && rand256)
2710                         data->present = 0x02;
2711                 else
2712                         data->present = 0x00;
2713         }
2714
2715         if (hash256 && rand256) {
2716                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2717                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2718         } else {
2719                 memset(data->hash256, 0, sizeof(data->hash256));
2720                 memset(data->rand256, 0, sizeof(data->rand256));
2721                 if (hash192 && rand192)
2722                         data->present = 0x01;
2723         }
2724
2725         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2726
2727         return 0;
2728 }
2729
2730 /* This function requires the caller holds hdev->lock */
2731 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2732 {
2733         struct adv_info *adv_instance;
2734
2735         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2736                 if (adv_instance->instance == instance)
2737                         return adv_instance;
2738         }
2739
2740         return NULL;
2741 }
2742
2743 /* This function requires the caller holds hdev->lock */
2744 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2745 {
2746         struct adv_info *cur_instance;
2747
2748         cur_instance = hci_find_adv_instance(hdev, instance);
2749         if (!cur_instance)
2750                 return NULL;
2751
2752         if (cur_instance == list_last_entry(&hdev->adv_instances,
2753                                             struct adv_info, list))
2754                 return list_first_entry(&hdev->adv_instances,
2755                                                  struct adv_info, list);
2756         else
2757                 return list_next_entry(cur_instance, list);
2758 }
2759
2760 /* This function requires the caller holds hdev->lock */
2761 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2762 {
2763         struct adv_info *adv_instance;
2764
2765         adv_instance = hci_find_adv_instance(hdev, instance);
2766         if (!adv_instance)
2767                 return -ENOENT;
2768
2769         BT_DBG("%s removing instance %d", hdev->name, instance);
2770
2771         if (hdev->cur_adv_instance == instance) {
2772                 if (hdev->adv_instance_timeout) {
2773                         cancel_delayed_work(&hdev->adv_instance_expire);
2774                         hdev->adv_instance_timeout = 0;
2775                 }
2776                 hdev->cur_adv_instance = 0x00;
2777         }
2778
2779         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2780
2781         list_del(&adv_instance->list);
2782         kfree(adv_instance);
2783
2784         hdev->adv_instance_cnt--;
2785
2786         return 0;
2787 }
2788
2789 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2790 {
2791         struct adv_info *adv_instance, *n;
2792
2793         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2794                 adv_instance->rpa_expired = rpa_expired;
2795 }
2796
2797 /* This function requires the caller holds hdev->lock */
2798 void hci_adv_instances_clear(struct hci_dev *hdev)
2799 {
2800         struct adv_info *adv_instance, *n;
2801
2802         if (hdev->adv_instance_timeout) {
2803                 cancel_delayed_work(&hdev->adv_instance_expire);
2804                 hdev->adv_instance_timeout = 0;
2805         }
2806
2807         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2808                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2809                 list_del(&adv_instance->list);
2810                 kfree(adv_instance);
2811         }
2812
2813         hdev->adv_instance_cnt = 0;
2814         hdev->cur_adv_instance = 0x00;
2815 }
2816
2817 static void adv_instance_rpa_expired(struct work_struct *work)
2818 {
2819         struct adv_info *adv_instance = container_of(work, struct adv_info,
2820                                                      rpa_expired_cb.work);
2821
2822         BT_DBG("");
2823
2824         adv_instance->rpa_expired = true;
2825 }
2826
2827 /* This function requires the caller holds hdev->lock */
2828 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2829                          u16 adv_data_len, u8 *adv_data,
2830                          u16 scan_rsp_len, u8 *scan_rsp_data,
2831                          u16 timeout, u16 duration)
2832 {
2833         struct adv_info *adv_instance;
2834
2835         adv_instance = hci_find_adv_instance(hdev, instance);
2836         if (adv_instance) {
2837                 memset(adv_instance->adv_data, 0,
2838                        sizeof(adv_instance->adv_data));
2839                 memset(adv_instance->scan_rsp_data, 0,
2840                        sizeof(adv_instance->scan_rsp_data));
2841         } else {
2842                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2843                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2844                         return -EOVERFLOW;
2845
2846                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2847                 if (!adv_instance)
2848                         return -ENOMEM;
2849
2850                 adv_instance->pending = true;
2851                 adv_instance->instance = instance;
2852                 list_add(&adv_instance->list, &hdev->adv_instances);
2853                 hdev->adv_instance_cnt++;
2854         }
2855
2856         adv_instance->flags = flags;
2857         adv_instance->adv_data_len = adv_data_len;
2858         adv_instance->scan_rsp_len = scan_rsp_len;
2859
2860         if (adv_data_len)
2861                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2862
2863         if (scan_rsp_len)
2864                 memcpy(adv_instance->scan_rsp_data,
2865                        scan_rsp_data, scan_rsp_len);
2866
2867         adv_instance->timeout = timeout;
2868         adv_instance->remaining_time = timeout;
2869
2870         if (duration == 0)
2871                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2872         else
2873                 adv_instance->duration = duration;
2874
2875         adv_instance->tx_power = HCI_TX_POWER_INVALID;
2876
2877         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
2878                           adv_instance_rpa_expired);
2879
2880         BT_DBG("%s for instance %d", hdev->name, instance);
2881
2882         return 0;
2883 }
2884
2885 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2886                                          bdaddr_t *bdaddr, u8 type)
2887 {
2888         struct bdaddr_list *b;
2889
2890         list_for_each_entry(b, bdaddr_list, list) {
2891                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2892                         return b;
2893         }
2894
2895         return NULL;
2896 }
2897
2898 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2899 {
2900         struct bdaddr_list *b, *n;
2901
2902         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2903                 list_del(&b->list);
2904                 kfree(b);
2905         }
2906 }
2907
2908 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2909 {
2910         struct bdaddr_list *entry;
2911
2912         if (!bacmp(bdaddr, BDADDR_ANY))
2913                 return -EBADF;
2914
2915         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2916                 return -EEXIST;
2917
2918         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2919         if (!entry)
2920                 return -ENOMEM;
2921
2922         bacpy(&entry->bdaddr, bdaddr);
2923         entry->bdaddr_type = type;
2924
2925         list_add(&entry->list, list);
2926
2927         return 0;
2928 }
2929
2930 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2931 {
2932         struct bdaddr_list *entry;
2933
2934         if (!bacmp(bdaddr, BDADDR_ANY)) {
2935                 hci_bdaddr_list_clear(list);
2936                 return 0;
2937         }
2938
2939         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2940         if (!entry)
2941                 return -ENOENT;
2942
2943         list_del(&entry->list);
2944         kfree(entry);
2945
2946         return 0;
2947 }
2948
2949 /* This function requires the caller holds hdev->lock */
2950 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2951                                                bdaddr_t *addr, u8 addr_type)
2952 {
2953         struct hci_conn_params *params;
2954
2955         list_for_each_entry(params, &hdev->le_conn_params, list) {
2956                 if (bacmp(&params->addr, addr) == 0 &&
2957                     params->addr_type == addr_type) {
2958                         return params;
2959                 }
2960         }
2961
2962         return NULL;
2963 }
2964
2965 /* This function requires the caller holds hdev->lock */
2966 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2967                                                   bdaddr_t *addr, u8 addr_type)
2968 {
2969         struct hci_conn_params *param;
2970
2971         list_for_each_entry(param, list, action) {
2972                 if (bacmp(&param->addr, addr) == 0 &&
2973                     param->addr_type == addr_type)
2974                         return param;
2975         }
2976
2977         return NULL;
2978 }
2979
2980 /* This function requires the caller holds hdev->lock */
2981 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2982                                             bdaddr_t *addr, u8 addr_type)
2983 {
2984         struct hci_conn_params *params;
2985
2986         params = hci_conn_params_lookup(hdev, addr, addr_type);
2987         if (params)
2988                 return params;
2989
2990         params = kzalloc(sizeof(*params), GFP_KERNEL);
2991         if (!params) {
2992                 bt_dev_err(hdev, "out of memory");
2993                 return NULL;
2994         }
2995
2996         bacpy(&params->addr, addr);
2997         params->addr_type = addr_type;
2998
2999         list_add(&params->list, &hdev->le_conn_params);
3000         INIT_LIST_HEAD(&params->action);
3001
3002         params->conn_min_interval = hdev->le_conn_min_interval;
3003         params->conn_max_interval = hdev->le_conn_max_interval;
3004         params->conn_latency = hdev->le_conn_latency;
3005         params->supervision_timeout = hdev->le_supv_timeout;
3006         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3007
3008         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3009
3010         return params;
3011 }
3012
3013 static void hci_conn_params_free(struct hci_conn_params *params)
3014 {
3015         if (params->conn) {
3016                 hci_conn_drop(params->conn);
3017                 hci_conn_put(params->conn);
3018         }
3019
3020         list_del(&params->action);
3021         list_del(&params->list);
3022         kfree(params);
3023 }
3024
3025 /* This function requires the caller holds hdev->lock */
3026 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3027 {
3028         struct hci_conn_params *params;
3029
3030         params = hci_conn_params_lookup(hdev, addr, addr_type);
3031         if (!params)
3032                 return;
3033
3034         hci_conn_params_free(params);
3035
3036         hci_update_background_scan(hdev);
3037
3038         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3039 }
3040
3041 /* This function requires the caller holds hdev->lock */
3042 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3043 {
3044         struct hci_conn_params *params, *tmp;
3045
3046         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3047                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3048                         continue;
3049
3050                 /* If trying to establish a one-time connection to a disabled
3051                  * device, leave the params, but mark them as just once.
3052                  */
3053                 if (params->explicit_connect) {
3054                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3055                         continue;
3056                 }
3057
3058                 list_del(&params->list);
3059                 kfree(params);
3060         }
3061
3062         BT_DBG("All LE disabled connection parameters were removed");
3063 }
3064
3065 /* This function requires the caller holds hdev->lock */
3066 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3067 {
3068         struct hci_conn_params *params, *tmp;
3069
3070         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3071                 hci_conn_params_free(params);
3072
3073         BT_DBG("All LE connection parameters were removed");
3074 }
3075
3076 /* Copy the Identity Address of the controller.
3077  *
3078  * If the controller has a public BD_ADDR, then by default use that one.
3079  * If this is a LE only controller without a public address, default to
3080  * the static random address.
3081  *
3082  * For debugging purposes it is possible to force controllers with a
3083  * public address to use the static random address instead.
3084  *
3085  * In case BR/EDR has been disabled on a dual-mode controller and
3086  * userspace has configured a static address, then that address
3087  * becomes the identity address instead of the public BR/EDR address.
3088  */
3089 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3090                                u8 *bdaddr_type)
3091 {
3092         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3093             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3094             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3095              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3096                 bacpy(bdaddr, &hdev->static_addr);
3097                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3098         } else {
3099                 bacpy(bdaddr, &hdev->bdaddr);
3100                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3101         }
3102 }
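
/* Example (illustrative sketch): callers provide storage for both outputs,
 * e.g. when filling an address field of a management reply:
 *
 *	bdaddr_t addr;
 *	u8 addr_type;
 *
 *	hci_copy_identity_address(hdev, &addr, &addr_type);
 */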
3103
3104 /* Alloc HCI device */
3105 struct hci_dev *hci_alloc_dev(void)
3106 {
3107         struct hci_dev *hdev;
3108
3109         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3110         if (!hdev)
3111                 return NULL;
3112
3113         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3114         hdev->esco_type = (ESCO_HV1);
3115         hdev->link_mode = (HCI_LM_ACCEPT);
3116         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3117         hdev->io_capability = 0x03;     /* No Input No Output */
3118         hdev->manufacturer = 0xffff;    /* Default to internal use */
3119         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3120         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3121         hdev->adv_instance_cnt = 0;
3122         hdev->cur_adv_instance = 0x00;
3123         hdev->adv_instance_timeout = 0;
3124
3125         hdev->sniff_max_interval = 800;
3126         hdev->sniff_min_interval = 80;
3127
3128         hdev->le_adv_channel_map = 0x07;
3129         hdev->le_adv_min_interval = 0x0800;
3130         hdev->le_adv_max_interval = 0x0800;
3131 #ifdef TIZEN_BT
3132         hdev->adv_filter_policy = 0x00;
3133         hdev->adv_type = 0x00;
3134 #endif
3135         hdev->le_scan_interval = 0x0060;
3136         hdev->le_scan_window = 0x0030;
3137         hdev->le_conn_min_interval = 0x0018;
3138         hdev->le_conn_max_interval = 0x0028;
3139         hdev->le_conn_latency = 0x0000;
3140         hdev->le_supv_timeout = 0x002a;
3141         hdev->le_def_tx_len = 0x001b;
3142         hdev->le_def_tx_time = 0x0148;
3143         hdev->le_max_tx_len = 0x001b;
3144         hdev->le_max_tx_time = 0x0148;
3145         hdev->le_max_rx_len = 0x001b;
3146         hdev->le_max_rx_time = 0x0148;
3147         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3148         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3149         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3150         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3151
3152         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3153         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3154         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3155         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3156
3157         mutex_init(&hdev->lock);
3158         mutex_init(&hdev->req_lock);
3159
3160         INIT_LIST_HEAD(&hdev->mgmt_pending);
3161         INIT_LIST_HEAD(&hdev->blacklist);
3162         INIT_LIST_HEAD(&hdev->whitelist);
3163         INIT_LIST_HEAD(&hdev->uuids);
3164         INIT_LIST_HEAD(&hdev->link_keys);
3165         INIT_LIST_HEAD(&hdev->long_term_keys);
3166         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3167         INIT_LIST_HEAD(&hdev->remote_oob_data);
3168         INIT_LIST_HEAD(&hdev->le_white_list);
3169         INIT_LIST_HEAD(&hdev->le_resolv_list);
3170         INIT_LIST_HEAD(&hdev->le_conn_params);
3171         INIT_LIST_HEAD(&hdev->pend_le_conns);
3172         INIT_LIST_HEAD(&hdev->pend_le_reports);
3173         INIT_LIST_HEAD(&hdev->conn_hash.list);
3174         INIT_LIST_HEAD(&hdev->adv_instances);
3175
3176         INIT_WORK(&hdev->rx_work, hci_rx_work);
3177         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3178         INIT_WORK(&hdev->tx_work, hci_tx_work);
3179         INIT_WORK(&hdev->power_on, hci_power_on);
3180         INIT_WORK(&hdev->error_reset, hci_error_reset);
3181
3182         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3183
3184         skb_queue_head_init(&hdev->rx_q);
3185         skb_queue_head_init(&hdev->cmd_q);
3186         skb_queue_head_init(&hdev->raw_q);
3187
3188         init_waitqueue_head(&hdev->req_wait_q);
3189
3190         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3191
3192         hci_request_setup(hdev);
3193
3194         hci_init_sysfs(hdev);
3195         discovery_init(hdev);
3196
3197         return hdev;
3198 }
3199 EXPORT_SYMBOL(hci_alloc_dev);
3200
3201 /* Free HCI device */
3202 void hci_free_dev(struct hci_dev *hdev)
3203 {
3204         /* will free via device release */
3205         put_device(&hdev->dev);
3206 }
3207 EXPORT_SYMBOL(hci_free_dev);
3208
3209 /* Register HCI device */
3210 int hci_register_dev(struct hci_dev *hdev)
3211 {
3212         int id, error;
3213
3214         if (!hdev->open || !hdev->close || !hdev->send)
3215                 return -EINVAL;
3216
3217         /* Do not allow HCI_AMP devices to register at index 0,
3218          * so the index can be used as the AMP controller ID.
3219          */
3220         switch (hdev->dev_type) {
3221         case HCI_PRIMARY:
3222                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3223                 break;
3224         case HCI_AMP:
3225                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3226                 break;
3227         default:
3228                 return -EINVAL;
3229         }
3230
3231         if (id < 0)
3232                 return id;
3233
3234         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3235         hdev->id = id;
3236
3237         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3238
3239         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3240         if (!hdev->workqueue) {
3241                 error = -ENOMEM;
3242                 goto err;
3243         }
3244
3245         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3246                                                       hdev->name);
3247         if (!hdev->req_workqueue) {
3248                 destroy_workqueue(hdev->workqueue);
3249                 error = -ENOMEM;
3250                 goto err;
3251         }
3252
3253         if (!IS_ERR_OR_NULL(bt_debugfs))
3254                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3255
3256         dev_set_name(&hdev->dev, "%s", hdev->name);
3257
3258         error = device_add(&hdev->dev);
3259         if (error < 0)
3260                 goto err_wqueue;
3261
3262         hci_leds_init(hdev);
3263
3264         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3265                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3266                                     hdev);
3267         if (hdev->rfkill) {
3268                 if (rfkill_register(hdev->rfkill) < 0) {
3269                         rfkill_destroy(hdev->rfkill);
3270                         hdev->rfkill = NULL;
3271                 }
3272         }
3273
3274         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3275                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3276
3277         hci_dev_set_flag(hdev, HCI_SETUP);
3278         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3279
3280         if (hdev->dev_type == HCI_PRIMARY) {
3281                 /* Assume BR/EDR support until proven otherwise (such as
3282                  * through reading supported features during init).
3283                  */
3284                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3285         }
3286
3287         write_lock(&hci_dev_list_lock);
3288         list_add(&hdev->list, &hci_dev_list);
3289         write_unlock(&hci_dev_list_lock);
3290
3291         /* Devices that are marked for raw-only usage are unconfigured
3292          * and should not be included in normal operation.
3293          */
3294         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3295                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3296
3297         hci_sock_dev_event(hdev, HCI_DEV_REG);
3298         hci_dev_hold(hdev);
3299
3300         queue_work(hdev->req_workqueue, &hdev->power_on);
3301
3302         return id;
3303
3304 err_wqueue:
3305         destroy_workqueue(hdev->workqueue);
3306         destroy_workqueue(hdev->req_workqueue);
3307 err:
3308         ida_simple_remove(&hci_index_ida, hdev->id);
3309
3310         return error;
3311 }
3312 EXPORT_SYMBOL(hci_register_dev);
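
/* Example (illustrative sketch, not part of the original file): a minimal
 * transport driver allocates, fills in and registers an hci_dev roughly as
 * below. The my_* names are hypothetical driver callbacks and data.
 *
 *	static int my_probe(struct my_device *mydev)
 *	{
 *		struct hci_dev *hdev;
 *		int err;
 *
 *		hdev = hci_alloc_dev();
 *		if (!hdev)
 *			return -ENOMEM;
 *
 *		hdev->bus = HCI_USB;
 *		hdev->open = my_open;
 *		hdev->close = my_close;
 *		hdev->send = my_send;
 *		hci_set_drvdata(hdev, mydev);
 *
 *		err = hci_register_dev(hdev);
 *		if (err < 0) {
 *			hci_free_dev(hdev);
 *			return err;
 *		}
 *
 *		return 0;
 *	}
 */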
3313
3314 /* Unregister HCI device */
3315 void hci_unregister_dev(struct hci_dev *hdev)
3316 {
3317         int id;
3318
3319         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3320
3321         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3322
3323         id = hdev->id;
3324
3325         write_lock(&hci_dev_list_lock);
3326         list_del(&hdev->list);
3327         write_unlock(&hci_dev_list_lock);
3328
3329         cancel_work_sync(&hdev->power_on);
3330
3331         hci_dev_do_close(hdev);
3332
3333         if (!test_bit(HCI_INIT, &hdev->flags) &&
3334             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3335             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3336                 hci_dev_lock(hdev);
3337                 mgmt_index_removed(hdev);
3338                 hci_dev_unlock(hdev);
3339         }
3340
3341         /* mgmt_index_removed should take care of emptying the
3342          * pending list */
3343         BUG_ON(!list_empty(&hdev->mgmt_pending));
3344
3345         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3346
3347         if (hdev->rfkill) {
3348                 rfkill_unregister(hdev->rfkill);
3349                 rfkill_destroy(hdev->rfkill);
3350         }
3351
3352         device_del(&hdev->dev);
3353
3354         debugfs_remove_recursive(hdev->debugfs);
3355         kfree_const(hdev->hw_info);
3356         kfree_const(hdev->fw_info);
3357
3358         destroy_workqueue(hdev->workqueue);
3359         destroy_workqueue(hdev->req_workqueue);
3360
3361         hci_dev_lock(hdev);
3362         hci_bdaddr_list_clear(&hdev->blacklist);
3363         hci_bdaddr_list_clear(&hdev->whitelist);
3364         hci_uuids_clear(hdev);
3365         hci_link_keys_clear(hdev);
3366         hci_smp_ltks_clear(hdev);
3367         hci_smp_irks_clear(hdev);
3368         hci_remote_oob_data_clear(hdev);
3369         hci_adv_instances_clear(hdev);
3370         hci_bdaddr_list_clear(&hdev->le_white_list);
3371         hci_bdaddr_list_clear(&hdev->le_resolv_list);
3372         hci_conn_params_clear_all(hdev);
3373         hci_discovery_filter_clear(hdev);
3374         hci_dev_unlock(hdev);
3375
3376         hci_dev_put(hdev);
3377
3378         ida_simple_remove(&hci_index_ida, id);
3379 }
3380 EXPORT_SYMBOL(hci_unregister_dev);
3381
3382 /* Suspend HCI device */
3383 int hci_suspend_dev(struct hci_dev *hdev)
3384 {
3385         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3386         return 0;
3387 }
3388 EXPORT_SYMBOL(hci_suspend_dev);
3389
3390 /* Resume HCI device */
3391 int hci_resume_dev(struct hci_dev *hdev)
3392 {
3393         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3394         return 0;
3395 }
3396 EXPORT_SYMBOL(hci_resume_dev);
3397
3398 /* Reset HCI device */
3399 int hci_reset_dev(struct hci_dev *hdev)
3400 {
3401         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3402         struct sk_buff *skb;
3403
3404         skb = bt_skb_alloc(sizeof(hw_err), GFP_ATOMIC);
3405         if (!skb)
3406                 return -ENOMEM;
3407
3408         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3409         skb_put_data(skb, hw_err, sizeof(hw_err));
3410
3411         /* Send Hardware Error to upper stack */
3412         return hci_recv_frame(hdev, skb);
3413 }
3414 EXPORT_SYMBOL(hci_reset_dev);
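
/* Example (illustrative sketch): a driver that detects a stalled transport
 * can ask the core to recover; the injected Hardware Error event causes the
 * error_reset work (set up in hci_alloc_dev above) to restart the device.
 * my_transport_stalled() and mydev are hypothetical.
 *
 *	if (my_transport_stalled(mydev))
 *		hci_reset_dev(mydev->hdev);
 */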
3415
3416 /* Receive frame from HCI drivers */
3417 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3418 {
3419         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3420                       !test_bit(HCI_INIT, &hdev->flags))) {
3421                 kfree_skb(skb);
3422                 return -ENXIO;
3423         }
3424
3425         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3426             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3427             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3428                 kfree_skb(skb);
3429                 return -EINVAL;
3430         }
3431
3432         /* Incoming skb */
3433         bt_cb(skb)->incoming = 1;
3434
3435         /* Time stamp */
3436         __net_timestamp(skb);
3437
3438         skb_queue_tail(&hdev->rx_q, skb);
3439         queue_work(hdev->workqueue, &hdev->rx_work);
3440
3441         return 0;
3442 }
3443 EXPORT_SYMBOL(hci_recv_frame);
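
/* Example (illustrative sketch): a driver RX path hands a received HCI
 * event to the core like this; "data" and "count" are hypothetical buffer
 * variables, and ACL/SCO packets only differ in the packet type.
 *
 *	struct sk_buff *skb = bt_skb_alloc(count, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, data, count);
 *
 *	return hci_recv_frame(hdev, skb);
 */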
3444
3445 /* Receive diagnostic message from HCI drivers */
3446 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3447 {
3448         /* Mark as diagnostic packet */
3449         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3450
3451         /* Time stamp */
3452         __net_timestamp(skb);
3453
3454         skb_queue_tail(&hdev->rx_q, skb);
3455         queue_work(hdev->workqueue, &hdev->rx_work);
3456
3457         return 0;
3458 }
3459 EXPORT_SYMBOL(hci_recv_diag);
3460
3461 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3462 {
3463         va_list vargs;
3464
3465         va_start(vargs, fmt);
3466         kfree_const(hdev->hw_info);
3467         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3468         va_end(vargs);
3469 }
3470 EXPORT_SYMBOL(hci_set_hw_info);
3471
3472 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3473 {
3474         va_list vargs;
3475
3476         va_start(vargs, fmt);
3477         kfree_const(hdev->fw_info);
3478         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3479         va_end(vargs);
3480 }
3481 EXPORT_SYMBOL(hci_set_fw_info);
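
/* Example (illustrative sketch): drivers usually record these strings once
 * the controller firmware has been identified; the values here are made up.
 * The strings are duplicated internally and released with kfree_const()
 * when the device is unregistered (see hci_unregister_dev above).
 *
 *	hci_set_hw_info(hdev, "rev %u", hw_rev);
 *	hci_set_fw_info(hdev, "build %u", fw_build);
 */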
3482
3483 /* ---- Interface to upper protocols ---- */
3484
3485 int hci_register_cb(struct hci_cb *cb)
3486 {
3487         BT_DBG("%p name %s", cb, cb->name);
3488
3489         mutex_lock(&hci_cb_list_lock);
3490         list_add_tail(&cb->list, &hci_cb_list);
3491         mutex_unlock(&hci_cb_list_lock);
3492
3493         return 0;
3494 }
3495 EXPORT_SYMBOL(hci_register_cb);
3496
3497 int hci_unregister_cb(struct hci_cb *cb)
3498 {
3499         BT_DBG("%p name %s", cb, cb->name);
3500
3501         mutex_lock(&hci_cb_list_lock);
3502         list_del(&cb->list);
3503         mutex_unlock(&hci_cb_list_lock);
3504
3505         return 0;
3506 }
3507 EXPORT_SYMBOL(hci_unregister_cb);
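
/* Example (illustrative sketch): an upper protocol registers for connection
 * callbacks through struct hci_cb. Only one hook is shown here and the
 * my_* names are hypothetical.
 *
 *	static void my_connect_cfm(struct hci_conn *conn, __u8 status)
 *	{
 *		if (!status)
 *			BT_DBG("conn %p established", conn);
 *	}
 *
 *	static struct hci_cb my_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_connect_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */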
3508
3509 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3510 {
3511         int err;
3512
3513         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3514                skb->len);
3515
3516         /* Time stamp */
3517         __net_timestamp(skb);
3518
3519         /* Send copy to monitor */
3520         hci_send_to_monitor(hdev, skb);
3521
3522         if (atomic_read(&hdev->promisc)) {
3523                 /* Send copy to the sockets */
3524                 hci_send_to_sock(hdev, skb);
3525         }
3526
3527         /* Get rid of skb owner, prior to sending to the driver. */
3528         skb_orphan(skb);
3529
3530         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3531                 kfree_skb(skb);
3532                 return;
3533         }
3534
3535         err = hdev->send(hdev, skb);
3536         if (err < 0) {
3537                 bt_dev_err(hdev, "sending frame failed (%d)", err);
3538                 kfree_skb(skb);
3539         }
3540 }
3541
3542 /* Send HCI command */
3543 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3544                  const void *param)
3545 {
3546         struct sk_buff *skb;
3547
3548         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3549
3550         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3551         if (!skb) {
3552                 bt_dev_err(hdev, "no memory for command");
3553                 return -ENOMEM;
3554         }
3555
3556         /* Stand-alone HCI commands must be flagged as
3557          * single-command requests.
3558          */
3559         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3560
3561         skb_queue_tail(&hdev->cmd_q, skb);
3562         queue_work(hdev->workqueue, &hdev->cmd_work);
3563
3564         return 0;
3565 }
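
/* Example (illustrative sketch): queueing a standard command, here enabling
 * page and inquiry scan; the Command Complete event is delivered through
 * the normal event path.
 *
 *	u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */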
3566
3567 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3568                    const void *param)
3569 {
3570         struct sk_buff *skb;
3571
3572         if (hci_opcode_ogf(opcode) != 0x3f) {
3573                 /* A controller receiving a command shall respond with either
3574                  * a Command Status Event or a Command Complete Event.
3575                  * Therefore, all standard HCI commands must be sent via the
3576                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3577                  * Some vendors do not comply with this rule for vendor-specific
3578                  * commands and do not return any event. We want to support
3579                  * commands and do not return any event. We support such
3580                  * unresponded commands only for those cases.
3581                 bt_dev_err(hdev, "unresponded command not supported");
3582                 return -EINVAL;
3583         }
3584
3585         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3586         if (!skb) {
3587                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3588                            opcode);
3589                 return -ENOMEM;
3590         }
3591
3592         hci_send_frame(hdev, skb);
3593
3594         return 0;
3595 }
3596 EXPORT_SYMBOL(__hci_cmd_send);
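
/* Example (illustrative sketch): sending a vendor command (OGF 0x3f) that
 * is known not to generate any event; the OCF 0x0023 and the payload are
 * made up.
 *
 *	static const u8 param[] = { 0x01 };
 *
 *	__hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0023), sizeof(param),
 *		       param);
 */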
3597
3598 /* Get data from the previously sent command */
3599 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3600 {
3601         struct hci_command_hdr *hdr;
3602
3603         if (!hdev->sent_cmd)
3604                 return NULL;
3605
3606         hdr = (void *) hdev->sent_cmd->data;
3607
3608         if (hdr->opcode != cpu_to_le16(opcode))
3609                 return NULL;
3610
3611         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3612
3613         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3614 }
3615
3616 /* Send HCI command and wait for the command complete event */
3617 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3618                              const void *param, u32 timeout)
3619 {
3620         struct sk_buff *skb;
3621
3622         if (!test_bit(HCI_UP, &hdev->flags))
3623                 return ERR_PTR(-ENETDOWN);
3624
3625         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3626
3627         hci_req_sync_lock(hdev);
3628         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3629         hci_req_sync_unlock(hdev);
3630
3631         return skb;
3632 }
3633 EXPORT_SYMBOL(hci_cmd_sync);
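
/* Example (illustrative sketch): synchronously reading the local version
 * information and parsing the returned parameters.
 *
 *	struct hci_rp_read_local_version *ver;
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	ver = (struct hci_rp_read_local_version *)skb->data;
 *	BT_DBG("hci_ver %u manufacturer %u", ver->hci_ver,
 *	       __le16_to_cpu(ver->manufacturer));
 *	kfree_skb(skb);
 */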
3634
3635 /* Send ACL data */
3636 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3637 {
3638         struct hci_acl_hdr *hdr;
3639         int len = skb->len;
3640
3641         skb_push(skb, HCI_ACL_HDR_SIZE);
3642         skb_reset_transport_header(skb);
3643         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3644         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3645         hdr->dlen   = cpu_to_le16(len);
3646 }
3647
3648 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3649                           struct sk_buff *skb, __u16 flags)
3650 {
3651         struct hci_conn *conn = chan->conn;
3652         struct hci_dev *hdev = conn->hdev;
3653         struct sk_buff *list;
3654
3655         skb->len = skb_headlen(skb);
3656         skb->data_len = 0;
3657
3658         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3659
3660         switch (hdev->dev_type) {
3661         case HCI_PRIMARY:
3662                 hci_add_acl_hdr(skb, conn->handle, flags);
3663                 break;
3664         case HCI_AMP:
3665                 hci_add_acl_hdr(skb, chan->handle, flags);
3666                 break;
3667         default:
3668                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3669                 return;
3670         }
3671
3672         list = skb_shinfo(skb)->frag_list;
3673         if (!list) {
3674                 /* Non fragmented */
3675                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3676
3677                 skb_queue_tail(queue, skb);
3678         } else {
3679                 /* Fragmented */
3680                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3681
3682                 skb_shinfo(skb)->frag_list = NULL;
3683
3684                 /* Queue all fragments atomically. We need to use spin_lock_bh
3685                  * here because of 6LoWPAN links, as there this function is
3686                  * called from softirq and using normal spin lock could cause
3687                  * deadlocks.
3688                  */
3689                 spin_lock_bh(&queue->lock);
3690
3691                 __skb_queue_tail(queue, skb);
3692
3693                 flags &= ~ACL_START;
3694                 flags |= ACL_CONT;
3695                 do {
3696                         skb = list; list = list->next;
3697
3698                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3699                         hci_add_acl_hdr(skb, conn->handle, flags);
3700
3701                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3702
3703                         __skb_queue_tail(queue, skb);
3704                 } while (list);
3705
3706                 spin_unlock_bh(&queue->lock);
3707         }
3708 }
3709
3710 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3711 {
3712         struct hci_dev *hdev = chan->conn->hdev;
3713
3714         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3715
3716         hci_queue_acl(chan, &chan->data_q, skb, flags);
3717
3718         queue_work(hdev->workqueue, &hdev->tx_work);
3719 }
3720
3721 /* Send SCO data */
3722 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3723 {
3724         struct hci_dev *hdev = conn->hdev;
3725         struct hci_sco_hdr hdr;
3726
3727         BT_DBG("%s len %d", hdev->name, skb->len);
3728
3729         hdr.handle = cpu_to_le16(conn->handle);
3730         hdr.dlen   = skb->len;
3731
3732         skb_push(skb, HCI_SCO_HDR_SIZE);
3733         skb_reset_transport_header(skb);
3734         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3735
3736         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3737
3738         skb_queue_tail(&conn->data_q, skb);
3739         queue_work(hdev->workqueue, &hdev->tx_work);
3740 }
3741
3742 /* ---- HCI TX task (outgoing data) ---- */
3743
3744 /* HCI Connection scheduler */
3745 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3746                                      int *quote)
3747 {
3748         struct hci_conn_hash *h = &hdev->conn_hash;
3749         struct hci_conn *conn = NULL, *c;
3750         unsigned int num = 0, min = ~0;
3751
3752         /* We don't have to lock device here. Connections are always
3753          * added and removed with TX task disabled. */
3754
3755         rcu_read_lock();
3756
3757         list_for_each_entry_rcu(c, &h->list, list) {
3758                 if (c->type != type || skb_queue_empty(&c->data_q))
3759                         continue;
3760
3761                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3762                         continue;
3763
3764                 num++;
3765
3766                 if (c->sent < min) {
3767                         min  = c->sent;
3768                         conn = c;
3769                 }
3770
3771                 if (hci_conn_num(hdev, type) == num)
3772                         break;
3773         }
3774
3775         rcu_read_unlock();
3776
3777         if (conn) {
3778                 int cnt, q;
3779
3780                 switch (conn->type) {
3781                 case ACL_LINK:
3782                         cnt = hdev->acl_cnt;
3783                         break;
3784                 case SCO_LINK:
3785                 case ESCO_LINK:
3786                         cnt = hdev->sco_cnt;
3787                         break;
3788                 case LE_LINK:
3789                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3790                         break;
3791                 default:
3792                         cnt = 0;
3793                         bt_dev_err(hdev, "unknown link type %d", conn->type);
3794                 }
3795
3796                 q = cnt / num;
3797                 *quote = q ? q : 1;
3798         } else {
3799                 *quote = 0;
3800         }
3801         BT_DBG("conn %p quote %d", conn, *quote);
3802         return conn;
3803 }
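
/* Worked example of the fair-share quota above: with 9 free ACL buffers
 * (cnt = 9) and 4 ACL connections with queued data (num = 4), integer
 * division yields q = 2, so the least-busy connection may send two packets
 * this round. If cnt < num, q is 0 and the "q ? q : 1" fallback still
 * grants one packet, so a connection can never starve completely.
 */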
3804
3805 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3806 {
3807         struct hci_conn_hash *h = &hdev->conn_hash;
3808         struct hci_conn *c;
3809
3810         bt_dev_err(hdev, "link tx timeout");
3811
3812         rcu_read_lock();
3813
3814         /* Kill stalled connections */
3815         list_for_each_entry_rcu(c, &h->list, list) {
3816                 if (c->type == type && c->sent) {
3817                         bt_dev_err(hdev, "killing stalled connection %pMR",
3818                                    &c->dst);
3819                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3820                 }
3821         }
3822
3823         rcu_read_unlock();
3824 }
3825
3826 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3827                                       int *quote)
3828 {
3829         struct hci_conn_hash *h = &hdev->conn_hash;
3830         struct hci_chan *chan = NULL;
3831         unsigned int num = 0, min = ~0, cur_prio = 0;
3832         struct hci_conn *conn;
3833         int cnt, q, conn_num = 0;
3834
3835         BT_DBG("%s", hdev->name);
3836
3837         rcu_read_lock();
3838
3839         list_for_each_entry_rcu(conn, &h->list, list) {
3840                 struct hci_chan *tmp;
3841
3842                 if (conn->type != type)
3843                         continue;
3844
3845                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3846                         continue;
3847
3848                 conn_num++;
3849
3850                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3851                         struct sk_buff *skb;
3852
3853                         if (skb_queue_empty(&tmp->data_q))
3854                                 continue;
3855
3856                         skb = skb_peek(&tmp->data_q);
3857                         if (skb->priority < cur_prio)
3858                                 continue;
3859
3860                         if (skb->priority > cur_prio) {
3861                                 num = 0;
3862                                 min = ~0;
3863                                 cur_prio = skb->priority;
3864                         }
3865
3866                         num++;
3867
3868                         if (conn->sent < min) {
3869                                 min  = conn->sent;
3870                                 chan = tmp;
3871                         }
3872                 }
3873
3874                 if (hci_conn_num(hdev, type) == conn_num)
3875                         break;
3876         }
3877
3878         rcu_read_unlock();
3879
3880         if (!chan)
3881                 return NULL;
3882
3883         switch (chan->conn->type) {
3884         case ACL_LINK:
3885                 cnt = hdev->acl_cnt;
3886                 break;
3887         case AMP_LINK:
3888                 cnt = hdev->block_cnt;
3889                 break;
3890         case SCO_LINK:
3891         case ESCO_LINK:
3892                 cnt = hdev->sco_cnt;
3893                 break;
3894         case LE_LINK:
3895                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3896                 break;
3897         default:
3898                 cnt = 0;
3899                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3900         }
3901
3902         q = cnt / num;
3903         *quote = q ? q : 1;
3904         BT_DBG("chan %p quote %d", chan, *quote);
3905         return chan;
3906 }
3907
3908 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3909 {
3910         struct hci_conn_hash *h = &hdev->conn_hash;
3911         struct hci_conn *conn;
3912         int num = 0;
3913
3914         BT_DBG("%s", hdev->name);
3915
3916         rcu_read_lock();
3917
3918         list_for_each_entry_rcu(conn, &h->list, list) {
3919                 struct hci_chan *chan;
3920
3921                 if (conn->type != type)
3922                         continue;
3923
3924                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3925                         continue;
3926
3927                 num++;
3928
3929                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3930                         struct sk_buff *skb;
3931
3932                         if (chan->sent) {
3933                                 chan->sent = 0;
3934                                 continue;
3935                         }
3936
3937                         if (skb_queue_empty(&chan->data_q))
3938                                 continue;
3939
3940                         skb = skb_peek(&chan->data_q);
3941                         if (skb->priority >= HCI_PRIO_MAX - 1)
3942                                 continue;
3943
3944                         skb->priority = HCI_PRIO_MAX - 1;
3945
3946                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3947                                skb->priority);
3948                 }
3949
3950                 if (hci_conn_num(hdev, type) == num)
3951                         break;
3952         }
3953
3954         rcu_read_unlock();
3955
3956 }
3957
3958 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3959 {
3960         /* Calculate count of blocks used by this packet */
3961         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3962 }
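
/* Worked example: with block_len = 339, an ACL packet carrying 672 bytes
 * of payload (skb->len - HCI_ACL_HDR_SIZE = 672) occupies
 * DIV_ROUND_UP(672, 339) = 2 controller buffer blocks.
 */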
3963
3964 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3965 {
3966         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3967                 /* ACL tx timeout must be longer than maximum
3968                  * link supervision timeout (40.9 seconds) */
3969                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3970                                        HCI_ACL_TX_TIMEOUT))
3971                         hci_link_tx_to(hdev, ACL_LINK);
3972         }
3973 }
3974
3975 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3976 {
3977         unsigned int cnt = hdev->acl_cnt;
3978         struct hci_chan *chan;
3979         struct sk_buff *skb;
3980         int quote;
3981
3982         __check_timeout(hdev, cnt);
3983
3984         while (hdev->acl_cnt &&
3985                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3986                 u32 priority = (skb_peek(&chan->data_q))->priority;
3987                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3988                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3989                                skb->len, skb->priority);
3990
3991                         /* Stop if priority has changed */
3992                         if (skb->priority < priority)
3993                                 break;
3994
3995                         skb = skb_dequeue(&chan->data_q);
3996
3997                         hci_conn_enter_active_mode(chan->conn,
3998                                                    bt_cb(skb)->force_active);
3999
4000                         hci_send_frame(hdev, skb);
4001                         hdev->acl_last_tx = jiffies;
4002
4003                         hdev->acl_cnt--;
4004                         chan->sent++;
4005                         chan->conn->sent++;
4006                 }
4007         }
4008
4009         if (cnt != hdev->acl_cnt)
4010                 hci_prio_recalculate(hdev, ACL_LINK);
4011 }
4012
4013 static void hci_sched_acl_blk(struct hci_dev *hdev)
4014 {
4015         unsigned int cnt = hdev->block_cnt;
4016         struct hci_chan *chan;
4017         struct sk_buff *skb;
4018         int quote;
4019         u8 type;
4020
4021         __check_timeout(hdev, cnt);
4022
4023         BT_DBG("%s", hdev->name);
4024
4025         if (hdev->dev_type == HCI_AMP)
4026                 type = AMP_LINK;
4027         else
4028                 type = ACL_LINK;
4029
4030         while (hdev->block_cnt > 0 &&
4031                (chan = hci_chan_sent(hdev, type, &quote))) {
4032                 u32 priority = (skb_peek(&chan->data_q))->priority;
4033                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4034                         int blocks;
4035
4036                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4037                                skb->len, skb->priority);
4038
4039                         /* Stop if priority has changed */
4040                         if (skb->priority < priority)
4041                                 break;
4042
4043                         skb = skb_dequeue(&chan->data_q);
4044
4045                         blocks = __get_blocks(hdev, skb);
4046                         if (blocks > hdev->block_cnt)
4047                                 return;
4048
4049                         hci_conn_enter_active_mode(chan->conn,
4050                                                    bt_cb(skb)->force_active);
4051
4052                         hci_send_frame(hdev, skb);
4053                         hdev->acl_last_tx = jiffies;
4054
4055                         hdev->block_cnt -= blocks;
4056                         quote -= blocks;
4057
4058                         chan->sent += blocks;
4059                         chan->conn->sent += blocks;
4060                 }
4061         }
4062
4063         if (cnt != hdev->block_cnt)
4064                 hci_prio_recalculate(hdev, type);
4065 }
4066
4067 static void hci_sched_acl(struct hci_dev *hdev)
4068 {
4069         BT_DBG("%s", hdev->name);
4070
4071         /* Nothing to do if a BR/EDR controller has no ACL links */
4072         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4073                 return;
4074
4075         /* Nothing to do if an AMP controller has no AMP links */
4076         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4077                 return;
4078
4079         switch (hdev->flow_ctl_mode) {
4080         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4081                 hci_sched_acl_pkt(hdev);
4082                 break;
4083
4084         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4085                 hci_sched_acl_blk(hdev);
4086                 break;
4087         }
4088 }
4089
4090 /* Schedule SCO */
4091 static void hci_sched_sco(struct hci_dev *hdev)
4092 {
4093         struct hci_conn *conn;
4094         struct sk_buff *skb;
4095         int quote;
4096
4097         BT_DBG("%s", hdev->name);
4098
4099         if (!hci_conn_num(hdev, SCO_LINK))
4100                 return;
4101
4102         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4103                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4104                         BT_DBG("skb %p len %d", skb, skb->len);
4105                         hci_send_frame(hdev, skb);
4106
4107                         conn->sent++;
4108                         if (conn->sent == ~0)
4109                                 conn->sent = 0;
4110                 }
4111         }
4112 }
4113
4114 static void hci_sched_esco(struct hci_dev *hdev)
4115 {
4116         struct hci_conn *conn;
4117         struct sk_buff *skb;
4118         int quote;
4119
4120         BT_DBG("%s", hdev->name);
4121
4122         if (!hci_conn_num(hdev, ESCO_LINK))
4123                 return;
4124
4125         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4126                                                      &quote))) {
4127                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4128                         BT_DBG("skb %p len %d", skb, skb->len);
4129                         hci_send_frame(hdev, skb);
4130
4131                         conn->sent++;
4132                         if (conn->sent == ~0)
4133                                 conn->sent = 0;
4134                 }
4135         }
4136 }
4137
4138 static void hci_sched_le(struct hci_dev *hdev)
4139 {
4140         struct hci_chan *chan;
4141         struct sk_buff *skb;
4142         int quote, cnt, tmp;
4143
4144         BT_DBG("%s", hdev->name);
4145
4146         if (!hci_conn_num(hdev, LE_LINK))
4147                 return;
4148
4149         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4150                 /* LE tx timeout must be longer than maximum
4151                  * link supervision timeout (40.9 seconds) */
4152                 if (!hdev->le_cnt && hdev->le_pkts &&
4153                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4154                         hci_link_tx_to(hdev, LE_LINK);
4155         }
4156
4157         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4158         tmp = cnt;
4159         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4160                 u32 priority = (skb_peek(&chan->data_q))->priority;
4161                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4162                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4163                                skb->len, skb->priority);
4164
4165                         /* Stop if priority has changed */
4166                         if (skb->priority < priority)
4167                                 break;
4168
4169                         skb = skb_dequeue(&chan->data_q);
4170
4171                         hci_send_frame(hdev, skb);
4172                         hdev->le_last_tx = jiffies;
4173
4174                         cnt--;
4175                         chan->sent++;
4176                         chan->conn->sent++;
4177                 }
4178         }
4179
4180         if (hdev->le_pkts)
4181                 hdev->le_cnt = cnt;
4182         else
4183                 hdev->acl_cnt = cnt;
4184
4185         if (cnt != tmp)
4186                 hci_prio_recalculate(hdev, LE_LINK);
4187 }
4188
4189 static void hci_tx_work(struct work_struct *work)
4190 {
4191         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4192         struct sk_buff *skb;
4193
4194         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4195                hdev->sco_cnt, hdev->le_cnt);
4196
4197         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4198                 /* Schedule queues and send stuff to HCI driver */
4199                 hci_sched_acl(hdev);
4200                 hci_sched_sco(hdev);
4201                 hci_sched_esco(hdev);
4202                 hci_sched_le(hdev);
4203         }
4204
4205         /* Send next queued raw (unknown type) packet */
4206         while ((skb = skb_dequeue(&hdev->raw_q)))
4207                 hci_send_frame(hdev, skb);
4208 }
4209
4210 /* ----- HCI RX task (incoming data processing) ----- */
4211
4212 /* ACL data packet */
4213 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4214 {
4215         struct hci_acl_hdr *hdr = (void *) skb->data;
4216         struct hci_conn *conn;
4217         __u16 handle, flags;
4218
4219         skb_pull(skb, HCI_ACL_HDR_SIZE);
4220
4221         handle = __le16_to_cpu(hdr->handle);
4222         flags  = hci_flags(handle);
4223         handle = hci_handle(handle);
4224
4225         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4226                handle, flags);
4227
4228         hdev->stat.acl_rx++;
4229
4230         hci_dev_lock(hdev);
4231         conn = hci_conn_hash_lookup_handle(hdev, handle);
4232         hci_dev_unlock(hdev);
4233
4234         if (conn) {
4235                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4236
4237                 /* Send to upper protocol */
4238                 l2cap_recv_acldata(conn, skb, flags);
4239                 return;
4240         }
4241
4242         bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4243                    handle);
4244
4245         kfree_skb(skb);
4246 }
4247
4248 /* SCO data packet */
4249 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4250 {
4251         struct hci_sco_hdr *hdr = (void *) skb->data;
4252         struct hci_conn *conn;
4253         __u16 handle;
4254
4255         skb_pull(skb, HCI_SCO_HDR_SIZE);
4256
4257         handle = __le16_to_cpu(hdr->handle);
4258
4259         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4260
4261         hdev->stat.sco_rx++;
4262
4263         hci_dev_lock(hdev);
4264         conn = hci_conn_hash_lookup_handle(hdev, handle);
4265         hci_dev_unlock(hdev);
4266
4267         if (conn) {
4268                 /* Send to upper protocol */
4269                 sco_recv_scodata(conn, skb);
4270                 return;
4271         }
4272
4273         bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4274                    handle);
4275
4276         kfree_skb(skb);
4277 }
4278
4279 static bool hci_req_is_complete(struct hci_dev *hdev)
4280 {
4281         struct sk_buff *skb;
4282
4283         skb = skb_peek(&hdev->cmd_q);
4284         if (!skb)
4285                 return true;
4286
4287         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4288 }
4289
4290 static void hci_resend_last(struct hci_dev *hdev)
4291 {
4292         struct hci_command_hdr *sent;
4293         struct sk_buff *skb;
4294         u16 opcode;
4295
4296         if (!hdev->sent_cmd)
4297                 return;
4298
4299         sent = (void *) hdev->sent_cmd->data;
4300         opcode = __le16_to_cpu(sent->opcode);
4301         if (opcode == HCI_OP_RESET)
4302                 return;
4303
4304         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4305         if (!skb)
4306                 return;
4307
4308         skb_queue_head(&hdev->cmd_q, skb);
4309         queue_work(hdev->workqueue, &hdev->cmd_work);
4310 }
4311
4312 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4313                           hci_req_complete_t *req_complete,
4314                           hci_req_complete_skb_t *req_complete_skb)
4315 {
4316         struct sk_buff *skb;
4317         unsigned long flags;
4318
4319         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4320
4321         /* If the completed command doesn't match the last one that was
4322          * sent we need to do special handling of it.
4323          */
4324         if (!hci_sent_cmd_data(hdev, opcode)) {
4325                 /* Some CSR based controllers generate a spontaneous
4326                  * reset complete event during init and any pending
4327                  * command will never be completed. In such a case we
4328                  * need to resend whatever was the last sent
4329                  * command.
4330                  */
4331                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4332                         hci_resend_last(hdev);
4333
4334                 return;
4335         }
4336
4337         /* If we reach this point this event matches the last command sent */
4338         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4339
4340         /* If the command succeeded and there's still more commands in
4341          * this request the request is not yet complete.
4342          */
4343         if (!status && !hci_req_is_complete(hdev))
4344                 return;
4345
4346         /* If this was the last command in a request the complete
4347          * callback would be found in hdev->sent_cmd instead of the
4348          * command queue (hdev->cmd_q).
4349          */
4350         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4351                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4352                 return;
4353         }
4354
4355         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4356                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4357                 return;
4358         }
4359
4360         /* Remove all pending commands belonging to this request */
4361         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4362         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4363                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4364                         __skb_queue_head(&hdev->cmd_q, skb);
4365                         break;
4366                 }
4367
4368                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4369                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4370                 else
4371                         *req_complete = bt_cb(skb)->hci.req_complete;
4372                 kfree_skb(skb);
4373         }
4374         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4375 }
4376
4377 static void hci_rx_work(struct work_struct *work)
4378 {
4379         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4380         struct sk_buff *skb;
4381
4382         BT_DBG("%s", hdev->name);
4383
4384         while ((skb = skb_dequeue(&hdev->rx_q))) {
4385                 /* Send copy to monitor */
4386                 hci_send_to_monitor(hdev, skb);
4387
4388                 if (atomic_read(&hdev->promisc)) {
4389                         /* Send copy to the sockets */
4390                         hci_send_to_sock(hdev, skb);
4391                 }
4392
4393                 /* If the device has been opened in HCI_USER_CHANNEL,
4394                  * userspace has exclusive access to the device.
4395                  * When the device is in HCI_INIT, we still need to
4396                  * process the incoming packets so that the driver
4397                  * can complete its setup().
4398                  */
4399                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4400                     !test_bit(HCI_INIT, &hdev->flags)) {
4401                         kfree_skb(skb);
4402                         continue;
4403                 }
4404
4405                 if (test_bit(HCI_INIT, &hdev->flags)) {
4406                         /* Don't process data packets in this state. */
4407                         switch (hci_skb_pkt_type(skb)) {
4408                         case HCI_ACLDATA_PKT:
4409                         case HCI_SCODATA_PKT:
4410                                 kfree_skb(skb);
4411                                 continue;
4412                         }
4413                 }
4414
4415                 /* Process frame */
4416                 switch (hci_skb_pkt_type(skb)) {
4417                 case HCI_EVENT_PKT:
4418                         BT_DBG("%s Event packet", hdev->name);
4419                         hci_event_packet(hdev, skb);
4420                         break;
4421
4422                 case HCI_ACLDATA_PKT:
4423                         BT_DBG("%s ACL data packet", hdev->name);
4424                         hci_acldata_packet(hdev, skb);
4425                         break;
4426
4427                 case HCI_SCODATA_PKT:
4428                         BT_DBG("%s SCO data packet", hdev->name);
4429                         hci_scodata_packet(hdev, skb);
4430                         break;
4431
4432                 default:
4433                         kfree_skb(skb);
4434                         break;
4435                 }
4436         }
4437 }
4438
4439 static void hci_cmd_work(struct work_struct *work)
4440 {
4441         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4442         struct sk_buff *skb;
4443
4444         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4445                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4446
4447         /* Send queued commands */
4448         if (atomic_read(&hdev->cmd_cnt)) {
4449                 skb = skb_dequeue(&hdev->cmd_q);
4450                 if (!skb)
4451                         return;
4452
4453                 kfree_skb(hdev->sent_cmd);
4454
4455                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4456                 if (hdev->sent_cmd) {
4457                         if (hci_req_status_pend(hdev))
4458                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4459                         atomic_dec(&hdev->cmd_cnt);
4460                         hci_send_frame(hdev, skb);
4461                         if (test_bit(HCI_RESET, &hdev->flags))
4462                                 cancel_delayed_work(&hdev->cmd_timer);
4463                         else
4464                                 schedule_delayed_work(&hdev->cmd_timer,
4465                                                       HCI_CMD_TIMEOUT);
4466                 } else {
4467                         skb_queue_head(&hdev->cmd_q, skb);
4468                         queue_work(hdev->workqueue, &hdev->cmd_work);
4469                 }
4470         }
4471 }