2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
/* Work handlers, defined later in this file */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */

/* Forward a device state event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
58 /* ---- HCI requests ---- */
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
82 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
85 struct hci_ev_cmd_complete *ev;
86 struct hci_event_hdr *hdr;
92 hdev->recv_evt = NULL;
97 return ERR_PTR(-ENODATA);
99 if (skb->len < sizeof(*hdr)) {
100 BT_ERR("Too short HCI event");
104 hdr = (void *) skb->data;
105 skb_pull(skb, HCI_EVENT_HDR_SIZE);
108 if (hdr->evt != event)
113 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
118 if (skb->len < sizeof(*ev)) {
119 BT_ERR("Too short cmd_complete event");
123 ev = (void *) skb->data;
124 skb_pull(skb, sizeof(*ev));
126 if (opcode == __le16_to_cpu(ev->opcode))
129 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130 __le16_to_cpu(ev->opcode));
134 return ERR_PTR(-ENODATA);
137 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
138 const void *param, u8 event, u32 timeout)
140 DECLARE_WAITQUEUE(wait, current);
141 struct hci_request req;
144 BT_DBG("%s", hdev->name);
146 hci_req_init(&req, hdev);
148 hci_req_add_ev(&req, opcode, plen, param, event);
150 hdev->req_status = HCI_REQ_PEND;
152 err = hci_req_run(&req, hci_req_sync_complete);
156 add_wait_queue(&hdev->req_wait_q, &wait);
157 set_current_state(TASK_INTERRUPTIBLE);
159 schedule_timeout(timeout);
161 remove_wait_queue(&hdev->req_wait_q, &wait);
163 if (signal_pending(current))
164 return ERR_PTR(-EINTR);
166 switch (hdev->req_status) {
168 err = -bt_to_errno(hdev->req_result);
171 case HCI_REQ_CANCELED:
172 err = -hdev->req_result;
180 hdev->req_status = hdev->req_result = 0;
182 BT_DBG("%s end: err %d", hdev->name, err);
187 return hci_get_cmd_complete(hdev, opcode, event);
189 EXPORT_SYMBOL(__hci_cmd_sync_ev);
191 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
192 const void *param, u32 timeout)
194 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
196 EXPORT_SYMBOL(__hci_cmd_sync);
198 /* Execute request and wait for completion. */
199 static int __hci_req_sync(struct hci_dev *hdev,
200 void (*func)(struct hci_request *req,
202 unsigned long opt, __u32 timeout)
204 struct hci_request req;
205 DECLARE_WAITQUEUE(wait, current);
208 BT_DBG("%s start", hdev->name);
210 hci_req_init(&req, hdev);
212 hdev->req_status = HCI_REQ_PEND;
216 err = hci_req_run(&req, hci_req_sync_complete);
218 hdev->req_status = 0;
220 /* ENODATA means the HCI request command queue is empty.
221 * This can happen when a request with conditionals doesn't
222 * trigger any commands to be sent. This is normal behavior
223 * and should not trigger an error return.
231 add_wait_queue(&hdev->req_wait_q, &wait);
232 set_current_state(TASK_INTERRUPTIBLE);
234 schedule_timeout(timeout);
236 remove_wait_queue(&hdev->req_wait_q, &wait);
238 if (signal_pending(current))
241 switch (hdev->req_status) {
243 err = -bt_to_errno(hdev->req_result);
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
255 hdev->req_status = hdev->req_result = 0;
257 BT_DBG("%s end: err %d", hdev->name, err);
262 static int hci_req_sync(struct hci_dev *hdev,
263 void (*req)(struct hci_request *req,
265 unsigned long opt, __u32 timeout)
269 if (!test_bit(HCI_UP, &hdev->flags))
272 /* Serialize all requests */
274 ret = __hci_req_sync(hdev, req, opt, timeout);
275 hci_req_unlock(hdev);
280 static void hci_reset_req(struct hci_request *req, unsigned long opt)
282 BT_DBG("%s %ld", req->hdev->name, opt);
285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
289 static void bredr_init(struct hci_request *req)
291 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
293 /* Read Local Supported Features */
294 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
296 /* Read Local Version */
297 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
299 /* Read BD Address */
300 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
303 static void amp_init(struct hci_request *req)
305 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
307 /* Read Local Version */
308 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
310 /* Read Local AMP Info */
311 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
313 /* Read Data Blk size */
314 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
317 static void hci_init1_req(struct hci_request *req, unsigned long opt)
319 struct hci_dev *hdev = req->hdev;
321 BT_DBG("%s %ld", hdev->name, opt);
324 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
325 hci_reset_req(req, 0);
327 switch (hdev->dev_type) {
337 BT_ERR("Unknown device type %d", hdev->dev_type);
342 static void bredr_setup(struct hci_request *req)
347 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
348 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
350 /* Read Class of Device */
351 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
353 /* Read Local Name */
354 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
356 /* Read Voice Setting */
357 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
359 /* Clear Event Filters */
360 flt_type = HCI_FLT_CLEAR_ALL;
361 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
363 /* Connection accept timeout ~20 secs */
364 param = __constant_cpu_to_le16(0x7d00);
365 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
367 /* Read page scan parameters */
368 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
369 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
370 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
374 static void le_setup(struct hci_request *req)
376 struct hci_dev *hdev = req->hdev;
378 /* Read LE Buffer Size */
379 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
381 /* Read LE Local Supported Features */
382 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
384 /* Read LE Advertising Channel TX Power */
385 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
387 /* Read LE White List Size */
388 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
390 /* Read LE Supported States */
391 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
393 /* LE-only controllers have LE implicitly enabled */
394 if (!lmp_bredr_capable(hdev))
395 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
398 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
400 if (lmp_ext_inq_capable(hdev))
403 if (lmp_inq_rssi_capable(hdev))
406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 hdev->lmp_subver == 0x0757)
410 if (hdev->manufacturer == 15) {
411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 hdev->lmp_subver == 0x1805)
426 static void hci_setup_inquiry_mode(struct hci_request *req)
430 mode = hci_get_inquiry_mode(req->hdev);
432 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
435 static void hci_setup_event_mask(struct hci_request *req)
437 struct hci_dev *hdev = req->hdev;
439 /* The second byte is 0xff instead of 0x9f (two reserved bits
440 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
443 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
445 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
446 * any event mask for pre 1.2 devices.
448 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
451 if (lmp_bredr_capable(hdev)) {
452 events[4] |= 0x01; /* Flow Specification Complete */
453 events[4] |= 0x02; /* Inquiry Result with RSSI */
454 events[4] |= 0x04; /* Read Remote Extended Features Complete */
455 events[5] |= 0x08; /* Synchronous Connection Complete */
456 events[5] |= 0x10; /* Synchronous Connection Changed */
458 /* Use a different default for LE-only devices */
459 memset(events, 0, sizeof(events));
460 events[0] |= 0x10; /* Disconnection Complete */
461 events[0] |= 0x80; /* Encryption Change */
462 events[1] |= 0x08; /* Read Remote Version Information Complete */
463 events[1] |= 0x20; /* Command Complete */
464 events[1] |= 0x40; /* Command Status */
465 events[1] |= 0x80; /* Hardware Error */
466 events[2] |= 0x04; /* Number of Completed Packets */
467 events[3] |= 0x02; /* Data Buffer Overflow */
468 events[5] |= 0x80; /* Encryption Key Refresh Complete */
471 if (lmp_inq_rssi_capable(hdev))
472 events[4] |= 0x02; /* Inquiry Result with RSSI */
474 if (lmp_sniffsubr_capable(hdev))
475 events[5] |= 0x20; /* Sniff Subrating */
477 if (lmp_pause_enc_capable(hdev))
478 events[5] |= 0x80; /* Encryption Key Refresh Complete */
480 if (lmp_ext_inq_capable(hdev))
481 events[5] |= 0x40; /* Extended Inquiry Result */
483 if (lmp_no_flush_capable(hdev))
484 events[7] |= 0x01; /* Enhanced Flush Complete */
486 if (lmp_lsto_capable(hdev))
487 events[6] |= 0x80; /* Link Supervision Timeout Changed */
489 if (lmp_ssp_capable(hdev)) {
490 events[6] |= 0x01; /* IO Capability Request */
491 events[6] |= 0x02; /* IO Capability Response */
492 events[6] |= 0x04; /* User Confirmation Request */
493 events[6] |= 0x08; /* User Passkey Request */
494 events[6] |= 0x10; /* Remote OOB Data Request */
495 events[6] |= 0x20; /* Simple Pairing Complete */
496 events[7] |= 0x04; /* User Passkey Notification */
497 events[7] |= 0x08; /* Keypress Notification */
498 events[7] |= 0x10; /* Remote Host Supported
499 * Features Notification
503 if (lmp_le_capable(hdev))
504 events[7] |= 0x20; /* LE Meta-Event */
506 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
508 if (lmp_le_capable(hdev)) {
509 memset(events, 0, sizeof(events));
511 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
512 sizeof(events), events);
516 static void hci_init2_req(struct hci_request *req, unsigned long opt)
518 struct hci_dev *hdev = req->hdev;
520 if (lmp_bredr_capable(hdev))
523 if (lmp_le_capable(hdev))
526 hci_setup_event_mask(req);
528 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
529 * local supported commands HCI command.
531 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
532 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
534 if (lmp_ssp_capable(hdev)) {
535 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
537 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
538 sizeof(mode), &mode);
540 struct hci_cp_write_eir cp;
542 memset(hdev->eir, 0, sizeof(hdev->eir));
543 memset(&cp, 0, sizeof(cp));
545 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
549 if (lmp_inq_rssi_capable(hdev))
550 hci_setup_inquiry_mode(req);
552 if (lmp_inq_tx_pwr_capable(hdev))
553 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
555 if (lmp_ext_feat_capable(hdev)) {
556 struct hci_cp_read_local_ext_features cp;
559 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
563 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
565 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
570 static void hci_setup_link_policy(struct hci_request *req)
572 struct hci_dev *hdev = req->hdev;
573 struct hci_cp_write_def_link_policy cp;
576 if (lmp_rswitch_capable(hdev))
577 link_policy |= HCI_LP_RSWITCH;
578 if (lmp_hold_capable(hdev))
579 link_policy |= HCI_LP_HOLD;
580 if (lmp_sniff_capable(hdev))
581 link_policy |= HCI_LP_SNIFF;
582 if (lmp_park_capable(hdev))
583 link_policy |= HCI_LP_PARK;
585 cp.policy = cpu_to_le16(link_policy);
586 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
589 static void hci_set_le_support(struct hci_request *req)
591 struct hci_dev *hdev = req->hdev;
592 struct hci_cp_write_le_host_supported cp;
594 /* LE-only devices do not support explicit enablement */
595 if (!lmp_bredr_capable(hdev))
598 memset(&cp, 0, sizeof(cp));
600 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
602 cp.simul = lmp_le_br_capable(hdev);
605 if (cp.le != lmp_host_le_capable(hdev))
606 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
610 static void hci_init3_req(struct hci_request *req, unsigned long opt)
612 struct hci_dev *hdev = req->hdev;
615 /* Some Broadcom based Bluetooth controllers do not support the
616 * Delete Stored Link Key command. They are clearly indicating its
617 * absence in the bit mask of supported commands.
619 * Check the supported commands and only if the the command is marked
620 * as supported send it. If not supported assume that the controller
621 * does not have actual support for stored link keys which makes this
622 * command redundant anyway.
624 if (hdev->commands[6] & 0x80) {
625 struct hci_cp_delete_stored_link_key cp;
627 bacpy(&cp.bdaddr, BDADDR_ANY);
628 cp.delete_all = 0x01;
629 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
633 if (hdev->commands[5] & 0x10)
634 hci_setup_link_policy(req);
636 if (lmp_le_capable(hdev)) {
637 hci_set_le_support(req);
641 /* Read features beyond page 1 if available */
642 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
643 struct hci_cp_read_local_ext_features cp;
646 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
651 static void hci_init4_req(struct hci_request *req, unsigned long opt)
653 struct hci_dev *hdev = req->hdev;
655 /* Check for Synchronization Train support */
656 if (hdev->features[2][0] & 0x04)
657 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
660 static int __hci_init(struct hci_dev *hdev)
664 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
668 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
669 * BR/EDR/LE type controllers. AMP controllers only need the
672 if (hdev->dev_type != HCI_BREDR)
675 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
679 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
683 return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
686 static void hci_scan_req(struct hci_request *req, unsigned long opt)
690 BT_DBG("%s %x", req->hdev->name, scan);
692 /* Inquiry and Page scans */
693 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
696 static void hci_auth_req(struct hci_request *req, unsigned long opt)
700 BT_DBG("%s %x", req->hdev->name, auth);
703 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
706 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
710 BT_DBG("%s %x", req->hdev->name, encrypt);
713 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
716 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
718 __le16 policy = cpu_to_le16(opt);
720 BT_DBG("%s %x", req->hdev->name, policy);
722 /* Default link policy */
723 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
726 /* Get HCI device by index.
727 * Device is held on return. */
728 struct hci_dev *hci_dev_get(int index)
730 struct hci_dev *hdev = NULL, *d;
737 read_lock(&hci_dev_list_lock);
738 list_for_each_entry(d, &hci_dev_list, list) {
739 if (d->id == index) {
740 hdev = hci_dev_hold(d);
744 read_unlock(&hci_dev_list_lock);
748 /* ---- Inquiry support ---- */
750 bool hci_discovery_active(struct hci_dev *hdev)
752 struct discovery_state *discov = &hdev->discovery;
754 switch (discov->state) {
755 case DISCOVERY_FINDING:
756 case DISCOVERY_RESOLVING:
764 void hci_discovery_set_state(struct hci_dev *hdev, int state)
766 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
768 if (hdev->discovery.state == state)
772 case DISCOVERY_STOPPED:
773 if (hdev->discovery.state != DISCOVERY_STARTING)
774 mgmt_discovering(hdev, 0);
776 case DISCOVERY_STARTING:
778 case DISCOVERY_FINDING:
779 mgmt_discovering(hdev, 1);
781 case DISCOVERY_RESOLVING:
783 case DISCOVERY_STOPPING:
787 hdev->discovery.state = state;
790 void hci_inquiry_cache_flush(struct hci_dev *hdev)
792 struct discovery_state *cache = &hdev->discovery;
793 struct inquiry_entry *p, *n;
795 list_for_each_entry_safe(p, n, &cache->all, all) {
800 INIT_LIST_HEAD(&cache->unknown);
801 INIT_LIST_HEAD(&cache->resolve);
804 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
807 struct discovery_state *cache = &hdev->discovery;
808 struct inquiry_entry *e;
810 BT_DBG("cache %p, %pMR", cache, bdaddr);
812 list_for_each_entry(e, &cache->all, all) {
813 if (!bacmp(&e->data.bdaddr, bdaddr))
820 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
823 struct discovery_state *cache = &hdev->discovery;
824 struct inquiry_entry *e;
826 BT_DBG("cache %p, %pMR", cache, bdaddr);
828 list_for_each_entry(e, &cache->unknown, list) {
829 if (!bacmp(&e->data.bdaddr, bdaddr))
836 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
840 struct discovery_state *cache = &hdev->discovery;
841 struct inquiry_entry *e;
843 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
845 list_for_each_entry(e, &cache->resolve, list) {
846 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
848 if (!bacmp(&e->data.bdaddr, bdaddr))
855 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
856 struct inquiry_entry *ie)
858 struct discovery_state *cache = &hdev->discovery;
859 struct list_head *pos = &cache->resolve;
860 struct inquiry_entry *p;
864 list_for_each_entry(p, &cache->resolve, list) {
865 if (p->name_state != NAME_PENDING &&
866 abs(p->data.rssi) >= abs(ie->data.rssi))
871 list_add(&ie->list, pos);
874 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
875 bool name_known, bool *ssp)
877 struct discovery_state *cache = &hdev->discovery;
878 struct inquiry_entry *ie;
880 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
882 hci_remove_remote_oob_data(hdev, &data->bdaddr);
885 *ssp = data->ssp_mode;
887 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
889 if (ie->data.ssp_mode && ssp)
892 if (ie->name_state == NAME_NEEDED &&
893 data->rssi != ie->data.rssi) {
894 ie->data.rssi = data->rssi;
895 hci_inquiry_cache_update_resolve(hdev, ie);
901 /* Entry not in the cache. Add new one. */
902 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
906 list_add(&ie->all, &cache->all);
909 ie->name_state = NAME_KNOWN;
911 ie->name_state = NAME_NOT_KNOWN;
912 list_add(&ie->list, &cache->unknown);
916 if (name_known && ie->name_state != NAME_KNOWN &&
917 ie->name_state != NAME_PENDING) {
918 ie->name_state = NAME_KNOWN;
922 memcpy(&ie->data, data, sizeof(*data));
923 ie->timestamp = jiffies;
924 cache->timestamp = jiffies;
926 if (ie->name_state == NAME_NOT_KNOWN)
932 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
934 struct discovery_state *cache = &hdev->discovery;
935 struct inquiry_info *info = (struct inquiry_info *) buf;
936 struct inquiry_entry *e;
939 list_for_each_entry(e, &cache->all, all) {
940 struct inquiry_data *data = &e->data;
945 bacpy(&info->bdaddr, &data->bdaddr);
946 info->pscan_rep_mode = data->pscan_rep_mode;
947 info->pscan_period_mode = data->pscan_period_mode;
948 info->pscan_mode = data->pscan_mode;
949 memcpy(info->dev_class, data->dev_class, 3);
950 info->clock_offset = data->clock_offset;
956 BT_DBG("cache %p, copied %d", cache, copied);
960 static void hci_inq_req(struct hci_request *req, unsigned long opt)
962 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
963 struct hci_dev *hdev = req->hdev;
964 struct hci_cp_inquiry cp;
966 BT_DBG("%s", hdev->name);
968 if (test_bit(HCI_INQUIRY, &hdev->flags))
972 memcpy(&cp.lap, &ir->lap, 3);
973 cp.length = ir->length;
974 cp.num_rsp = ir->num_rsp;
975 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
978 static int wait_inquiry(void *word)
981 return signal_pending(current);
984 int hci_inquiry(void __user *arg)
986 __u8 __user *ptr = arg;
987 struct hci_inquiry_req ir;
988 struct hci_dev *hdev;
989 int err = 0, do_inquiry = 0, max_rsp;
993 if (copy_from_user(&ir, ptr, sizeof(ir)))
996 hdev = hci_dev_get(ir.dev_id);
1000 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1006 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1007 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1008 hci_inquiry_cache_flush(hdev);
1011 hci_dev_unlock(hdev);
1013 timeo = ir.length * msecs_to_jiffies(2000);
1016 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1021 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1022 * cleared). If it is interrupted by a signal, return -EINTR.
1024 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1025 TASK_INTERRUPTIBLE))
1029 /* for unlimited number of responses we will use buffer with
1032 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1034 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1035 * copy it to the user space.
1037 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1044 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1045 hci_dev_unlock(hdev);
1047 BT_DBG("num_rsp %d", ir.num_rsp);
1049 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1051 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1064 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1066 u8 ad_len = 0, flags = 0;
1069 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1070 flags |= LE_AD_GENERAL;
1072 if (!lmp_bredr_capable(hdev))
1073 flags |= LE_AD_NO_BREDR;
1075 if (lmp_le_br_capable(hdev))
1076 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1078 if (lmp_host_le_br_capable(hdev))
1079 flags |= LE_AD_SIM_LE_BREDR_HOST;
1082 BT_DBG("adv flags 0x%02x", flags);
1092 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1094 ptr[1] = EIR_TX_POWER;
1095 ptr[2] = (u8) hdev->adv_tx_power;
1101 name_len = strlen(hdev->dev_name);
1103 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1105 if (name_len > max_len) {
1107 ptr[1] = EIR_NAME_SHORT;
1109 ptr[1] = EIR_NAME_COMPLETE;
1111 ptr[0] = name_len + 1;
1113 memcpy(ptr + 2, hdev->dev_name, name_len);
1115 ad_len += (name_len + 2);
1116 ptr += (name_len + 2);
1122 void hci_update_ad(struct hci_request *req)
1124 struct hci_dev *hdev = req->hdev;
1125 struct hci_cp_le_set_adv_data cp;
1128 if (!lmp_le_capable(hdev))
1131 memset(&cp, 0, sizeof(cp));
1133 len = create_ad(hdev, cp.data);
1135 if (hdev->adv_data_len == len &&
1136 memcmp(cp.data, hdev->adv_data, len) == 0)
1139 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1140 hdev->adv_data_len = len;
1144 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1147 /* ---- HCI ioctl helpers ---- */
1149 int hci_dev_open(__u16 dev)
1151 struct hci_dev *hdev;
1154 hdev = hci_dev_get(dev);
1158 BT_DBG("%s %p", hdev->name, hdev);
1162 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1167 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1172 if (test_bit(HCI_UP, &hdev->flags)) {
1177 if (hdev->open(hdev)) {
1182 atomic_set(&hdev->cmd_cnt, 1);
1183 set_bit(HCI_INIT, &hdev->flags);
1185 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1186 ret = hdev->setup(hdev);
1189 /* Treat all non BR/EDR controllers as raw devices if
1190 * enable_hs is not set.
1192 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1193 set_bit(HCI_RAW, &hdev->flags);
1195 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1196 set_bit(HCI_RAW, &hdev->flags);
1198 if (!test_bit(HCI_RAW, &hdev->flags) &&
1199 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1200 ret = __hci_init(hdev);
1203 clear_bit(HCI_INIT, &hdev->flags);
1207 set_bit(HCI_UP, &hdev->flags);
1208 hci_notify(hdev, HCI_DEV_UP);
1209 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1210 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1211 mgmt_valid_hdev(hdev)) {
1213 mgmt_powered(hdev, 1);
1214 hci_dev_unlock(hdev);
1217 /* Init failed, cleanup */
1218 flush_work(&hdev->tx_work);
1219 flush_work(&hdev->cmd_work);
1220 flush_work(&hdev->rx_work);
1222 skb_queue_purge(&hdev->cmd_q);
1223 skb_queue_purge(&hdev->rx_q);
1228 if (hdev->sent_cmd) {
1229 kfree_skb(hdev->sent_cmd);
1230 hdev->sent_cmd = NULL;
1238 hci_req_unlock(hdev);
1243 static int hci_dev_do_close(struct hci_dev *hdev)
1245 BT_DBG("%s %p", hdev->name, hdev);
1247 cancel_delayed_work(&hdev->power_off);
1249 hci_req_cancel(hdev, ENODEV);
1252 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1253 del_timer_sync(&hdev->cmd_timer);
1254 hci_req_unlock(hdev);
1258 /* Flush RX and TX works */
1259 flush_work(&hdev->tx_work);
1260 flush_work(&hdev->rx_work);
1262 if (hdev->discov_timeout > 0) {
1263 cancel_delayed_work(&hdev->discov_off);
1264 hdev->discov_timeout = 0;
1265 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1268 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1269 cancel_delayed_work(&hdev->service_cache);
1271 cancel_delayed_work_sync(&hdev->le_scan_disable);
1274 hci_inquiry_cache_flush(hdev);
1275 hci_conn_hash_flush(hdev);
1276 hci_dev_unlock(hdev);
1278 hci_notify(hdev, HCI_DEV_DOWN);
1284 skb_queue_purge(&hdev->cmd_q);
1285 atomic_set(&hdev->cmd_cnt, 1);
1286 if (!test_bit(HCI_RAW, &hdev->flags) &&
1287 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1288 set_bit(HCI_INIT, &hdev->flags);
1289 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1290 clear_bit(HCI_INIT, &hdev->flags);
1293 /* flush cmd work */
1294 flush_work(&hdev->cmd_work);
1297 skb_queue_purge(&hdev->rx_q);
1298 skb_queue_purge(&hdev->cmd_q);
1299 skb_queue_purge(&hdev->raw_q);
1301 /* Drop last sent command */
1302 if (hdev->sent_cmd) {
1303 del_timer_sync(&hdev->cmd_timer);
1304 kfree_skb(hdev->sent_cmd);
1305 hdev->sent_cmd = NULL;
1308 kfree_skb(hdev->recv_evt);
1309 hdev->recv_evt = NULL;
1311 /* After this point our queues are empty
1312 * and no tasks are scheduled. */
1317 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1319 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1320 mgmt_valid_hdev(hdev)) {
1322 mgmt_powered(hdev, 0);
1323 hci_dev_unlock(hdev);
1326 /* Controller radio is available but is currently powered down */
1327 hdev->amp_status = 0;
1329 memset(hdev->eir, 0, sizeof(hdev->eir));
1330 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1332 hci_req_unlock(hdev);
1338 int hci_dev_close(__u16 dev)
1340 struct hci_dev *hdev;
1343 hdev = hci_dev_get(dev);
1347 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1352 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1353 cancel_delayed_work(&hdev->power_off);
1355 err = hci_dev_do_close(hdev);
1362 int hci_dev_reset(__u16 dev)
1364 struct hci_dev *hdev;
1367 hdev = hci_dev_get(dev);
1373 if (!test_bit(HCI_UP, &hdev->flags)) {
1378 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1384 skb_queue_purge(&hdev->rx_q);
1385 skb_queue_purge(&hdev->cmd_q);
1388 hci_inquiry_cache_flush(hdev);
1389 hci_conn_hash_flush(hdev);
1390 hci_dev_unlock(hdev);
1395 atomic_set(&hdev->cmd_cnt, 1);
1396 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1398 if (!test_bit(HCI_RAW, &hdev->flags))
1399 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1402 hci_req_unlock(hdev);
1407 int hci_dev_reset_stat(__u16 dev)
1409 struct hci_dev *hdev;
1412 hdev = hci_dev_get(dev);
1416 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1421 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1428 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1430 struct hci_dev *hdev;
1431 struct hci_dev_req dr;
1434 if (copy_from_user(&dr, arg, sizeof(dr)))
1437 hdev = hci_dev_get(dr.dev_id);
1441 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1448 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1453 if (!lmp_encrypt_capable(hdev)) {
1458 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1459 /* Auth must be enabled first */
1460 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1466 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1471 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1476 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1480 case HCISETLINKMODE:
1481 hdev->link_mode = ((__u16) dr.dev_opt) &
1482 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1486 hdev->pkt_type = (__u16) dr.dev_opt;
1490 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1491 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1495 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1496 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1509 int hci_get_dev_list(void __user *arg)
1511 struct hci_dev *hdev;
1512 struct hci_dev_list_req *dl;
1513 struct hci_dev_req *dr;
1514 int n = 0, size, err;
1517 if (get_user(dev_num, (__u16 __user *) arg))
1520 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1523 size = sizeof(*dl) + dev_num * sizeof(*dr);
1525 dl = kzalloc(size, GFP_KERNEL);
1531 read_lock(&hci_dev_list_lock);
1532 list_for_each_entry(hdev, &hci_dev_list, list) {
1533 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1534 cancel_delayed_work(&hdev->power_off);
1536 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1537 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1539 (dr + n)->dev_id = hdev->id;
1540 (dr + n)->dev_opt = hdev->flags;
1545 read_unlock(&hci_dev_list_lock);
1548 size = sizeof(*dl) + n * sizeof(*dr);
1550 err = copy_to_user(arg, dl, size);
1553 return err ? -EFAULT : 0;
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot
 * for the device ID supplied by userspace and copy it back.
 */
1556 int hci_get_dev_info(void __user *arg)
1558 struct hci_dev *hdev;
1559 struct hci_dev_info di;
1562 if (copy_from_user(&di, arg, sizeof(di)))
1565 hdev = hci_dev_get(di.dev_id);
/* Querying a device cancels any pending automatic power-off. */
1569 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1570 cancel_delayed_work_sync(&hdev->power_off);
1572 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1573 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1575 strcpy(di.name, hdev->name);
1576 di.bdaddr = hdev->bdaddr;
/* Pack bus type into the low nibble and device type into the high one. */
1577 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1578 di.flags = hdev->flags;
1579 di.pkt_type = hdev->pkt_type;
/* LE-only controllers report their LE buffer sizes in the ACL fields. */
1580 if (lmp_bredr_capable(hdev)) {
1581 di.acl_mtu = hdev->acl_mtu;
1582 di.acl_pkts = hdev->acl_pkts;
1583 di.sco_mtu = hdev->sco_mtu;
1584 di.sco_pkts = hdev->sco_pkts;
1586 di.acl_mtu = hdev->le_mtu;
1587 di.acl_pkts = hdev->le_pkts;
1591 di.link_policy = hdev->link_policy;
1592 di.link_mode = hdev->link_mode;
1594 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1595 memcpy(&di.features, &hdev->features, sizeof(di.features));
1597 if (copy_to_user(arg, &di, sizeof(di)))
1605 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: blocking the switch closes the device; refuse while
 * the device is owned by a user-channel socket.
 */
1607 static int hci_rfkill_set_block(void *data, bool blocked)
1609 struct hci_dev *hdev = data;
1611 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1613 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1619 hci_dev_do_close(hdev);
/* rfkill operations table for HCI controllers. */
1624 static const struct rfkill_ops hci_rfkill_ops = {
1625 .set_block = hci_rfkill_set_block,
/* Deferred power-on work: open the device, report an open failure over
 * the management interface, arm the automatic power-off timer, and
 * announce the new index once setup has finished.
 */
1628 static void hci_power_on(struct work_struct *work)
1630 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1633 BT_DBG("%s", hdev->name);
1635 err = hci_dev_open(hdev->id);
1637 mgmt_set_powered_failed(hdev, err);
/* Power the device back off automatically if nobody claims it in time. */
1641 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1642 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1643 HCI_AUTO_OFF_TIMEOUT);
1645 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1646 mgmt_index_added(hdev);
/* Deferred power-off work: simply close the device. */
1649 static void hci_power_off(struct work_struct *work)
1651 struct hci_dev *hdev = container_of(work, struct hci_dev,
1654 BT_DBG("%s", hdev->name);
1656 hci_dev_do_close(hdev);
/* Discoverable-timeout work: drop inquiry scan (keep page scan only)
 * and clear the stored discoverable timeout.
 */
1659 static void hci_discov_off(struct work_struct *work)
1661 struct hci_dev *hdev;
1662 u8 scan = SCAN_PAGE;
1664 hdev = container_of(work, struct hci_dev, discov_off.work);
1666 BT_DBG("%s", hdev->name);
1670 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1672 hdev->discov_timeout = 0;
1674 hci_dev_unlock(hdev);
/* Release every UUID registered for @hdev. */
1677 int hci_uuids_clear(struct hci_dev *hdev)
1679 struct bt_uuid *uuid, *tmp;
1681 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1682 list_del(&uuid->list);
/* Release every stored BR/EDR link key for @hdev. */
1689 int hci_link_keys_clear(struct hci_dev *hdev)
1691 struct list_head *p, *n;
1693 list_for_each_safe(p, n, &hdev->link_keys) {
1694 struct link_key *key;
1696 key = list_entry(p, struct link_key, list);
/* Release every stored SMP long term key for @hdev. */
1705 int hci_smp_ltks_clear(struct hci_dev *hdev)
1707 struct smp_ltk *k, *tmp;
1709 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Look up the stored link key for @bdaddr. */
1717 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1721 list_for_each_entry(k, &hdev->link_keys, list)
1722 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored
 * persistently, based on the key type and both sides' authentication
 * requirements.
 */
1728 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1729 u8 key_type, u8 old_key_type)
/* Legacy key types below 0x03 are handled first. */
1732 if (key_type < 0x03)
1735 /* Debug keys are insecure so don't store them persistently */
1736 if (key_type == HCI_LK_DEBUG_COMBINATION)
1739 /* Changed combination key and there's no previous one */
1740 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1743 /* Security mode 3 case */
1747 /* Neither local nor remote side had no-bonding as requirement */
1748 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1751 /* Local side had dedicated bonding as requirement */
1752 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1755 /* Remote side had dedicated bonding as requirement */
1756 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1759 /* If none of the above criteria match, then don't store the key
/* Look up a long term key by its EDiv and Rand values. */
1764 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1768 list_for_each_entry(k, &hdev->long_term_keys, list) {
1769 if (k->ediv != ediv ||
1770 memcmp(rand, k->rand, sizeof(k->rand)))
/* Look up a long term key by remote address and address type. */
1779 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1784 list_for_each_entry(k, &hdev->long_term_keys, list)
1785 if (addr_type == k->bdaddr_type &&
1786 bacmp(bdaddr, &k->bdaddr) == 0)
/* Store (or update) the BR/EDR link key for @bdaddr and tell the
 * management interface whether the key should persist.
 */
1792 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1793 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1795 struct link_key *key, *old_key;
1799 old_key = hci_find_link_key(hdev, bdaddr);
1801 old_key_type = old_key->type;
/* 0xff marks "no previous key type known". */
1804 old_key_type = conn ? conn->key_type : 0xff;
1805 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1808 list_add(&key->list, &hdev->link_keys);
1811 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1813 /* Some buggy controller combinations generate a changed
1814 * combination key for legacy pairing even when there's no
/* Work around such controllers by downgrading to a combination key. */
1816 if (type == HCI_LK_CHANGED_COMBINATION &&
1817 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1818 type = HCI_LK_COMBINATION;
1820 conn->key_type = type;
1823 bacpy(&key->bdaddr, bdaddr);
1824 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1825 key->pin_len = pin_len;
/* A changed combination key keeps the previous key's type. */
1827 if (type == HCI_LK_CHANGED_COMBINATION)
1828 key->type = old_key_type;
1835 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1837 mgmt_new_link_key(hdev, key, persistent);
/* Non-persistent keys are flushed from the controller on disconnect. */
1840 conn->flush_key = !persistent;
/* Store (or update) an SMP key; only real LTKs (not short term keys)
 * are reported to the management interface.
 */
1845 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1846 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1849 struct smp_ltk *key, *old_key;
/* Only STK and LTK key types are accepted. */
1851 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1854 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1858 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1861 list_add(&key->list, &hdev->long_term_keys);
1864 bacpy(&key->bdaddr, bdaddr);
1865 key->bdaddr_type = addr_type;
1866 memcpy(key->val, tk, sizeof(key->val));
1867 key->authenticated = authenticated;
1869 key->enc_size = enc_size;
1871 memcpy(key->rand, rand, sizeof(key->rand));
1876 if (type & HCI_SMP_LTK)
1877 mgmt_new_ltk(hdev, key, 1);
/* Delete the stored link key for @bdaddr, if any. */
1882 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1884 struct link_key *key;
1886 key = hci_find_link_key(hdev, bdaddr);
1890 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1892 list_del(&key->list);
/* Delete all stored long term keys for @bdaddr. */
1898 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1900 struct smp_ltk *k, *tmp;
1902 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1903 if (bacmp(bdaddr, &k->bdaddr))
1906 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1915 /* HCI command timer function */
/* Fires when the controller failed to answer the last HCI command in
 * time: log the stuck opcode and kick the command worker again.
 */
1916 static void hci_cmd_timeout(unsigned long arg)
1918 struct hci_dev *hdev = (void *) arg;
1920 if (hdev->sent_cmd) {
1921 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1922 u16 opcode = __le16_to_cpu(sent->opcode);
1924 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1926 BT_ERR("%s command tx timeout", hdev->name);
/* Grant one command credit so the next command can go out anyway. */
1929 atomic_set(&hdev->cmd_cnt, 1);
1930 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote out-of-band pairing data for @bdaddr. */
1933 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1936 struct oob_data *data;
1938 list_for_each_entry(data, &hdev->remote_oob_data, list)
1939 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Delete the stored remote OOB data for @bdaddr, if any. */
1945 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1947 struct oob_data *data;
1949 data = hci_find_remote_oob_data(hdev, bdaddr);
1953 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1955 list_del(&data->list);
/* Release all stored remote OOB pairing data for @hdev. */
1961 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1963 struct oob_data *data, *n;
1965 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1966 list_del(&data->list);
/* Store (or update) the remote OOB hash/randomizer pair for @bdaddr. */
1973 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1976 struct oob_data *data;
/* Reuse an existing entry when present, otherwise allocate a new one. */
1978 data = hci_find_remote_oob_data(hdev, bdaddr);
1981 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1985 bacpy(&data->bdaddr, bdaddr);
1986 list_add(&data->list, &hdev->remote_oob_data);
1989 memcpy(data->hash, hash, sizeof(data->hash));
1990 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1992 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Look up a blacklist entry for @bdaddr. */
1997 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1999 struct bdaddr_list *b;
2001 list_for_each_entry(b, &hdev->blacklist, list)
2002 if (bacmp(bdaddr, &b->bdaddr) == 0)
/* Release every blacklist entry for @hdev. */
2008 int hci_blacklist_clear(struct hci_dev *hdev)
2010 struct list_head *p, *n;
2012 list_for_each_safe(p, n, &hdev->blacklist) {
2013 struct bdaddr_list *b;
2015 b = list_entry(p, struct bdaddr_list, list);
/* Add @bdaddr to the blacklist and notify the management interface.
 * BDADDR_ANY and duplicate entries are rejected.
 */
2024 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2026 struct bdaddr_list *entry;
2028 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2031 if (hci_blacklist_lookup(hdev, bdaddr))
2034 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2038 bacpy(&entry->bdaddr, bdaddr);
2040 list_add(&entry->list, &hdev->blacklist);
2042 return mgmt_device_blocked(hdev, bdaddr, type);
/* Remove @bdaddr from the blacklist and notify the management
 * interface; BDADDR_ANY clears the whole list instead.
 */
2045 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2047 struct bdaddr_list *entry;
2049 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2050 return hci_blacklist_clear(hdev);
2052 entry = hci_blacklist_lookup(hdev, bdaddr);
2056 list_del(&entry->list);
2059 return mgmt_device_unblocked(hdev, bdaddr, type);
/* Request callback: mark discovery as stopped when the inquiry
 * finishes (a non-zero status is logged as a start failure).
 */
2062 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2065 BT_ERR("Failed to start inquiry: status %d", status);
2068 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2069 hci_dev_unlock(hdev);
/* Request callback run after LE scanning was disabled: for LE-only
 * discovery we are done; for interleaved discovery continue with a
 * classic inquiry phase.
 */
2074 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2076 /* General inquiry access code (GIAC) */
2077 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2078 struct hci_request req;
2079 struct hci_cp_inquiry cp;
2083 BT_ERR("Failed to disable LE scanning: status %d", status);
2087 switch (hdev->discovery.type) {
2088 case DISCOV_TYPE_LE:
2090 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2091 hci_dev_unlock(hdev);
2094 case DISCOV_TYPE_INTERLEAVED:
2095 hci_req_init(&req, hdev);
2097 memset(&cp, 0, sizeof(cp));
2098 memcpy(&cp.lap, lap, sizeof(cp.lap));
2099 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2100 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Start the inquiry phase with a clean inquiry cache. */
2104 hci_inquiry_cache_flush(hdev);
2106 err = hci_req_run(&req, inquiry_complete);
2108 BT_ERR("Inquiry request failed: err %d", err);
2109 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2112 hci_dev_unlock(hdev);
/* Delayed work that turns off LE scanning by sending LE Set Scan
 * Enable (disable) as an asynchronous request.
 */
2117 static void le_scan_disable_work(struct work_struct *work)
2119 struct hci_dev *hdev = container_of(work, struct hci_dev,
2120 le_scan_disable.work);
2121 struct hci_cp_le_set_scan_enable cp;
2122 struct hci_request req;
2125 BT_DBG("%s", hdev->name);
2127 hci_req_init(&req, hdev);
2129 memset(&cp, 0, sizeof(cp));
2130 cp.enable = LE_SCAN_DISABLE;
2131 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2133 err = hci_req_run(&req, le_scan_disable_work_complete);
2135 BT_ERR("Disable LE scanning request failed: err %d", err);
2138 /* Alloc HCI device */
/* Allocate and initialize a new hci_dev with default parameters,
 * locks, lists, work items, queues and the command timer. The caller
 * must register it with hci_register_dev() or release it with
 * hci_free_dev().
 */
2139 struct hci_dev *hci_alloc_dev(void)
2141 struct hci_dev *hdev;
2143 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
/* Conservative defaults until the controller capabilities are read. */
2147 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2148 hdev->esco_type = (ESCO_HV1);
2149 hdev->link_mode = (HCI_LM_ACCEPT);
2150 hdev->io_capability = 0x03; /* No Input No Output */
2151 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2152 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2154 hdev->sniff_max_interval = 800;
2155 hdev->sniff_min_interval = 80;
2157 mutex_init(&hdev->lock);
2158 mutex_init(&hdev->req_lock);
2160 INIT_LIST_HEAD(&hdev->mgmt_pending);
2161 INIT_LIST_HEAD(&hdev->blacklist);
2162 INIT_LIST_HEAD(&hdev->uuids);
2163 INIT_LIST_HEAD(&hdev->link_keys);
2164 INIT_LIST_HEAD(&hdev->long_term_keys);
2165 INIT_LIST_HEAD(&hdev->remote_oob_data);
2166 INIT_LIST_HEAD(&hdev->conn_hash.list);
2168 INIT_WORK(&hdev->rx_work, hci_rx_work);
2169 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2170 INIT_WORK(&hdev->tx_work, hci_tx_work);
2171 INIT_WORK(&hdev->power_on, hci_power_on);
2173 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2174 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2175 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2177 skb_queue_head_init(&hdev->rx_q);
2178 skb_queue_head_init(&hdev->cmd_q);
2179 skb_queue_head_init(&hdev->raw_q);
2181 init_waitqueue_head(&hdev->req_wait_q);
/* The command timer fires when the controller stops responding. */
2183 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2185 hci_init_sysfs(hdev);
2186 discovery_init(hdev);
2190 EXPORT_SYMBOL(hci_alloc_dev);
2192 /* Free HCI device */
/* Drop the device reference; the memory is released via the
 * driver-model release callback once the last reference is gone.
 */
2193 void hci_free_dev(struct hci_dev *hdev)
2195 /* will free via device release */
2196 put_device(&hdev->dev);
2198 EXPORT_SYMBOL(hci_free_dev);
2200 /* Register HCI device */
/* Register an allocated hci_dev: assign an index, create its
 * workqueues, sysfs entries and rfkill switch, add it to the global
 * device list and schedule the initial power-on.
 */
2201 int hci_register_dev(struct hci_dev *hdev)
/* A driver must supply at least open and close callbacks. */
2205 if (!hdev->open || !hdev->close)
2208 /* Do not allow HCI_AMP devices to register at index 0,
2209 * so the index can be used as the AMP controller ID.
2211 switch (hdev->dev_type) {
2213 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2216 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2225 sprintf(hdev->name, "hci%d", id);
2228 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2230 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2231 WQ_MEM_RECLAIM, 1, hdev->name);
2232 if (!hdev->workqueue) {
2237 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2238 WQ_MEM_RECLAIM, 1, hdev->name);
2239 if (!hdev->req_workqueue) {
2240 destroy_workqueue(hdev->workqueue);
2245 error = hci_add_sysfs(hdev);
2249 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2250 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
/* rfkill is optional: on registration failure continue without it. */
2253 if (rfkill_register(hdev->rfkill) < 0) {
2254 rfkill_destroy(hdev->rfkill);
2255 hdev->rfkill = NULL;
2259 set_bit(HCI_SETUP, &hdev->dev_flags);
2261 if (hdev->dev_type != HCI_AMP)
2262 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2264 write_lock(&hci_dev_list_lock);
2265 list_add(&hdev->list, &hci_dev_list);
2266 write_unlock(&hci_dev_list_lock);
2268 hci_notify(hdev, HCI_DEV_REG);
2271 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwind: release resources in reverse order of acquisition. */
2276 destroy_workqueue(hdev->workqueue);
2277 destroy_workqueue(hdev->req_workqueue);
2279 ida_simple_remove(&hci_index_ida, hdev->id);
2283 EXPORT_SYMBOL(hci_register_dev);
2285 /* Unregister HCI device */
/* Tear down a registered hci_dev: remove it from the global list,
 * close it, flush pending work, notify the management interface and
 * release every per-device resource before returning the index.
 */
2286 void hci_unregister_dev(struct hci_dev *hdev)
2290 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2292 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2296 write_lock(&hci_dev_list_lock);
2297 list_del(&hdev->list);
2298 write_unlock(&hci_dev_list_lock);
2300 hci_dev_do_close(hdev);
/* Drop any partially reassembled packets still held. */
2302 for (i = 0; i < NUM_REASSEMBLY; i++)
2303 kfree_skb(hdev->reassembly[i]);
2305 cancel_work_sync(&hdev->power_on);
/* Only announce removal via mgmt for fully set-up devices. */
2307 if (!test_bit(HCI_INIT, &hdev->flags) &&
2308 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2310 mgmt_index_removed(hdev);
2311 hci_dev_unlock(hdev);
2314 /* mgmt_index_removed should take care of emptying the
2316 BUG_ON(!list_empty(&hdev->mgmt_pending));
2318 hci_notify(hdev, HCI_DEV_UNREG);
2321 rfkill_unregister(hdev->rfkill);
2322 rfkill_destroy(hdev->rfkill);
2325 hci_del_sysfs(hdev);
2327 destroy_workqueue(hdev->workqueue);
2328 destroy_workqueue(hdev->req_workqueue);
2331 hci_blacklist_clear(hdev);
2332 hci_uuids_clear(hdev);
2333 hci_link_keys_clear(hdev);
2334 hci_smp_ltks_clear(hdev);
2335 hci_remote_oob_data_clear(hdev);
2336 hci_dev_unlock(hdev);
2340 ida_simple_remove(&hci_index_ida, id);
2342 EXPORT_SYMBOL(hci_unregister_dev);
2344 /* Suspend HCI device */
2345 int hci_suspend_dev(struct hci_dev *hdev)
/* Only notifies HCI sockets; the driver handles the actual suspend. */
2347 hci_notify(hdev, HCI_DEV_SUSPEND);
2350 EXPORT_SYMBOL(hci_suspend_dev);
2352 /* Resume HCI device */
2353 int hci_resume_dev(struct hci_dev *hdev)
/* Only notifies HCI sockets; the driver handles the actual resume. */
2355 hci_notify(hdev, HCI_DEV_RESUME);
2358 EXPORT_SYMBOL(hci_resume_dev);
2360 /* Receive frame from HCI drivers */
/* Entry point for drivers delivering a complete frame: timestamp it,
 * queue it on the RX queue and schedule the RX worker. Frames are
 * rejected unless the device is up or still initializing.
 */
2361 int hci_recv_frame(struct sk_buff *skb)
2363 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2364 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2365 && !test_bit(HCI_INIT, &hdev->flags))) {
2371 bt_cb(skb)->incoming = 1;
2374 __net_timestamp(skb);
2376 skb_queue_tail(&hdev->rx_q, skb);
2377 queue_work(hdev->workqueue, &hdev->rx_work);
2381 EXPORT_SYMBOL(hci_recv_frame);
/* Incrementally reassemble an HCI packet of @type from driver data
 * into hdev->reassembly[index], delivering it through
 * hci_recv_frame() once complete. Returns the number of input bytes
 * left unconsumed, or a negative error.
 */
2383 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2384 int count, __u8 index)
2389 struct sk_buff *skb;
2390 struct bt_skb_cb *scb;
2392 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2393 index >= NUM_REASSEMBLY)
2396 skb = hdev->reassembly[index];
/* No partial packet yet: allocate one sized for the packet type. */
2400 case HCI_ACLDATA_PKT:
2401 len = HCI_MAX_FRAME_SIZE;
2402 hlen = HCI_ACL_HDR_SIZE;
2405 len = HCI_MAX_EVENT_SIZE;
2406 hlen = HCI_EVENT_HDR_SIZE;
2408 case HCI_SCODATA_PKT:
2409 len = HCI_MAX_SCO_SIZE;
2410 hlen = HCI_SCO_HDR_SIZE;
2414 skb = bt_skb_alloc(len, GFP_ATOMIC);
2418 scb = (void *) skb->cb;
2420 scb->pkt_type = type;
2422 skb->dev = (void *) hdev;
2423 hdev->reassembly[index] = skb;
/* Copy as much input as the current stage still expects. */
2427 scb = (void *) skb->cb;
2428 len = min_t(uint, scb->expect, count);
2430 memcpy(skb_put(skb, len), data, len);
/* Header complete: learn the payload length and sanity-check it
 * against the remaining tailroom, dropping the packet on overflow.
 */
2439 if (skb->len == HCI_EVENT_HDR_SIZE) {
2440 struct hci_event_hdr *h = hci_event_hdr(skb);
2441 scb->expect = h->plen;
2443 if (skb_tailroom(skb) < scb->expect) {
2445 hdev->reassembly[index] = NULL;
2451 case HCI_ACLDATA_PKT:
2452 if (skb->len == HCI_ACL_HDR_SIZE) {
2453 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2454 scb->expect = __le16_to_cpu(h->dlen);
2456 if (skb_tailroom(skb) < scb->expect) {
2458 hdev->reassembly[index] = NULL;
2464 case HCI_SCODATA_PKT:
2465 if (skb->len == HCI_SCO_HDR_SIZE) {
2466 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2467 scb->expect = h->dlen;
2469 if (skb_tailroom(skb) < scb->expect) {
2471 hdev->reassembly[index] = NULL;
2478 if (scb->expect == 0) {
2479 /* Complete frame */
2481 bt_cb(skb)->pkt_type = type;
2482 hci_recv_frame(skb);
2484 hdev->reassembly[index] = NULL;
/* Feed a typed fragment stream into the per-type reassembly slot. */
2492 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2496 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2500 rem = hci_reassembly(hdev, type, data, count, type - 1);
/* Advance past the bytes hci_reassembly() consumed. */
2504 data += (count - rem);
2510 EXPORT_SYMBOL(hci_recv_fragment);
2512 #define STREAM_REASSEMBLY 0
/* Reassemble from a raw byte stream where each packet is prefixed by
 * its H:4 type byte, using the dedicated stream reassembly slot.
 */
2514 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2520 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2523 struct { char type; } *pkt;
2525 /* Start of the frame */
/* Otherwise continue the packet already in progress. */
2532 type = bt_cb(skb)->pkt_type;
2534 rem = hci_reassembly(hdev, type, data, count,
2539 data += (count - rem);
2545 EXPORT_SYMBOL(hci_recv_stream_fragment);
2547 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback structure. */
2549 int hci_register_cb(struct hci_cb *cb)
2551 BT_DBG("%p name %s", cb, cb->name);
2553 write_lock(&hci_cb_list_lock);
2554 list_add(&cb->list, &hci_cb_list);
2555 write_unlock(&hci_cb_list_lock);
2559 EXPORT_SYMBOL(hci_register_cb);
/* Unregister an upper-protocol callback structure. */
2561 int hci_unregister_cb(struct hci_cb *cb)
2563 BT_DBG("%p name %s", cb, cb->name);
2565 write_lock(&hci_cb_list_lock);
2566 list_del(&cb->list);
2567 write_unlock(&hci_cb_list_lock);
2571 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one frame to the driver, after timestamping it and mirroring a
 * copy to the monitor socket and (in promiscuous mode) to raw HCI
 * sockets.
 */
2573 static int hci_send_frame(struct sk_buff *skb)
2575 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2582 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2585 __net_timestamp(skb);
2587 /* Send copy to monitor */
2588 hci_send_to_monitor(hdev, skb);
2590 if (atomic_read(&hdev->promisc)) {
2591 /* Send copy to the sockets */
2592 hci_send_to_sock(hdev, skb);
2595 /* Get rid of skb owner, prior to sending to the driver. */
2598 return hdev->send(skb);
/* Initialize an asynchronous HCI request against @hdev. */
2601 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2603 skb_queue_head_init(&req->cmd_q);
/* Commit a built request: splice its queued commands onto the device
 * command queue, attach the completion callback to the last command
 * and kick the command worker.
 */
2608 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2610 struct hci_dev *hdev = req->hdev;
2611 struct sk_buff *skb;
2612 unsigned long flags;
2614 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2616 /* If an error occured during request building, remove all HCI
2617 * commands queued on the HCI request queue.
2620 skb_queue_purge(&req->cmd_q);
2624 /* Do not allow empty requests */
2625 if (skb_queue_empty(&req->cmd_q))
/* The completion callback rides on the final command of the request. */
2628 skb = skb_peek_tail(&req->cmd_q);
2629 bt_cb(skb)->req.complete = complete;
2631 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2632 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2633 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2635 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Build an skb carrying one HCI command (header plus @plen parameter
 * bytes), tagged and ready for the command queue.
 */
2640 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2641 u32 plen, const void *param)
2643 int len = HCI_COMMAND_HDR_SIZE + plen;
2644 struct hci_command_hdr *hdr;
2645 struct sk_buff *skb;
2647 skb = bt_skb_alloc(len, GFP_ATOMIC);
2651 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2652 hdr->opcode = cpu_to_le16(opcode);
2656 memcpy(skb_put(skb, plen), param, plen);
2658 BT_DBG("skb len %d", skb->len);
2660 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2661 skb->dev = (void *) hdev;
2666 /* Send HCI command */
/* Queue a single stand-alone HCI command and wake the command worker. */
2667 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2670 struct sk_buff *skb;
2672 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2674 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2676 BT_ERR("%s no memory for command", hdev->name);
2680 /* Stand-alone HCI commands must be flaged as
2681 * single-command requests.
2683 bt_cb(skb)->req.start = true;
2685 skb_queue_tail(&hdev->cmd_q, skb);
2686 queue_work(hdev->workqueue, &hdev->cmd_work);
2691 /* Queue a command to an asynchronous HCI request */
/* Like hci_req_add() but also records which HCI event completes the
 * command. The first command queued marks the start of the request.
 */
2692 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2693 const void *param, u8 event)
2695 struct hci_dev *hdev = req->hdev;
2696 struct sk_buff *skb;
2698 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2700 /* If an error occured during request building, there is no point in
2701 * queueing the HCI command. We can simply return.
2706 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2708 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2709 hdev->name, opcode);
2714 if (skb_queue_empty(&req->cmd_q))
2715 bt_cb(skb)->req.start = true;
2717 bt_cb(skb)->req.event = event;
2719 skb_queue_tail(&req->cmd_q, skb);
/* Queue a request command completed by the default command event. */
2722 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2725 hci_req_add_ev(req, opcode, plen, param, 0);
2728 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last sent command,
 * provided its opcode matches @opcode.
 */
2729 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2731 struct hci_command_hdr *hdr;
2733 if (!hdev->sent_cmd)
2736 hdr = (void *) hdev->sent_cmd->data;
2738 if (hdr->opcode != cpu_to_le16(opcode))
2741 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2743 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL header (packed handle+flags and data length) to @skb. */
2747 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2749 struct hci_acl_hdr *hdr;
2752 skb_push(skb, HCI_ACL_HDR_SIZE);
2753 skb_reset_transport_header(skb);
2754 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2755 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2756 hdr->dlen = cpu_to_le16(len);
/* Split an ACL SDU (head skb plus frag_list) into individually
 * headered fragments and queue them atomically on @queue.
 */
2759 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2760 struct sk_buff *skb, __u16 flags)
2762 struct hci_conn *conn = chan->conn;
2763 struct hci_dev *hdev = conn->hdev;
2764 struct sk_buff *list;
2766 skb->len = skb_headlen(skb);
2769 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
/* AMP controllers address by channel handle, others by connection handle. */
2771 switch (hdev->dev_type) {
2773 hci_add_acl_hdr(skb, conn->handle, flags);
2776 hci_add_acl_hdr(skb, chan->handle, flags);
2779 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2783 list = skb_shinfo(skb)->frag_list;
2785 /* Non fragmented */
2786 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2788 skb_queue_tail(queue, skb);
2791 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2793 skb_shinfo(skb)->frag_list = NULL;
2795 /* Queue all fragments atomically */
2796 spin_lock(&queue->lock);
2798 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the ACL_START flag. */
2800 flags &= ~ACL_START;
2803 skb = list; list = list->next;
2805 skb->dev = (void *) hdev;
2806 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2807 hci_add_acl_hdr(skb, conn->handle, flags);
2809 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2811 __skb_queue_tail(queue, skb);
2814 spin_unlock(&queue->lock);
/* Queue ACL data on the channel and schedule the TX worker. */
2818 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2820 struct hci_dev *hdev = chan->conn->hdev;
2822 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2824 skb->dev = (void *) hdev;
2826 hci_queue_acl(chan, &chan->data_q, skb, flags);
2828 queue_work(hdev->workqueue, &hdev->tx_work);
/* Prepend a SCO header to @skb and queue it for transmission. */
2832 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2834 struct hci_dev *hdev = conn->hdev;
2835 struct hci_sco_hdr hdr;
2837 BT_DBG("%s len %d", hdev->name, skb->len);
2839 hdr.handle = cpu_to_le16(conn->handle);
2840 hdr.dlen = skb->len;
2842 skb_push(skb, HCI_SCO_HDR_SIZE);
2843 skb_reset_transport_header(skb);
2844 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2846 skb->dev = (void *) hdev;
2847 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2849 skb_queue_tail(&conn->data_q, skb);
2850 queue_work(hdev->workqueue, &hdev->tx_work);
2853 /* ---- HCI TX task (outgoing data) ---- */
2855 /* HCI Connection scheduler */
/* Pick the connection of @type with the fewest outstanding packets and
 * compute its fair share of the controller's buffer credits.
 */
2856 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2859 struct hci_conn_hash *h = &hdev->conn_hash;
2860 struct hci_conn *conn = NULL, *c;
2861 unsigned int num = 0, min = ~0;
2863 /* We don't have to lock device here. Connections are always
2864 * added and removed with TX task disabled. */
2868 list_for_each_entry_rcu(c, &h->list, list) {
2869 if (c->type != type || skb_queue_empty(&c->data_q))
2872 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2877 if (c->sent < min) {
2882 if (hci_conn_num(hdev, type) == num)
/* Pick the credit pool that matches the selected link type. */
2891 switch (conn->type) {
2893 cnt = hdev->acl_cnt;
2897 cnt = hdev->sco_cnt;
/* LE falls back to the ACL pool when no dedicated LE buffers exist. */
2900 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2904 BT_ERR("Unknown link type");
2912 BT_DBG("conn %p quote %d", conn, *quote);
/* Link TX timeout handling: disconnect every connection of @type that
 * still has unacknowledged packets outstanding.
 */
2916 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2918 struct hci_conn_hash *h = &hdev->conn_hash;
2921 BT_ERR("%s link tx timeout", hdev->name);
2925 /* Kill stalled connections */
2926 list_for_each_entry_rcu(c, &h->list, list) {
2927 if (c->type == type && c->sent) {
2928 BT_ERR("%s killing stalled connection %pMR",
2929 hdev->name, &c->dst);
2930 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel scheduler: choose the channel of @type with the highest
 * queued priority (ties broken by least-sent connection) and compute
 * its transmit quote from the matching credit pool.
 */
2937 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2940 struct hci_conn_hash *h = &hdev->conn_hash;
2941 struct hci_chan *chan = NULL;
2942 unsigned int num = 0, min = ~0, cur_prio = 0;
2943 struct hci_conn *conn;
2944 int cnt, q, conn_num = 0;
2946 BT_DBG("%s", hdev->name);
2950 list_for_each_entry_rcu(conn, &h->list, list) {
2951 struct hci_chan *tmp;
2953 if (conn->type != type)
2956 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2961 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2962 struct sk_buff *skb;
2964 if (skb_queue_empty(&tmp->data_q))
/* Priority is that of the channel's head-of-queue packet. */
2967 skb = skb_peek(&tmp->data_q);
2968 if (skb->priority < cur_prio)
2971 if (skb->priority > cur_prio) {
2974 cur_prio = skb->priority;
2979 if (conn->sent < min) {
2985 if (hci_conn_num(hdev, type) == conn_num)
/* Pick the credit pool that matches the selected link type. */
2994 switch (chan->conn->type) {
2996 cnt = hdev->acl_cnt;
2999 cnt = hdev->block_cnt;
3003 cnt = hdev->sco_cnt;
3006 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3010 BT_ERR("Unknown link type");
3015 BT_DBG("chan %p quote %d", chan, *quote);
/* Priority aging: promote starved head-of-queue packets of @type
 * toward HCI_PRIO_MAX - 1 so lower-priority channels eventually get
 * scheduled.
 */
3019 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3021 struct hci_conn_hash *h = &hdev->conn_hash;
3022 struct hci_conn *conn;
3025 BT_DBG("%s", hdev->name);
3029 list_for_each_entry_rcu(conn, &h->list, list) {
3030 struct hci_chan *chan;
3032 if (conn->type != type)
3035 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3040 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3041 struct sk_buff *skb;
3048 if (skb_queue_empty(&chan->data_q))
3051 skb = skb_peek(&chan->data_q);
/* Already at the promotion ceiling: nothing to do. */
3052 if (skb->priority >= HCI_PRIO_MAX - 1)
3055 skb->priority = HCI_PRIO_MAX - 1;
3057 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3061 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks consumed by @skb's payload. */
3069 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3071 /* Calculate count of blocks used by this packet */
3072 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* Detect a stalled ACL link: no credits left and no transmission for
 * longer than the ACL TX timeout window.
 */
3075 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3077 if (!test_bit(HCI_RAW, &hdev->flags)) {
3078 /* ACL tx timeout must be longer than maximum
3079 * link supervision timeout (40.9 seconds) */
3080 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3081 HCI_ACL_TX_TIMEOUT))
3082 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: drain per-channel queues within each
 * channel's quote while controller credits remain, then re-age
 * priorities if anything was sent.
 *
 * Fix: repair HTML-entity mangling in the extract - the third
 * hci_chan_sent() argument had been corrupted from "&quote" to ""e".
 */
3086 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3088 unsigned int cnt = hdev->acl_cnt;
3089 struct hci_chan *chan;
3090 struct sk_buff *skb;
3093 __check_timeout(hdev, cnt);
3095 while (hdev->acl_cnt &&
3096 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3097 u32 priority = (skb_peek(&chan->data_q))->priority;
3098 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3099 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3100 skb->len, skb->priority);
3102 /* Stop if priority has changed */
3103 if (skb->priority < priority)
3106 skb = skb_dequeue(&chan->data_q);
3108 hci_conn_enter_active_mode(chan->conn,
3109 bt_cb(skb)->force_active);
3111 hci_send_frame(skb);
3112 hdev->acl_last_tx = jiffies;
/* Credits were used: re-age channel priorities. */
3120 if (cnt != hdev->acl_cnt)
3121 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler: like hci_sched_acl_pkt() but accounting
 * in controller data blocks instead of packets (AMP controllers
 * schedule the AMP link type here).
 *
 * Fix: repair HTML-entity mangling in the extract - the third
 * hci_chan_sent() argument had been corrupted from "&quote" to ""e".
 */
3124 static void hci_sched_acl_blk(struct hci_dev *hdev)
3126 unsigned int cnt = hdev->block_cnt;
3127 struct hci_chan *chan;
3128 struct sk_buff *skb;
3132 __check_timeout(hdev, cnt);
3134 BT_DBG("%s", hdev->name);
3136 if (hdev->dev_type == HCI_AMP)
3141 while (hdev->block_cnt > 0 &&
3142 (chan = hci_chan_sent(hdev, type, &quote))) {
3143 u32 priority = (skb_peek(&chan->data_q))->priority;
3144 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3147 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3148 skb->len, skb->priority);
3150 /* Stop if priority has changed */
3151 if (skb->priority < priority)
3154 skb = skb_dequeue(&chan->data_q);
/* Never send a packet needing more blocks than remain. */
3156 blocks = __get_blocks(hdev, skb);
3157 if (blocks > hdev->block_cnt)
3160 hci_conn_enter_active_mode(chan->conn,
3161 bt_cb(skb)->force_active);
3163 hci_send_frame(skb);
3164 hdev->acl_last_tx = jiffies;
3166 hdev->block_cnt -= blocks;
3169 chan->sent += blocks;
3170 chan->conn->sent += blocks;
/* Blocks were used: re-age channel priorities. */
3174 if (cnt != hdev->block_cnt)
3175 hci_prio_recalculate(hdev, type);
/* Dispatch ACL scheduling according to the controller's flow control
 * mode (packet-based vs block-based).
 */
3178 static void hci_sched_acl(struct hci_dev *hdev)
3180 BT_DBG("%s", hdev->name);
3182 /* No ACL link over BR/EDR controller */
3183 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3186 /* No AMP link over AMP controller */
3187 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3190 switch (hdev->flow_ctl_mode) {
3191 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3192 hci_sched_acl_pkt(hdev);
3195 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3196 hci_sched_acl_blk(hdev);
/* SCO scheduler: serve SCO connections within their computed quotes
 * while SCO credits remain.
 *
 * Fix: repair HTML-entity mangling in the extract - the third
 * hci_low_sent() argument had been corrupted from "&quote" to ""e".
 */
3202 static void hci_sched_sco(struct hci_dev *hdev)
3204 struct hci_conn *conn;
3205 struct sk_buff *skb;
3208 BT_DBG("%s", hdev->name);
3210 if (!hci_conn_num(hdev, SCO_LINK))
3213 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3214 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3215 BT_DBG("skb %p len %d", skb, skb->len);
3216 hci_send_frame(skb);
/* NOTE(review): ~0 looks like a sent-counter wrap guard - confirm
 * against the full source; the reset line is not in this extract. */
3219 if (conn->sent == ~0)
/* eSCO scheduler: identical structure to hci_sched_sco() but for the
 * ESCO link type; shares the SCO credit counter.
 *
 * Fix: repair HTML-entity mangling in the extract - the third
 * hci_low_sent() argument had been corrupted from "&quote" to ""e".
 */
3225 static void hci_sched_esco(struct hci_dev *hdev)
3227 struct hci_conn *conn;
3228 struct sk_buff *skb;
3231 BT_DBG("%s", hdev->name);
3233 if (!hci_conn_num(hdev, ESCO_LINK))
3236 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3238 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3239 BT_DBG("skb %p len %d", skb, skb->len);
3240 hci_send_frame(skb);
/* NOTE(review): ~0 looks like a sent-counter wrap guard - confirm
 * against the full source; the reset line is not in this extract. */
3243 if (conn->sent == ~0)
/* LE scheduler: like packet-based ACL scheduling but using LE
 * credits, falling back to the ACL pool when the controller reports
 * no dedicated LE buffers.
 *
 * Fix: repair HTML-entity mangling in the extract - the third
 * hci_chan_sent() argument had been corrupted from "&quote" to ""e".
 */
3249 static void hci_sched_le(struct hci_dev *hdev)
3251 struct hci_chan *chan;
3252 struct sk_buff *skb;
3253 int quote, cnt, tmp;
3255 BT_DBG("%s", hdev->name);
3257 if (!hci_conn_num(hdev, LE_LINK))
3260 if (!test_bit(HCI_RAW, &hdev->flags)) {
3261 /* LE tx timeout must be longer than maximum
3262 * link supervision timeout (40.9 seconds) */
3263 if (!hdev->le_cnt && hdev->le_pkts &&
3264 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3265 hci_link_tx_to(hdev, LE_LINK);
3268 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3270 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3271 u32 priority = (skb_peek(&chan->data_q))->priority;
3272 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3273 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3274 skb->len, skb->priority);
3276 /* Stop if priority has changed */
3277 if (skb->priority < priority)
3280 skb = skb_dequeue(&chan->data_q);
3282 hci_send_frame(skb);
3283 hdev->le_last_tx = jiffies;
/* Without dedicated LE buffers, leftover credits go back to ACL. */
3294 hdev->acl_cnt = cnt;
3297 hci_prio_recalculate(hdev, LE_LINK);
/* TX worker: run the per-link-type schedulers (unless a user channel
 * owns the device) and then flush any raw queued packets.
 */
3300 static void hci_tx_work(struct work_struct *work)
3302 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3303 struct sk_buff *skb;
3305 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3306 hdev->sco_cnt, hdev->le_cnt);
3308 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3309 /* Schedule queues and send stuff to HCI driver */
3310 hci_sched_acl(hdev);
3311 hci_sched_sco(hdev);
3312 hci_sched_esco(hdev);
3316 /* Send next queued raw (unknown type) packet */
3317 while ((skb = skb_dequeue(&hdev->raw_q)))
3318 hci_send_frame(skb);
3321 /* ----- HCI RX task (incoming data processing) ----- */
3323 /* ACL data packet */
3324 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3326 struct hci_acl_hdr *hdr = (void *) skb->data;
3327 struct hci_conn *conn;
3328 __u16 handle, flags;
3330 skb_pull(skb, HCI_ACL_HDR_SIZE);
3332 handle = __le16_to_cpu(hdr->handle);
3333 flags = hci_flags(handle);
3334 handle = hci_handle(handle);
3336 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3339 hdev->stat.acl_rx++;
3342 conn = hci_conn_hash_lookup_handle(hdev, handle);
3343 hci_dev_unlock(hdev);
3346 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3348 /* Send to upper protocol */
3349 l2cap_recv_acldata(conn, skb, flags);
3352 BT_ERR("%s ACL packet for unknown connection handle %d",
3353 hdev->name, handle);
3359 /* SCO data packet */
3360 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3362 struct hci_sco_hdr *hdr = (void *) skb->data;
3363 struct hci_conn *conn;
3366 skb_pull(skb, HCI_SCO_HDR_SIZE);
3368 handle = __le16_to_cpu(hdr->handle);
3370 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3372 hdev->stat.sco_rx++;
3375 conn = hci_conn_hash_lookup_handle(hdev, handle);
3376 hci_dev_unlock(hdev);
3379 /* Send to upper protocol */
3380 sco_recv_scodata(conn, skb);
3383 BT_ERR("%s SCO packet for unknown connection handle %d",
3384 hdev->name, handle);
3390 static bool hci_req_is_complete(struct hci_dev *hdev)
3392 struct sk_buff *skb;
3394 skb = skb_peek(&hdev->cmd_q);
3398 return bt_cb(skb)->req.start;
3401 static void hci_resend_last(struct hci_dev *hdev)
3403 struct hci_command_hdr *sent;
3404 struct sk_buff *skb;
3407 if (!hdev->sent_cmd)
3410 sent = (void *) hdev->sent_cmd->data;
3411 opcode = __le16_to_cpu(sent->opcode);
3412 if (opcode == HCI_OP_RESET)
3415 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3419 skb_queue_head(&hdev->cmd_q, skb);
3420 queue_work(hdev->workqueue, &hdev->cmd_work);
3423 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3425 hci_req_complete_t req_complete = NULL;
3426 struct sk_buff *skb;
3427 unsigned long flags;
3429 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3431 /* If the completed command doesn't match the last one that was
3432 * sent we need to do special handling of it.
3434 if (!hci_sent_cmd_data(hdev, opcode)) {
3435 /* Some CSR based controllers generate a spontaneous
3436 * reset complete event during init and any pending
3437 * command will never be completed. In such a case we
3438 * need to resend whatever was the last sent
3441 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3442 hci_resend_last(hdev);
3447 /* If the command succeeded and there's still more commands in
3448 * this request the request is not yet complete.
3450 if (!status && !hci_req_is_complete(hdev))
3453 /* If this was the last command in a request the complete
3454 * callback would be found in hdev->sent_cmd instead of the
3455 * command queue (hdev->cmd_q).
3457 if (hdev->sent_cmd) {
3458 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3461 /* We must set the complete callback to NULL to
3462 * avoid calling the callback more than once if
3463 * this function gets called again.
3465 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3471 /* Remove all pending commands belonging to this request */
3472 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3473 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3474 if (bt_cb(skb)->req.start) {
3475 __skb_queue_head(&hdev->cmd_q, skb);
3479 req_complete = bt_cb(skb)->req.complete;
3482 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3486 req_complete(hdev, status);
3489 static void hci_rx_work(struct work_struct *work)
3491 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3492 struct sk_buff *skb;
3494 BT_DBG("%s", hdev->name);
3496 while ((skb = skb_dequeue(&hdev->rx_q))) {
3497 /* Send copy to monitor */
3498 hci_send_to_monitor(hdev, skb);
3500 if (atomic_read(&hdev->promisc)) {
3501 /* Send copy to the sockets */
3502 hci_send_to_sock(hdev, skb);
3505 if (test_bit(HCI_RAW, &hdev->flags) ||
3506 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3511 if (test_bit(HCI_INIT, &hdev->flags)) {
3512 /* Don't process data packets in this states. */
3513 switch (bt_cb(skb)->pkt_type) {
3514 case HCI_ACLDATA_PKT:
3515 case HCI_SCODATA_PKT:
3522 switch (bt_cb(skb)->pkt_type) {
3524 BT_DBG("%s Event packet", hdev->name);
3525 hci_event_packet(hdev, skb);
3528 case HCI_ACLDATA_PKT:
3529 BT_DBG("%s ACL data packet", hdev->name);
3530 hci_acldata_packet(hdev, skb);
3533 case HCI_SCODATA_PKT:
3534 BT_DBG("%s SCO data packet", hdev->name);
3535 hci_scodata_packet(hdev, skb);
3545 static void hci_cmd_work(struct work_struct *work)
3547 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3548 struct sk_buff *skb;
3550 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3551 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3553 /* Send queued commands */
3554 if (atomic_read(&hdev->cmd_cnt)) {
3555 skb = skb_dequeue(&hdev->cmd_q);
3559 kfree_skb(hdev->sent_cmd);
3561 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
3562 if (hdev->sent_cmd) {
3563 atomic_dec(&hdev->cmd_cnt);
3564 hci_send_frame(skb);
3565 if (test_bit(HCI_RESET, &hdev->flags))
3566 del_timer(&hdev->cmd_timer);
3568 mod_timer(&hdev->cmd_timer,
3569 jiffies + HCI_CMD_TIMEOUT);
3571 skb_queue_head(&hdev->cmd_q, skb);
3572 queue_work(hdev->workqueue, &hdev->cmd_work);
3577 u8 bdaddr_to_le(u8 bdaddr_type)
3579 switch (bdaddr_type) {
3580 case BDADDR_LE_PUBLIC:
3581 return ADDR_LE_DEV_PUBLIC;
3584 /* Fallback to LE Random address type */
3585 return ADDR_LE_DEV_RANDOM;