2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI connection handling. */
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
34 #include "hci_request.h"
45 static const struct sco_param esco_param_cvsd[] = {
46 { (EDR_ESCO_MASK & ~ESCO_2EV3) | SCO_ESCO_MASK | ESCO_EV3,
47 0x000a, 0x01 }, /* S3 */
48 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
49 { EDR_ESCO_MASK | ESCO_EV3, 0x0007, 0x01 }, /* S1 */
50 { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0x01 }, /* D1 */
51 { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0x01 }, /* D0 */
54 static const struct sco_param esco_param_cvsd[] = {
55 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */
56 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
57 { EDR_ESCO_MASK | ESCO_EV3, 0x0007, 0x01 }, /* S1 */
58 { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0x01 }, /* D1 */
59 { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0x01 }, /* D0 */
63 static const struct sco_param sco_param_cvsd[] = {
64 { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0xff }, /* D1 */
65 { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0xff }, /* D0 */
69 static const struct sco_param esco_param_msbc[] = {
70 { (EDR_ESCO_MASK & ~ESCO_2EV3) | ESCO_EV3,
71 0x000d, 0x02 }, /* T2 */
72 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */
75 static const struct sco_param esco_param_msbc[] = {
76 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */
77 { EDR_ESCO_MASK | ESCO_EV3, 0x0008, 0x02 }, /* T1 */
81 /* This function requires the caller holds hdev->lock */
82 static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
84 struct hci_conn_params *params;
85 struct hci_dev *hdev = conn->hdev;
91 bdaddr_type = conn->dst_type;
93 /* Check if we need to convert to identity address */
94 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
96 bdaddr = &irk->bdaddr;
97 bdaddr_type = irk->addr_type;
100 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
102 if (!params || !params->explicit_connect)
105 /* The connection attempt was doing scan for new RPA, and is
106 * in scan phase. If params are not associated with any other
107 * autoconnect action, remove them completely. If they are, just unmark
108 * them as waiting for connection, by clearing explicit_connect field.
110 params->explicit_connect = false;
112 list_del_init(¶ms->action);
114 switch (params->auto_connect) {
115 case HCI_AUTO_CONN_EXPLICIT:
116 hci_conn_params_del(hdev, bdaddr, bdaddr_type);
117 /* return instead of break to avoid duplicate scan update */
119 case HCI_AUTO_CONN_DIRECT:
120 case HCI_AUTO_CONN_ALWAYS:
121 list_add(¶ms->action, &hdev->pend_le_conns);
123 case HCI_AUTO_CONN_REPORT:
124 list_add(¶ms->action, &hdev->pend_le_reports);
130 hci_update_background_scan(hdev);
133 static void hci_conn_cleanup(struct hci_conn *conn)
135 struct hci_dev *hdev = conn->hdev;
137 if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
138 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
140 hci_chan_list_flush(conn);
142 hci_conn_hash_del(hdev, conn);
145 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
147 hci_conn_del_sysfs(conn);
149 debugfs_remove_recursive(conn->debugfs);
156 static void le_scan_cleanup(struct work_struct *work)
158 struct hci_conn *conn = container_of(work, struct hci_conn,
160 struct hci_dev *hdev = conn->hdev;
161 struct hci_conn *c = NULL;
163 BT_DBG("%s hcon %p", hdev->name, conn);
167 /* Check that the hci_conn is still around */
169 list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
176 hci_connect_le_scan_cleanup(conn);
177 hci_conn_cleanup(conn);
180 hci_dev_unlock(hdev);
185 static void hci_connect_le_scan_remove(struct hci_conn *conn)
187 BT_DBG("%s hcon %p", conn->hdev->name, conn);
189 /* We can't call hci_conn_del/hci_conn_cleanup here since that
190 * could deadlock with another hci_conn_del() call that's holding
191 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
192 * Instead, grab temporary extra references to the hci_dev and
193 * hci_conn and perform the necessary cleanup in a separate work
197 hci_dev_hold(conn->hdev);
200 /* Even though we hold a reference to the hdev, many other
201 * things might get cleaned up meanwhile, including the hdev's
202 * own workqueue, so we can't use that for scheduling.
204 schedule_work(&conn->le_scan_cleanup);
207 static void hci_acl_create_connection(struct hci_conn *conn)
209 struct hci_dev *hdev = conn->hdev;
210 struct inquiry_entry *ie;
211 struct hci_cp_create_conn cp;
213 BT_DBG("hcon %p", conn);
215 conn->state = BT_CONNECT;
217 conn->role = HCI_ROLE_MASTER;
221 conn->link_policy = hdev->link_policy;
223 memset(&cp, 0, sizeof(cp));
224 bacpy(&cp.bdaddr, &conn->dst);
225 cp.pscan_rep_mode = 0x02;
227 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
229 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
230 cp.pscan_rep_mode = ie->data.pscan_rep_mode;
231 cp.pscan_mode = ie->data.pscan_mode;
232 cp.clock_offset = ie->data.clock_offset |
236 memcpy(conn->dev_class, ie->data.dev_class, 3);
237 if (ie->data.ssp_mode > 0)
238 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
241 cp.pkt_type = cpu_to_le16(conn->pkt_type);
242 if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
243 cp.role_switch = 0x01;
245 cp.role_switch = 0x00;
247 hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
250 int hci_disconnect(struct hci_conn *conn, __u8 reason)
252 BT_DBG("hcon %p", conn);
254 /* When we are master of an established connection and it enters
255 * the disconnect timeout, then go ahead and try to read the
256 * current clock offset. Processing of the result is done
257 * within the event handling and hci_clock_offset_evt function.
259 if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
260 (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
261 struct hci_dev *hdev = conn->hdev;
262 struct hci_cp_read_clock_offset clkoff_cp;
264 clkoff_cp.handle = cpu_to_le16(conn->handle);
265 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
269 return hci_abort_conn(conn, reason);
272 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
274 struct hci_dev *hdev = conn->hdev;
275 struct hci_cp_add_sco cp;
277 BT_DBG("hcon %p", conn);
279 conn->state = BT_CONNECT;
284 cp.handle = cpu_to_le16(handle);
285 cp.pkt_type = cpu_to_le16(conn->pkt_type);
287 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
290 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
292 struct hci_dev *hdev = conn->hdev;
293 struct hci_cp_setup_sync_conn cp;
294 const struct sco_param *param;
296 BT_DBG("hcon %p", conn);
298 conn->state = BT_CONNECT;
303 cp.handle = cpu_to_le16(handle);
305 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
306 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
307 cp.voice_setting = cpu_to_le16(conn->setting);
309 switch (conn->setting & SCO_AIRMODE_MASK) {
310 case SCO_AIRMODE_TRANSP:
311 if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
313 param = &esco_param_msbc[conn->attempt - 1];
315 case SCO_AIRMODE_CVSD:
316 if (lmp_esco_capable(conn->link)) {
317 if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
319 param = &esco_param_cvsd[conn->attempt - 1];
321 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
323 param = &sco_param_cvsd[conn->attempt - 1];
330 cp.retrans_effort = param->retrans_effort;
331 cp.pkt_type = __cpu_to_le16(param->pkt_type);
332 cp.max_latency = __cpu_to_le16(param->max_latency);
334 if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
340 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
343 struct hci_dev *hdev = conn->hdev;
344 struct hci_conn_params *params;
345 struct hci_cp_le_conn_update cp;
349 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
351 params->conn_min_interval = min;
352 params->conn_max_interval = max;
353 params->conn_latency = latency;
354 params->supervision_timeout = to_multiplier;
357 hci_dev_unlock(hdev);
359 memset(&cp, 0, sizeof(cp));
360 cp.handle = cpu_to_le16(conn->handle);
361 cp.conn_interval_min = cpu_to_le16(min);
362 cp.conn_interval_max = cpu_to_le16(max);
363 cp.conn_latency = cpu_to_le16(latency);
364 cp.supervision_timeout = cpu_to_le16(to_multiplier);
365 cp.min_ce_len = cpu_to_le16(0x0000);
366 cp.max_ce_len = cpu_to_le16(0x0000);
368 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
376 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
377 __u8 ltk[16], __u8 key_size)
379 struct hci_dev *hdev = conn->hdev;
380 struct hci_cp_le_start_enc cp;
382 BT_DBG("hcon %p", conn);
384 memset(&cp, 0, sizeof(cp));
386 cp.handle = cpu_to_le16(conn->handle);
389 memcpy(cp.ltk, ltk, key_size);
391 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
394 /* Device _must_ be locked */
395 void hci_sco_setup(struct hci_conn *conn, __u8 status)
397 struct hci_conn *sco = conn->link;
402 BT_DBG("hcon %p", conn);
405 if (lmp_esco_capable(conn->hdev))
406 hci_setup_sync(sco, conn->handle);
408 hci_add_sco(sco, conn->handle);
410 hci_connect_cfm(sco, status);
415 static void hci_conn_timeout(struct work_struct *work)
417 struct hci_conn *conn = container_of(work, struct hci_conn,
419 int refcnt = atomic_read(&conn->refcnt);
421 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
425 /* FIXME: It was observed that in pairing failed scenario, refcnt
426 * drops below 0. Probably this is because l2cap_conn_del calls
427 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
428 * dropped. After that loop hci_chan_del is called which also drops
429 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
435 /* LE connections in scanning state need special handling */
436 if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
437 test_bit(HCI_CONN_SCANNING, &conn->flags)) {
438 hci_connect_le_scan_remove(conn);
442 hci_abort_conn(conn, hci_proto_disconn_ind(conn));
445 /* Enter sniff mode */
446 static void hci_conn_idle(struct work_struct *work)
448 struct hci_conn *conn = container_of(work, struct hci_conn,
450 struct hci_dev *hdev = conn->hdev;
452 BT_DBG("hcon %p mode %d", conn, conn->mode);
454 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
457 if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
460 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
461 struct hci_cp_sniff_subrate cp;
462 cp.handle = cpu_to_le16(conn->handle);
463 cp.max_latency = cpu_to_le16(0);
464 cp.min_remote_timeout = cpu_to_le16(0);
465 cp.min_local_timeout = cpu_to_le16(0);
466 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
469 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
470 struct hci_cp_sniff_mode cp;
471 cp.handle = cpu_to_le16(conn->handle);
472 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
473 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
474 cp.attempt = cpu_to_le16(4);
475 cp.timeout = cpu_to_le16(1);
476 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
480 static void hci_conn_auto_accept(struct work_struct *work)
482 struct hci_conn *conn = container_of(work, struct hci_conn,
483 auto_accept_work.work);
485 hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
489 static void le_conn_timeout(struct work_struct *work)
491 struct hci_conn *conn = container_of(work, struct hci_conn,
492 le_conn_timeout.work);
493 struct hci_dev *hdev = conn->hdev;
497 /* We could end up here due to having done directed advertising,
498 * so clean up the state if necessary. This should however only
499 * happen with broken hardware or if low duty cycle was used
500 * (which doesn't have a timeout of its own).
502 if (conn->role == HCI_ROLE_SLAVE) {
504 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
506 hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
510 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
513 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
516 struct hci_conn *conn;
518 BT_DBG("%s dst %pMR", hdev->name, dst);
520 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
524 bacpy(&conn->dst, dst);
525 bacpy(&conn->src, &hdev->bdaddr);
529 conn->mode = HCI_CM_ACTIVE;
530 conn->state = BT_OPEN;
531 conn->auth_type = HCI_AT_GENERAL_BONDING;
532 conn->io_capability = hdev->io_capability;
533 conn->remote_auth = 0xff;
534 conn->key_type = 0xff;
535 conn->rssi = HCI_RSSI_INVALID;
536 conn->tx_power = HCI_TX_POWER_INVALID;
537 conn->max_tx_power = HCI_TX_POWER_INVALID;
540 /* enable sniff mode for incoming connection */
541 conn->link_policy = hdev->link_policy;
544 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
545 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
547 if (conn->role == HCI_ROLE_MASTER)
552 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
555 /* conn->src should reflect the local identity address */
556 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
559 if (lmp_esco_capable(hdev))
560 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
561 (hdev->esco_type & EDR_ESCO_MASK);
563 conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
566 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
570 skb_queue_head_init(&conn->data_q);
572 INIT_LIST_HEAD(&conn->chan_list);
574 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
575 INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
576 INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
577 INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
578 INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
580 atomic_set(&conn->refcnt, 0);
584 hci_conn_hash_add(hdev, conn);
586 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
588 hci_conn_init_sysfs(conn);
593 int hci_conn_del(struct hci_conn *conn)
595 struct hci_dev *hdev = conn->hdev;
597 BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
599 cancel_delayed_work_sync(&conn->disc_work);
600 cancel_delayed_work_sync(&conn->auto_accept_work);
601 cancel_delayed_work_sync(&conn->idle_work);
603 if (conn->type == ACL_LINK) {
604 struct hci_conn *sco = conn->link;
609 hdev->acl_cnt += conn->sent;
610 } else if (conn->type == LE_LINK) {
611 cancel_delayed_work(&conn->le_conn_timeout);
614 hdev->le_cnt += conn->sent;
616 hdev->acl_cnt += conn->sent;
618 struct hci_conn *acl = conn->link;
626 amp_mgr_put(conn->amp_mgr);
628 skb_queue_purge(&conn->data_q);
630 /* Remove the connection from the list and cleanup its remaining
631 * state. This is a separate function since for some cases like
632 * BT_CONNECT_SCAN we *only* want the cleanup part without the
633 * rest of hci_conn_del.
635 hci_conn_cleanup(conn);
640 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
642 int use_src = bacmp(src, BDADDR_ANY);
643 struct hci_dev *hdev = NULL, *d;
645 BT_DBG("%pMR -> %pMR", src, dst);
647 read_lock(&hci_dev_list_lock);
649 list_for_each_entry(d, &hci_dev_list, list) {
650 if (!test_bit(HCI_UP, &d->flags) ||
651 hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
652 d->dev_type != HCI_PRIMARY)
656 * No source address - find interface with bdaddr != dst
657 * Source address - find interface with bdaddr == src
664 if (src_type == BDADDR_BREDR) {
665 if (!lmp_bredr_capable(d))
667 bacpy(&id_addr, &d->bdaddr);
668 id_addr_type = BDADDR_BREDR;
670 if (!lmp_le_capable(d))
673 hci_copy_identity_address(d, &id_addr,
676 /* Convert from HCI to three-value type */
677 if (id_addr_type == ADDR_LE_DEV_PUBLIC)
678 id_addr_type = BDADDR_LE_PUBLIC;
680 id_addr_type = BDADDR_LE_RANDOM;
683 if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
687 if (bacmp(&d->bdaddr, dst)) {
694 hdev = hci_dev_hold(hdev);
696 read_unlock(&hci_dev_list_lock);
699 EXPORT_SYMBOL(hci_get_route);
701 /* This function requires the caller holds hdev->lock */
702 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
704 struct hci_dev *hdev = conn->hdev;
705 struct hci_conn_params *params;
707 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
709 if (params && params->conn) {
710 hci_conn_drop(params->conn);
711 hci_conn_put(params->conn);
715 conn->state = BT_CLOSED;
717 /* If the status indicates successful cancellation of
718 * the attempt (i.e. Unkown Connection Id) there's no point of
719 * notifying failure since we'll go back to keep trying to
720 * connect. The only exception is explicit connect requests
721 * where a timeout + cancel does indicate an actual failure.
723 if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
724 (params && params->explicit_connect))
725 mgmt_connect_failed(hdev, &conn->dst, conn->type,
726 conn->dst_type, status);
728 hci_connect_cfm(conn, status);
732 /* Since we may have temporarily stopped the background scanning in
733 * favor of connection establishment, we should restart it.
735 hci_update_background_scan(hdev);
737 /* Re-enable advertising in case this was a failed connection
738 * attempt as a peripheral.
740 hci_req_reenable_advertising(hdev);
743 static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
745 struct hci_conn *conn;
749 conn = hci_lookup_le_connect(hdev);
752 hci_connect_le_scan_cleanup(conn);
756 bt_dev_err(hdev, "request failed to create LE connection: "
757 "status 0x%2.2x", status);
762 hci_le_conn_failed(conn, status);
765 hci_dev_unlock(hdev);
768 static bool conn_use_rpa(struct hci_conn *conn)
770 struct hci_dev *hdev = conn->hdev;
772 return hci_dev_test_flag(hdev, HCI_PRIVACY);
775 static void set_ext_conn_params(struct hci_conn *conn,
776 struct hci_cp_le_ext_conn_param *p)
778 struct hci_dev *hdev = conn->hdev;
780 memset(p, 0, sizeof(*p));
782 /* Set window to be the same value as the interval to
783 * enable continuous scanning.
785 p->scan_interval = cpu_to_le16(hdev->le_scan_interval);
786 p->scan_window = p->scan_interval;
787 p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
788 p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
789 p->conn_latency = cpu_to_le16(conn->le_conn_latency);
790 p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
791 p->min_ce_len = cpu_to_le16(0x0000);
792 p->max_ce_len = cpu_to_le16(0x0000);
795 static void hci_req_add_le_create_conn(struct hci_request *req,
796 struct hci_conn *conn,
797 bdaddr_t *direct_rpa)
799 struct hci_dev *hdev = conn->hdev;
802 /* If direct address was provided we use it instead of current
806 if (bacmp(&req->hdev->random_addr, direct_rpa))
807 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
810 /* direct address is always RPA */
811 own_addr_type = ADDR_LE_DEV_RANDOM;
813 /* Update random address, but set require_privacy to false so
814 * that we never connect with an non-resolvable address.
816 if (hci_update_random_address(req, false, conn_use_rpa(conn),
821 if (use_ext_conn(hdev)) {
822 struct hci_cp_le_ext_create_conn *cp;
823 struct hci_cp_le_ext_conn_param *p;
824 u8 data[sizeof(*cp) + sizeof(*p) * 3];
828 p = (void *) cp->data;
830 memset(cp, 0, sizeof(*cp));
832 bacpy(&cp->peer_addr, &conn->dst);
833 cp->peer_addr_type = conn->dst_type;
834 cp->own_addr_type = own_addr_type;
839 cp->phys |= LE_SCAN_PHY_1M;
840 set_ext_conn_params(conn, p);
847 cp->phys |= LE_SCAN_PHY_2M;
848 set_ext_conn_params(conn, p);
854 if (scan_coded(hdev)) {
855 cp->phys |= LE_SCAN_PHY_CODED;
856 set_ext_conn_params(conn, p);
861 hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, plen, data);
864 struct hci_cp_le_create_conn cp;
866 memset(&cp, 0, sizeof(cp));
868 /* Set window to be the same value as the interval to enable
869 * continuous scanning.
871 cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
872 cp.scan_window = cp.scan_interval;
875 /* LE auto connect */
876 if (!bacmp(&conn->dst, BDADDR_ANY))
877 cp.filter_policy = 0x1;
879 bacpy(&cp.peer_addr, &conn->dst);
881 bacpy(&cp.peer_addr, &conn->dst);
883 cp.peer_addr_type = conn->dst_type;
884 cp.own_address_type = own_addr_type;
885 cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
886 cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
887 cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
888 cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
889 cp.min_ce_len = cpu_to_le16(0x0000);
890 cp.max_ce_len = cpu_to_le16(0x0000);
892 hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
895 conn->state = BT_CONNECT;
896 clear_bit(HCI_CONN_SCANNING, &conn->flags);
899 static void hci_req_directed_advertising(struct hci_request *req,
900 struct hci_conn *conn)
902 struct hci_dev *hdev = req->hdev;
906 if (ext_adv_capable(hdev)) {
907 struct hci_cp_le_set_ext_adv_params cp;
908 bdaddr_t random_addr;
910 /* Set require_privacy to false so that the remote device has a
911 * chance of identifying us.
913 if (hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
914 &own_addr_type, &random_addr) < 0)
917 memset(&cp, 0, sizeof(cp));
919 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
920 cp.own_addr_type = own_addr_type;
921 cp.channel_map = hdev->le_adv_channel_map;
922 cp.tx_power = HCI_TX_POWER_INVALID;
923 cp.primary_phy = HCI_ADV_PHY_1M;
924 cp.secondary_phy = HCI_ADV_PHY_1M;
925 cp.handle = 0; /* Use instance 0 for directed adv */
926 cp.own_addr_type = own_addr_type;
927 cp.peer_addr_type = conn->dst_type;
928 bacpy(&cp.peer_addr, &conn->dst);
930 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
932 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
933 bacmp(&random_addr, BDADDR_ANY) &&
934 bacmp(&random_addr, &hdev->random_addr)) {
935 struct hci_cp_le_set_adv_set_rand_addr cp;
937 memset(&cp, 0, sizeof(cp));
940 bacpy(&cp.bdaddr, &random_addr);
943 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
947 __hci_req_enable_ext_advertising(req);
949 struct hci_cp_le_set_adv_param cp;
951 /* Clear the HCI_LE_ADV bit temporarily so that the
952 * hci_update_random_address knows that it's safe to go ahead
953 * and write a new random address. The flag will be set back on
954 * as soon as the SET_ADV_ENABLE HCI command completes.
956 hci_dev_clear_flag(hdev, HCI_LE_ADV);
958 /* Set require_privacy to false so that the remote device has a
959 * chance of identifying us.
961 if (hci_update_random_address(req, false, conn_use_rpa(conn),
965 memset(&cp, 0, sizeof(cp));
967 /* Some controllers might reject command if intervals are not
968 * within range for undirected advertising.
969 * BCM20702A0 is known to be affected by this.
971 cp.min_interval = cpu_to_le16(0x0020);
972 cp.max_interval = cpu_to_le16(0x0020);
974 cp.type = LE_ADV_DIRECT_IND;
975 cp.own_address_type = own_addr_type;
976 cp.direct_addr_type = conn->dst_type;
977 bacpy(&cp.direct_addr, &conn->dst);
978 cp.channel_map = hdev->le_adv_channel_map;
980 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
983 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
987 conn->state = BT_CONNECT;
990 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
991 u8 dst_type, u8 sec_level, u16 conn_timeout,
992 u8 role, bdaddr_t *direct_rpa)
994 struct hci_conn_params *params;
995 struct hci_conn *conn;
997 struct hci_request req;
1000 /* Let's make sure that le is enabled.*/
1001 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1002 if (lmp_le_capable(hdev))
1003 return ERR_PTR(-ECONNREFUSED);
1005 return ERR_PTR(-EOPNOTSUPP);
1008 /* Since the controller supports only one LE connection attempt at a
1009 * time, we return -EBUSY if there is any connection attempt running.
1011 if (hci_lookup_le_connect(hdev))
1012 return ERR_PTR(-EBUSY);
1014 /* If there's already a connection object but it's not in
1015 * scanning state it means it must already be established, in
1016 * which case we can't do anything else except report a failure
1019 conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1020 if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1021 return ERR_PTR(-EBUSY);
1024 /* When given an identity address with existing identity
1025 * resolving key, the connection needs to be established
1026 * to a resolvable random address.
1028 * Storing the resolvable random address is required here
1029 * to handle connection failures. The address will later
1030 * be resolved back into the original identity address
1031 * from the connect request.
1033 irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1034 if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1036 dst_type = ADDR_LE_DEV_RANDOM;
1040 bacpy(&conn->dst, dst);
1042 conn = hci_conn_add(hdev, LE_LINK, dst, role);
1044 return ERR_PTR(-ENOMEM);
1045 hci_conn_hold(conn);
1046 conn->pending_sec_level = sec_level;
1049 conn->dst_type = dst_type;
1050 conn->sec_level = BT_SECURITY_LOW;
1051 conn->conn_timeout = conn_timeout;
1053 hci_req_init(&req, hdev);
1055 /* Disable advertising if we're active. For master role
1056 * connections most controllers will refuse to connect if
1057 * advertising is enabled, and for slave role connections we
1058 * anyway have to disable it in order to start directed
1061 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
1063 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
1067 /* If requested to connect as slave use directed advertising */
1068 if (conn->role == HCI_ROLE_SLAVE) {
1069 /* If we're active scanning most controllers are unable
1070 * to initiate advertising. Simply reject the attempt.
1072 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
1073 hdev->le_scan_type == LE_SCAN_ACTIVE) {
1074 hci_req_purge(&req);
1076 return ERR_PTR(-EBUSY);
1079 hci_req_directed_advertising(&req, conn);
1083 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
1085 conn->le_conn_min_interval = params->conn_min_interval;
1086 conn->le_conn_max_interval = params->conn_max_interval;
1087 conn->le_conn_latency = params->conn_latency;
1088 conn->le_supv_timeout = params->supervision_timeout;
1090 conn->le_conn_min_interval = hdev->le_conn_min_interval;
1091 conn->le_conn_max_interval = hdev->le_conn_max_interval;
1092 conn->le_conn_latency = hdev->le_conn_latency;
1093 conn->le_supv_timeout = hdev->le_supv_timeout;
1096 /* If controller is scanning, we stop it since some controllers are
1097 * not able to scan and connect at the same time. Also set the
1098 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
1099 * handler for scan disabling knows to set the correct discovery
1102 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1103 hci_req_add_le_scan_disable(&req);
1104 hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
1107 hci_req_add_le_create_conn(&req, conn, direct_rpa);
1110 err = hci_req_run(&req, create_le_conn_complete);
1113 return ERR_PTR(err);
1119 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1121 struct hci_conn *conn;
1123 conn = hci_conn_hash_lookup_le(hdev, addr, type);
1127 if (conn->state != BT_CONNECTED)
1133 /* This function requires the caller holds hdev->lock */
1134 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1135 bdaddr_t *addr, u8 addr_type)
1137 struct hci_conn_params *params;
1139 if (is_connected(hdev, addr, addr_type))
1142 params = hci_conn_params_lookup(hdev, addr, addr_type);
1144 params = hci_conn_params_add(hdev, addr, addr_type);
1148 /* If we created new params, mark them to be deleted in
1149 * hci_connect_le_scan_cleanup. It's different case than
1150 * existing disabled params, those will stay after cleanup.
1152 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1155 /* We're trying to connect, so make sure params are at pend_le_conns */
1156 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1157 params->auto_connect == HCI_AUTO_CONN_REPORT ||
1158 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1159 list_del_init(¶ms->action);
1160 list_add(¶ms->action, &hdev->pend_le_conns);
1163 params->explicit_connect = true;
1165 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1166 params->auto_connect);
1171 /* This function requires the caller holds hdev->lock */
1172 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1173 u8 dst_type, u8 sec_level,
1176 struct hci_conn *conn;
1178 /* Let's make sure that le is enabled.*/
1179 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1180 if (lmp_le_capable(hdev))
1181 return ERR_PTR(-ECONNREFUSED);
1183 return ERR_PTR(-EOPNOTSUPP);
1186 /* Some devices send ATT messages as soon as the physical link is
1187 * established. To be able to handle these ATT messages, the user-
1188 * space first establishes the connection and then starts the pairing
1191 * So if a hci_conn object already exists for the following connection
1192 * attempt, we simply update pending_sec_level and auth_type fields
1193 * and return the object found.
1195 conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1197 if (conn->pending_sec_level < sec_level)
1198 conn->pending_sec_level = sec_level;
1202 BT_DBG("requesting refresh of dst_addr");
1204 conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1206 return ERR_PTR(-ENOMEM);
1208 if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1210 return ERR_PTR(-EBUSY);
1213 conn->state = BT_CONNECT;
1214 set_bit(HCI_CONN_SCANNING, &conn->flags);
1215 conn->dst_type = dst_type;
1216 conn->sec_level = BT_SECURITY_LOW;
1217 conn->pending_sec_level = sec_level;
1218 conn->conn_timeout = conn_timeout;
1220 hci_update_background_scan(hdev);
1223 hci_conn_hold(conn);
1227 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1228 u8 sec_level, u8 auth_type)
1230 struct hci_conn *acl;
1232 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1233 if (lmp_bredr_capable(hdev))
1234 return ERR_PTR(-ECONNREFUSED);
1236 return ERR_PTR(-EOPNOTSUPP);
1239 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1241 acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1243 return ERR_PTR(-ENOMEM);
1248 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1249 acl->sec_level = BT_SECURITY_LOW;
1250 acl->pending_sec_level = sec_level;
1251 acl->auth_type = auth_type;
1252 hci_acl_create_connection(acl);
1258 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1261 struct hci_conn *acl;
1262 struct hci_conn *sco;
1264 acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
1268 sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1270 sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
1273 return ERR_PTR(-ENOMEM);
1282 sco->setting = setting;
1284 if (acl->state == BT_CONNECTED &&
1285 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1286 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1287 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1289 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1290 /* defer SCO setup until mode change completed */
1291 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1295 hci_sco_setup(acl, 0x00);
1301 /* Check link security requirement */
/* Validates the link against the host's security policy:
 * - Secure Connections Only mode: Secure Connections, AES-CCM
 *   encryption and a P-256 authenticated combination key are all
 *   mandatory.
 * - Legacy (no SSP) pairing: no encryption/key-size requirements can
 *   be enforced.
 * - Otherwise the link must at least be encrypted.
 * (Return statements are elided in this extract.)
 */
1302 int hci_conn_check_link_mode(struct hci_conn *conn)
1304 BT_DBG("hcon %p", conn);
1306 /* In Secure Connections Only mode, it is required that Secure
1307 * Connections is used and the link is encrypted with AES-CCM
1308 * using a P-256 authenticated combination key.
1310 if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
1311 if (!hci_conn_sc_enabled(conn) ||
1312 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
1313 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
1317 /* If Secure Simple Pairing is not enabled, then legacy connection
1318 * setup is used and no encryption or key sizes can be enforced.
1320 if (!hci_conn_ssp_enabled(conn))
1323 if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1329 /* Authenticate remote device */
/* Raise conn->pending_sec_level to the requested level (never lower
 * it), preserve any existing MITM requirement in the stored auth_type,
 * and — unless authentication is already done or pending — send
 * HCI_OP_AUTH_REQUESTED for this connection handle.  When the link is
 * already encrypted the completion is tracked via REAUTH_PEND instead
 * of ENCRYPT_PEND.
 */
1330 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
1332 BT_DBG("hcon %p", conn);
1334 if (conn->pending_sec_level > sec_level)
1335 sec_level = conn->pending_sec_level;
1337 if (sec_level > conn->sec_level)
1338 conn->pending_sec_level = sec_level;
1339 else if (test_bit(HCI_CONN_AUTH, &conn->flags))
1342 /* Make sure we preserve an existing MITM requirement*/
1343 auth_type |= (conn->auth_type & 0x01);
1345 conn->auth_type = auth_type;
1347 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1348 struct hci_cp_auth_requested cp;
1350 cp.handle = cpu_to_le16(conn->handle);
1351 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
1354 /* If we're already encrypted set the REAUTH_PEND flag,
1355 * otherwise set the ENCRYPT_PEND.
1357 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1358 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1360 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1366 /* Encrypt the link */
/* If no encryption change is already pending, request link encryption
 * for this connection handle via HCI_OP_SET_CONN_ENCRYPT.
 */
1367 static void hci_conn_encrypt(struct hci_conn *conn)
1369 BT_DBG("hcon %p", conn);
1371 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1372 struct hci_cp_set_conn_encrypt cp;
1373 cp.handle = cpu_to_le16(conn->handle);
1375 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1380 /* Enable security */
/* Bring @conn up to @sec_level, authenticating and/or encrypting as
 * needed.  LE links delegate entirely to SMP.  For BR/EDR the stored
 * link-key type decides whether the existing key already satisfies the
 * requested level; otherwise authentication is (re)started and, on
 * success, encryption is enabled.  Upper-layer responses are stalled
 * while the encryption key size is still unknown.
 */
1381 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
1384 BT_DBG("hcon %p", conn);
1386 if (conn->type == LE_LINK)
1387 return smp_conn_security(conn, sec_level);
1389 /* For sdp we don't need the link key. */
1390 if (sec_level == BT_SECURITY_SDP)
1393 /* For non 2.1 devices and low security level we don't need the link
1395 if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
1398 /* For other security levels we need the link key. */
1399 if (!test_bit(HCI_CONN_AUTH, &conn->flags))
1402 /* An authenticated FIPS approved combination key has sufficient
1403 * security for security level 4. */
1404 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
1405 sec_level == BT_SECURITY_FIPS)
1408 /* An authenticated combination key has sufficient security for
1409 security level 3. */
1410 if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
1411 conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
1412 sec_level == BT_SECURITY_HIGH)
1415 /* An unauthenticated combination key has sufficient security for
1416 security level 1 and 2. */
1417 if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
1418 conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
1419 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
1422 /* A combination key has always sufficient security for the security
1423 levels 1 or 2. High security level requires the combination key
1424 is generated using maximum PIN code length (16).
1425 For pre 2.1 units. */
1426 if (conn->key_type == HCI_LK_COMBINATION &&
1427 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
1428 conn->pin_length == 16))
/* Don't start a second security procedure while encryption setup is
 * still in flight.
 */
1432 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
1436 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1438 if (!hci_conn_auth(conn, sec_level, auth_type))
1442 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
1443 /* Ensure that the encryption key size has been read,
1444 * otherwise stall the upper layer responses.
1446 if (!conn->enc_key_size)
1449 /* Nothing else needed, all requirements are met */
1453 hci_conn_encrypt(conn);
1456 EXPORT_SYMBOL(hci_conn_security);
1458 /* Check secure link requirement */
/* Accept unless a high/FIPS security level is requested while the
 * connection's current level is below that; in that case the link is
 * rejected as insufficiently secure.
 */
1459 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1461 BT_DBG("hcon %p", conn);
1463 /* Accept if non-secure or higher security level is required */
1464 if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1467 /* Accept if secure or higher security level is already present */
1468 if (conn->sec_level == BT_SECURITY_HIGH ||
1469 conn->sec_level == BT_SECURITY_FIPS)
1472 /* Reject not secure link */
1475 EXPORT_SYMBOL(hci_conn_check_secure);
/* Request a master/slave role switch for @conn via HCI_OP_SWITCH_ROLE.
 * Does nothing when the connection is already in the requested role or
 * a role switch is already pending.
 */
1478 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1480 BT_DBG("hcon %p", conn);
1482 if (role == conn->role)
1485 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1486 struct hci_cp_switch_role cp;
1487 bacpy(&cp.bdaddr, &conn->dst);
1489 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
/* Update the link supervision timeout for @conn by sending
 * HCI_OP_WRITE_LINK_SUPERVISION_TIMEOUT.  Only proceeds when we are
 * master on the link and the connection handle has been assigned.
 * A command-send failure is logged but otherwise not propagated here.
 */
1497 int hci_conn_change_supervision_timeout(struct hci_conn *conn, __u16 timeout)
1499 struct hci_cp_write_link_supervision_timeout cp;
1501 if (!((get_link_mode(conn)) & HCI_LM_MASTER))
1504 if (conn->handle == 0)
1507 memset(&cp, 0, sizeof(cp));
1508 cp.handle = cpu_to_le16(conn->handle);
1509 cp.timeout = cpu_to_le16(timeout);
1511 if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_LINK_SUPERVISION_TIMEOUT,
1512 sizeof(cp), &cp) < 0)
1513 BT_ERR("HCI_OP_WRITE_LINK_SUPERVISION_TIMEOUT is failed");
1519 /* Enter active mode */
/* Take @conn out of sniff mode when power save is enabled on it or
 * @force_active is set, then (re)arm the per-connection idle timer.
 * NOTE(review): the idle-timer requeue appears twice below — once with
 * a preceding cancel_delayed_work() and once without.  This looks like
 * a merge/extract artifact; confirm against upstream hci_conn.c.
 */
1520 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1522 struct hci_dev *hdev = conn->hdev;
1524 BT_DBG("hcon %p mode %d", conn, conn->mode);
1526 if (conn->mode != HCI_CM_SNIFF)
1529 if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1532 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1533 struct hci_cp_exit_sniff_mode cp;
1534 cp.handle = cpu_to_le16(conn->handle);
1535 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1540 if (hdev->idle_timeout > 0) {
1541 /* Sniff timer cancel */
1542 cancel_delayed_work(&conn->idle_work);
1543 queue_delayed_work(hdev->workqueue, &conn->idle_work,
1544 msecs_to_jiffies(hdev->idle_timeout));
1547 if (hdev->idle_timeout > 0)
1548 queue_delayed_work(hdev->workqueue, &conn->idle_work,
1549 msecs_to_jiffies(hdev->idle_timeout));
1553 /* Drop all connection on the device */
/* Walk the connection hash (safe iteration, entries may be removed),
 * mark every connection BT_CLOSED and notify the upper layers with
 * HCI_ERROR_LOCAL_HOST_TERM.
 */
1554 void hci_conn_hash_flush(struct hci_dev *hdev)
1556 struct hci_conn_hash *h = &hdev->conn_hash;
1557 struct hci_conn *c, *n;
1559 BT_DBG("hdev %s", hdev->name);
1561 list_for_each_entry_safe(c, n, &h->list, list) {
1562 c->state = BT_CLOSED;
1564 hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1569 /* Check pending connect attempts */
/* Kick off the next queued ACL connect: look up a connection parked in
 * BT_CONNECT2 and issue the HCI Create Connection command for it, under
 * the hdev lock (the matching lock call is elided in this extract).
 */
1570 void hci_conn_check_pending(struct hci_dev *hdev)
1572 struct hci_conn *conn;
1574 BT_DBG("hdev %s", hdev->name);
1578 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1580 hci_acl_create_connection(conn);
1582 hci_dev_unlock(hdev);
/* Translate conn->role and conn->flags into the legacy HCI_LM_* bitmask
 * exposed through the ioctl interface (hci_get_conn_list/_info).
 * NOTE(review): two signature lines appear below (static and
 * non-static) — a merge/extract artifact; there should be a single
 * static definition.  Confirm against upstream hci_conn.c.
 */
1586 static u32 get_link_mode(struct hci_conn *conn)
1588 u32 get_link_mode(struct hci_conn *conn)
1593 if (conn->role == HCI_ROLE_MASTER)
1594 link_mode |= HCI_LM_MASTER;
1596 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1597 link_mode |= HCI_LM_ENCRYPT;
1599 if (test_bit(HCI_CONN_AUTH, &conn->flags))
1600 link_mode |= HCI_LM_AUTH;
1602 if (test_bit(HCI_CONN_SECURE, &conn->flags))
1603 link_mode |= HCI_LM_SECURE;
1605 if (test_bit(HCI_CONN_FIPS, &conn->flags))
1606 link_mode |= HCI_LM_FIPS;
/* ioctl helper: copy a struct hci_conn_list_req from userspace,
 * snapshot up to req.conn_num connections of the requested device and
 * copy the (possibly shorter) list back to @arg.  The conn_num bound
 * against (PAGE_SIZE * 2) / sizeof(*ci) caps the kmalloc size against
 * a hostile request.  Returns 0 on success or -EFAULT on a failed
 * copy (other error paths are elided in this extract).
 */
1611 int hci_get_conn_list(void __user *arg)
1614 struct hci_conn_list_req req, *cl;
1615 struct hci_conn_info *ci;
1616 struct hci_dev *hdev;
1617 int n = 0, size, err;
1619 if (copy_from_user(&req, arg, sizeof(req)))
1622 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1625 size = sizeof(req) + req.conn_num * sizeof(*ci);
1627 cl = kmalloc(size, GFP_KERNEL);
1631 hdev = hci_dev_get(req.dev_id);
/* Fill one hci_conn_info per connection, stopping at the caller's
 * requested count.
 */
1640 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1641 bacpy(&(ci + n)->bdaddr, &c->dst);
1642 (ci + n)->handle = c->handle;
1643 (ci + n)->type = c->type;
1644 (ci + n)->out = c->out;
1645 (ci + n)->state = c->state;
1646 (ci + n)->link_mode = get_link_mode(c);
1647 if (++n >= req.conn_num)
1650 hci_dev_unlock(hdev);
1652 cl->dev_id = hdev->id;
/* Only copy back the entries actually filled in, not the full
 * requested buffer.
 */
1654 size = sizeof(req) + n * sizeof(*ci);
1658 err = copy_to_user(arg, cl, size);
1661 return err ? -EFAULT : 0;
/* ioctl helper: look up the connection matching the userspace
 * hci_conn_info_req (type + bdaddr) and copy a filled hci_conn_info
 * back to the area following the request at @arg.  Returns 0 on
 * success or -EFAULT on a failed copy.
 */
1664 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1666 struct hci_conn_info_req req;
1667 struct hci_conn_info ci;
1668 struct hci_conn *conn;
/* The reply is written immediately after the request structure. */
1669 char __user *ptr = arg + sizeof(req);
1671 if (copy_from_user(&req, arg, sizeof(req)))
1675 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1677 bacpy(&ci.bdaddr, &conn->dst);
1678 ci.handle = conn->handle;
1679 ci.type = conn->type;
1681 ci.state = conn->state;
1682 ci.link_mode = get_link_mode(conn);
1684 hci_dev_unlock(hdev);
1689 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
/* ioctl helper: look up the ACL connection to the bdaddr in the
 * userspace hci_auth_info_req, fill in its stored auth_type and copy
 * the request back.  Returns 0 on success or -EFAULT on a failed copy.
 */
1692 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1694 struct hci_auth_info_req req;
1695 struct hci_conn *conn;
1697 if (copy_from_user(&req, arg, sizeof(req)))
1701 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1703 req.type = conn->auth_type;
1704 hci_dev_unlock(hdev);
1709 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1712 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1714 struct hci_dev *hdev = conn->hdev;
1715 struct hci_chan *chan;
1717 BT_DBG("%s hcon %p", hdev->name, conn);
1719 if (test_bit(HCI_CONN_DROP, &conn->flags)) {
1720 BT_DBG("Refusing to create new hci_chan");
1724 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1728 chan->conn = hci_conn_get(conn);
1729 skb_queue_head_init(&chan->data_q);
1730 chan->state = BT_CONNECTED;
1732 list_add_rcu(&chan->list, &conn->chan_list);
/* Unlink @chan from its connection (RCU removal), flag the connection
 * with HCI_CONN_DROP so hci_chan_create() refuses new channels on it,
 * and purge the channel's queued data.
 */
1737 void hci_chan_del(struct hci_chan *chan)
1739 struct hci_conn *conn = chan->conn;
1740 struct hci_dev *hdev = conn->hdev;
1742 BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1744 list_del_rcu(&chan->list);
1748 /* Prevent new hci_chan's to be created for this hci_conn */
1749 set_bit(HCI_CONN_DROP, &conn->flags);
1753 skb_queue_purge(&chan->data_q);
/* Tear down every channel on @conn; safe iteration because each entry
 * is removed as it is processed (deletion call elided in this extract).
 */
1757 void hci_chan_list_flush(struct hci_conn *conn)
1759 struct hci_chan *chan, *n;
1761 BT_DBG("hcon %p", conn);
1763 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
/* Find the channel with the given handle on @hcon's channel list,
 * if any.  Caller provides any locking needed for the traversal.
 */
1767 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1770 struct hci_chan *hchan;
1772 list_for_each_entry(hchan, &hcon->chan_list, list) {
1773 if (hchan->handle == handle)
1780 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1782 struct hci_conn_hash *h = &hdev->conn_hash;
1783 struct hci_conn *hcon;
1784 struct hci_chan *hchan = NULL;
1788 list_for_each_entry_rcu(hcon, &h->list, list) {
1789 hchan = __hci_chan_lookup_handle(hcon, handle);