2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI connection handling. */
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
34 #include "hci_request.h"
/* SCO/eSCO negotiation parameter tables: each entry is
 * { pkt_type, max_latency, retrans_effort }, tried in order on successive
 * connection attempts (conn->attempt indexes into these in hci_setup_sync()).
 * The Tizen variant adds SCO_ESCO_MASK to the first CVSD entry.
 * NOTE(review): the #else/#endif lines separating the Tizen and upstream
 * tables appear elided in this extraction — confirm against the full file.
 */
#ifdef CONFIG_TIZEN_WIP
static const struct sco_param esco_param_cvsd[] = {
{ (EDR_ESCO_MASK & ~ESCO_2EV3) | SCO_ESCO_MASK | ESCO_EV3 , 0x000a, 0x01 }, /* S3 */
{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
{ EDR_ESCO_MASK | ESCO_EV3, 0x0007, 0x01 }, /* S1 */
{ EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0x01 }, /* D1 */
{ EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0x01 }, /* D0 */
/* mSBC (wide-band speech) transparent-air-mode settings */
static const struct sco_param esco_param_msbc[] = {
{ (EDR_ESCO_MASK & ~ESCO_2EV3) | ESCO_EV3, 0x000d, 0x02 }, /* T2 */
{ EDR_ESCO_MASK | ESCO_EV3, 0x0008, 0x02 }, /* T1 */
/* Upstream (non-Tizen) CVSD table */
static const struct sco_param esco_param_cvsd[] = {
{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */
{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
{ EDR_ESCO_MASK | ESCO_EV3, 0x0007, 0x01 }, /* S1 */
{ EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0x01 }, /* D1 */
{ EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0x01 }, /* D0 */
/* Upstream mSBC table */
static const struct sco_param esco_param_msbc[] = {
{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */
{ EDR_ESCO_MASK | ESCO_EV3, 0x0008, 0x02 }, /* T1 */
/* Legacy SCO fallback (controller without eSCO support);
 * 0xff retrans_effort = "don't care" per the HCI spec.
 */
static const struct sco_param sco_param_cvsd[] = {
{ EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0xff }, /* D1 */
{ EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0xff }, /* D0 */
/* Abort an in-progress LE connection attempt. The LE Create Connection
 * Cancel command carries no parameters; completion is handled in the
 * event path.
 */
77 static void hci_le_create_connection_cancel(struct hci_conn *conn)
79 hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
/* Issue HCI Create Connection for an outgoing BR/EDR ACL link.
 * Moves the connection to BT_CONNECT, seeds page-scan parameters from the
 * inquiry cache when an entry for the peer exists, and requests a role
 * switch when the controller supports it and we are not forced master.
 */
82 static void hci_acl_create_connection(struct hci_conn *conn)
84 struct hci_dev *hdev = conn->hdev;
85 struct inquiry_entry *ie;
86 struct hci_cp_create_conn cp;
88 BT_DBG("hcon %p", conn);
90 conn->state = BT_CONNECT;
92 conn->role = HCI_ROLE_MASTER;
96 conn->link_policy = hdev->link_policy;
98 memset(&cp, 0, sizeof(cp));
99 bacpy(&cp.bdaddr, &conn->dst);
/* Default page-scan repetition mode R2; overridden from the inquiry
 * cache below when fresher data is available.
 */
100 cp.pscan_rep_mode = 0x02;
102 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
/* NOTE(review): the `if (ie)` guard appears elided in this extraction
 * (numbering jumps 102->104); inquiry_entry_age() must only run on a
 * non-NULL entry — confirm against the full file.
 */
104 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
105 cp.pscan_rep_mode = ie->data.pscan_rep_mode;
106 cp.pscan_mode = ie->data.pscan_mode;
107 cp.clock_offset = ie->data.clock_offset |
111 memcpy(conn->dev_class, ie->data.dev_class, 3);
112 if (ie->data.ssp_mode > 0)
113 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
116 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* Allow the peer to switch roles unless we are configured master-only */
117 if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
118 cp.role_switch = 0x01;
120 cp.role_switch = 0x00;
122 hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
/* Cancel an outgoing BR/EDR connection attempt. Create Connection Cancel
 * was introduced in Bluetooth 1.2, so bail out on older controllers.
 */
125 static void hci_acl_create_connection_cancel(struct hci_conn *conn)
127 struct hci_cp_create_conn_cancel cp;
129 BT_DBG("hcon %p", conn);
131 if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
134 bacpy(&cp.bdaddr, &conn->dst);
135 hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
/* Reject an incoming synchronous (SCO/eSCO) connection request,
 * reporting "limited resources" to the remote device.
 */
138 static void hci_reject_sco(struct hci_conn *conn)
140 struct hci_cp_reject_sync_conn_req cp;
142 cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
143 bacpy(&cp.bdaddr, &conn->dst);
145 hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
/* Disconnect @conn with HCI reason @reason.
 * Returns the result of hci_send_cmd() for the Disconnect command.
 */
148 int hci_disconnect(struct hci_conn *conn, __u8 reason)
150 struct hci_cp_disconnect cp;
152 BT_DBG("hcon %p", conn);
154 /* When we are master of an established connection and it enters
155 * the disconnect timeout, then go ahead and try to read the
156 * current clock offset. Processing of the result is done
157 * within the event handling and hci_clock_offset_evt function.
*/
159 if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) {
160 struct hci_dev *hdev = conn->hdev;
161 struct hci_cp_read_clock_offset clkoff_cp;
163 clkoff_cp.handle = cpu_to_le16(conn->handle);
164 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
168 conn->state = BT_DISCONN;
170 cp.handle = cpu_to_le16(conn->handle);
/* NOTE(review): cp.reason assignment appears elided in this extraction
 * (numbering jumps 170->172); confirm against the full file.
 */
172 return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
/* Tear down an AMP physical link: marks the connection BT_DISCONN and
 * sends Disconnect Physical Link with the reason supplied by the
 * protocol layer.
 */
175 static void hci_amp_disconn(struct hci_conn *conn)
177 struct hci_cp_disconn_phy_link cp;
179 BT_DBG("hcon %p", conn);
181 conn->state = BT_DISCONN;
183 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
184 cp.reason = hci_proto_disconn_ind(conn);
185 hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
/* Set up a legacy SCO link on top of ACL connection @handle using the
 * HCI Add SCO Connection command (pre-eSCO controllers).
 */
189 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
191 struct hci_dev *hdev = conn->hdev;
192 struct hci_cp_add_sco cp;
194 BT_DBG("hcon %p", conn);
196 conn->state = BT_CONNECT;
/* NOTE(review): lines between 196 and 201 (likely conn->attempt++)
 * are elided in this extraction — confirm against the full file.
 */
201 cp.handle = cpu_to_le16(handle);
202 cp.pkt_type = cpu_to_le16(conn->pkt_type);
204 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
/* Set up an eSCO link on top of ACL connection @handle. The negotiation
 * parameters are picked from the air-mode-specific table indexed by
 * conn->attempt, so each retry walks to the next (more conservative)
 * entry. Returns false when all table entries are exhausted or the
 * command could not be sent.
 */
207 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
209 struct hci_dev *hdev = conn->hdev;
210 struct hci_cp_setup_sync_conn cp;
211 const struct sco_param *param;
213 BT_DBG("hcon %p", conn);
215 conn->state = BT_CONNECT;
220 cp.handle = cpu_to_le16(handle);
/* 8 kHz * 16 bit = 0x1f40 (64 kbit/s) in each direction */
222 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
223 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
224 cp.voice_setting = cpu_to_le16(conn->setting);
226 switch (conn->setting & SCO_AIRMODE_MASK) {
227 case SCO_AIRMODE_TRANSP:
/* Transparent air mode => mSBC wide-band table */
228 if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
230 param = &esco_param_msbc[conn->attempt - 1];
232 case SCO_AIRMODE_CVSD:
/* Prefer eSCO parameters when the *peer* supports eSCO,
 * otherwise fall back to legacy SCO D-packets.
 */
233 if (lmp_esco_capable(conn->link)) {
234 if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
236 param = &esco_param_cvsd[conn->attempt - 1];
238 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
240 param = &sco_param_cvsd[conn->attempt - 1];
247 cp.retrans_effort = param->retrans_effort;
248 cp.pkt_type = __cpu_to_le16(param->pkt_type);
249 cp.max_latency = __cpu_to_le16(param->max_latency);
251 if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
/* Request new LE connection parameters for an established link and, when
 * a stored hci_conn_params entry exists for the peer, persist the new
 * values there so future connections reuse them.
 */
257 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
260 struct hci_dev *hdev = conn->hdev;
261 struct hci_conn_params *params;
262 struct hci_cp_le_conn_update cp;
/* NOTE(review): the matching hci_dev_lock(hdev) appears elided in this
 * extraction (only the unlock is visible) — confirm against the full file.
 */
266 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
268 params->conn_min_interval = min;
269 params->conn_max_interval = max;
270 params->conn_latency = latency;
271 params->supervision_timeout = to_multiplier;
274 hci_dev_unlock(hdev);
276 memset(&cp, 0, sizeof(cp));
277 cp.handle = cpu_to_le16(conn->handle);
278 cp.conn_interval_min = cpu_to_le16(min);
279 cp.conn_interval_max = cpu_to_le16(max);
280 cp.conn_latency = cpu_to_le16(latency);
281 cp.supervision_timeout = cpu_to_le16(to_multiplier);
282 cp.min_ce_len = cpu_to_le16(0x0000);
283 cp.max_ce_len = cpu_to_le16(0x0000);
285 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
/* Start LE link-layer encryption using the given EDIV/Rand pair and
 * long-term key (master-initiated LE Start Encryption).
 */
293 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
296 struct hci_dev *hdev = conn->hdev;
297 struct hci_cp_le_start_enc cp;
299 BT_DBG("hcon %p", conn);
301 memset(&cp, 0, sizeof(cp));
303 cp.handle = cpu_to_le16(conn->handle);
306 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
308 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
311 /* Device _must_ be locked */
/* Kick off the pending SCO/eSCO setup attached to ACL link @conn once the
 * ACL is ready; on a non-zero @status the failure is propagated to the
 * SCO protocol layer instead.
 */
312 void hci_sco_setup(struct hci_conn *conn, __u8 status)
314 struct hci_conn *sco = conn->link;
319 BT_DBG("hcon %p", conn);
322 if (lmp_esco_capable(conn->hdev))
323 hci_setup_sync(sco, conn->handle);
325 hci_add_sco(sco, conn->handle);
327 hci_proto_connect_cfm(sco, status);
/* Delayed-work handler fired when a connection has been idle past its
 * disconnect timeout: cancels half-open connections, rejects pending
 * SCO requests, or issues a disconnect for established links.
 */
332 static void hci_conn_timeout(struct work_struct *work)
334 struct hci_conn *conn = container_of(work, struct hci_conn,
336 int refcnt = atomic_read(&conn->refcnt);
338 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
342 /* FIXME: It was observed that in pairing failed scenario, refcnt
343 * drops below 0. Probably this is because l2cap_conn_del calls
344 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
345 * dropped. After that loop hci_chan_del is called which also drops
346 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
*/
352 switch (conn->state) {
/* BT_CONNECT/BT_CONNECT2: the link never came up — cancel it */
356 if (conn->type == ACL_LINK)
357 hci_acl_create_connection_cancel(conn);
358 #ifdef CONFIG_TIZEN_WIP
/* Tizen: only cancel LE attempts aimed at a concrete address
 * (BDADDR_ANY means auto-connection via the white list).
 */
359 else if (conn->type == LE_LINK &&
360 bacmp(&conn->dst, BDADDR_ANY))
362 else if (conn->type == LE_LINK)
364 hci_le_create_connection_cancel(conn);
365 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
366 hci_reject_sco(conn);
/* Established link: AMP links need the physical-link variant */
371 if (conn->type == AMP_LINK) {
372 hci_amp_disconn(conn);
374 __u8 reason = hci_proto_disconn_ind(conn);
375 hci_disconnect(conn, reason);
379 conn->state = BT_CLOSED;
384 /* Enter sniff mode */
/* Idle-work handler: when both sides support sniff mode and the link
 * policy allows it, optionally programs sniff subrating and then
 * requests sniff mode with the device's configured interval range.
 */
385 static void hci_conn_idle(struct work_struct *work)
387 struct hci_conn *conn = container_of(work, struct hci_conn,
389 struct hci_dev *hdev = conn->hdev;
391 BT_DBG("hcon %p mode %d", conn, conn->mode);
393 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
396 if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
399 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
400 struct hci_cp_sniff_subrate cp;
401 cp.handle = cpu_to_le16(conn->handle);
402 cp.max_latency = cpu_to_le16(0);
403 cp.min_remote_timeout = cpu_to_le16(0);
404 cp.min_local_timeout = cpu_to_le16(0);
405 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
/* MODE_CHANGE_PEND guards against issuing overlapping mode commands */
408 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
409 struct hci_cp_sniff_mode cp;
410 cp.handle = cpu_to_le16(conn->handle);
411 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
412 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
413 cp.attempt = cpu_to_le16(4);
414 cp.timeout = cpu_to_le16(1);
415 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
/* Delayed-work handler that auto-confirms a pending user-confirmation
 * (just-works) pairing request for this connection's peer address.
 */
419 static void hci_conn_auto_accept(struct work_struct *work)
421 struct hci_conn *conn = container_of(work, struct hci_conn,
422 auto_accept_work.work);
424 hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
/* LE connection-attempt timeout: for slave role, stop directed
 * advertising and fail the connection; for master role, cancel the
 * outstanding LE Create Connection.
 */
428 static void le_conn_timeout(struct work_struct *work)
430 struct hci_conn *conn = container_of(work, struct hci_conn,
431 le_conn_timeout.work);
432 struct hci_dev *hdev = conn->hdev;
436 /* We could end up here due to having done directed advertising,
437 * so clean up the state if necessary. This should however only
438 * happen with broken hardware or if low duty cycle was used
439 * (which doesn't have a timeout of its own).
*/
441 if (conn->role == HCI_ROLE_SLAVE) {
443 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
445 hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
449 hci_le_create_connection_cancel(conn);
/* Allocate and initialise a new hci_conn of @type to @dst, register it in
 * the connection hash and sysfs, and arm its work items. Returns the new
 * connection or NULL on allocation failure (per the kzalloc below).
 */
452 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
455 struct hci_conn *conn;
457 BT_DBG("%s dst %pMR", hdev->name, dst);
459 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
463 bacpy(&conn->dst, dst);
464 bacpy(&conn->src, &hdev->bdaddr);
468 conn->mode = HCI_CM_ACTIVE;
469 conn->state = BT_OPEN;
470 conn->auth_type = HCI_AT_GENERAL_BONDING;
471 conn->io_capability = hdev->io_capability;
/* 0xff = "unknown" sentinels until the real values are learned */
472 conn->remote_auth = 0xff;
473 conn->key_type = 0xff;
474 conn->rssi = HCI_RSSI_INVALID;
475 conn->tx_power = HCI_TX_POWER_INVALID;
476 conn->max_tx_power = HCI_TX_POWER_INVALID;
478 #ifdef CONFIG_TIZEN_WIP
479 /* enable sniff mode for incoming connection */
480 conn->link_policy = hdev->link_policy;
483 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
484 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
486 if (conn->role == HCI_ROLE_MASTER)
/* Per-type packet-type initialisation (ACL / LE / SCO / eSCO) */
491 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
494 /* conn->src should reflect the local identity address */
495 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
498 if (lmp_esco_capable(hdev))
499 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
500 (hdev->esco_type & EDR_ESCO_MASK);
502 conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
505 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
509 skb_queue_head_init(&conn->data_q);
511 INIT_LIST_HEAD(&conn->chan_list);
513 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
514 INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
515 INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
516 INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
518 atomic_set(&conn->refcnt, 0);
522 hci_conn_hash_add(hdev, conn);
524 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
526 hci_conn_init_sysfs(conn);
/* Tear down and free @conn: cancel its work items, return unacked packet
 * credits to the device counters, unlink any attached SCO/ACL peer,
 * flush channels, and remove it from hash/sysfs/debugfs.
 */
531 int hci_conn_del(struct hci_conn *conn)
533 struct hci_dev *hdev = conn->hdev;
535 BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
537 cancel_delayed_work_sync(&conn->disc_work);
538 cancel_delayed_work_sync(&conn->auto_accept_work);
539 cancel_delayed_work_sync(&conn->idle_work);
541 if (conn->type == ACL_LINK) {
542 struct hci_conn *sco = conn->link;
/* Return in-flight packet credits to the ACL pool */
547 hdev->acl_cnt += conn->sent;
548 } else if (conn->type == LE_LINK) {
549 cancel_delayed_work(&conn->le_conn_timeout);
/* Controllers without a separate LE buffer pool share acl_cnt */
552 hdev->le_cnt += conn->sent;
554 hdev->acl_cnt += conn->sent;
556 struct hci_conn *acl = conn->link;
563 hci_chan_list_flush(conn);
566 amp_mgr_put(conn->amp_mgr);
568 hci_conn_hash_del(hdev, conn);
570 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
572 skb_queue_purge(&conn->data_q);
574 hci_conn_del_sysfs(conn);
576 debugfs_remove_recursive(conn->debugfs);
578 if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
579 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
/* Pick the local BR/EDR adapter to use for a connection to @dst.
 * With a concrete @src, match the adapter owning that address; with
 * BDADDR_ANY, take any up, non-user-channel adapter whose address
 * differs from @dst. Returns a held hci_dev reference or NULL.
 */
588 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
590 int use_src = bacmp(src, BDADDR_ANY);
591 struct hci_dev *hdev = NULL, *d;
593 BT_DBG("%pMR -> %pMR", src, dst);
595 read_lock(&hci_dev_list_lock);
597 list_for_each_entry(d, &hci_dev_list, list) {
598 if (!test_bit(HCI_UP, &d->flags) ||
599 test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
600 d->dev_type != HCI_BREDR)
604 * No source address - find interface with bdaddr != dst
605 * Source address - find interface with bdaddr == src
*/
609 if (!bacmp(&d->bdaddr, src)) {
613 if (bacmp(&d->bdaddr, dst)) {
620 hdev = hci_dev_hold(hdev);
622 read_unlock(&hci_dev_list_lock);
625 EXPORT_SYMBOL(hci_get_route);
627 /* This function requires the caller holds hdev->lock */
/* Handle a failed LE connection attempt: drop the reference held by the
 * pending-connection params entry, mark the conn closed, notify mgmt and
 * the protocol layer, then restart background scanning / advertising.
 */
628 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
630 struct hci_dev *hdev = conn->hdev;
631 struct hci_conn_params *params;
633 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
635 if (params && params->conn) {
636 hci_conn_drop(params->conn);
637 hci_conn_put(params->conn);
641 conn->state = BT_CLOSED;
643 mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
646 hci_proto_connect_cfm(conn, status);
650 /* Since we may have temporarily stopped the background scanning in
651 * favor of connection establishment, we should restart it.
*/
653 hci_update_background_scan(hdev);
655 /* Re-enable advertising in case this was a failed connection
656 * attempt as a peripheral.
*/
658 mgmt_reenable_advertising(hdev);
/* Completion callback for the LE Create Connection request: on failure,
 * find the hci_conn still in BT_CONNECT and fail it.
 */
661 static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
663 struct hci_conn *conn;
668 BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
673 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
677 hci_le_conn_failed(conn, status);
680 hci_dev_unlock(hdev);
/* Queue an LE Create Connection command onto @req for @conn, using the
 * connection's negotiated interval/latency/timeout parameters and a
 * freshly-updated own (possibly random) address.
 */
683 static void hci_req_add_le_create_conn(struct hci_request *req,
684 struct hci_conn *conn)
686 struct hci_cp_le_create_conn cp;
687 struct hci_dev *hdev = conn->hdev;
690 memset(&cp, 0, sizeof(cp));
692 /* Update random address, but set require_privacy to false so
693 * that we never connect with an non-resolvable address.
*/
695 if (hci_update_random_address(req, false, &own_addr_type))
698 cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
699 cp.scan_window = cpu_to_le16(hdev->le_scan_window);
700 #ifdef CONFIG_TIZEN_WIP
701 /* LE auto connect */
/* BDADDR_ANY destination => use the white-list filter policy */
702 if (!bacmp(&conn->dst, BDADDR_ANY))
703 cp.filter_policy = 0x1;
705 bacpy(&cp.peer_addr, &conn->dst);
707 bacpy(&cp.peer_addr, &conn->dst);
709 cp.peer_addr_type = conn->dst_type;
710 cp.own_address_type = own_addr_type;
711 cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
712 cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
713 cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
714 cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
715 cp.min_ce_len = cpu_to_le16(0x0000);
716 cp.max_ce_len = cpu_to_le16(0x0000);
718 hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
720 conn->state = BT_CONNECT;
/* Queue directed advertising toward @conn's peer onto @req, used when we
 * should become the LE slave of the connection.
 */
723 static void hci_req_directed_advertising(struct hci_request *req,
724 struct hci_conn *conn)
726 struct hci_dev *hdev = req->hdev;
727 struct hci_cp_le_set_adv_param cp;
731 /* Clear the HCI_LE_ADV bit temporarily so that the
732 * hci_update_random_address knows that it's safe to go ahead
733 * and write a new random address. The flag will be set back on
734 * as soon as the SET_ADV_ENABLE HCI command completes.
*/
736 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
738 /* Set require_privacy to false so that the remote device has a
739 * chance of identifying us.
*/
741 if (hci_update_random_address(req, false, &own_addr_type) < 0)
744 memset(&cp, 0, sizeof(cp));
745 cp.type = LE_ADV_DIRECT_IND;
746 cp.own_address_type = own_addr_type;
747 cp.direct_addr_type = conn->dst_type;
748 bacpy(&cp.direct_addr, &conn->dst);
749 cp.channel_map = hdev->le_adv_channel_map;
751 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
754 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
756 conn->state = BT_CONNECT;
/* Establish (or reuse) an LE connection to @dst.
 * Reuses an existing hci_conn for the same peer, enforces the one-
 * outstanding-LE-attempt-at-a-time controller limitation, resolves the
 * destination through any known IRK, and builds a single HCI request
 * that disables advertising/scanning as needed before either starting
 * directed advertising (slave role) or LE Create Connection (master).
 * Returns the hci_conn or an ERR_PTR.
 */
759 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
760 u8 dst_type, u8 sec_level, u16 conn_timeout,
763 struct hci_conn_params *params;
764 struct hci_conn *conn;
766 struct hci_request req;
769 /* Some devices send ATT messages as soon as the physical link is
770 * established. To be able to handle these ATT messages, the user-
771 * space first establishes the connection and then starts the pairing
774 * So if a hci_conn object already exists for the following connection
775 * attempt, we simply update pending_sec_level and auth_type fields
776 * and return the object found.
*/
778 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
780 conn->pending_sec_level = sec_level;
784 /* Since the controller supports only one LE connection attempt at a
785 * time, we return -EBUSY if there is any connection attempt running.
*/
787 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
789 return ERR_PTR(-EBUSY);
791 /* When given an identity address with existing identity
792 * resolving key, the connection needs to be established
793 * to a resolvable random address.
795 * This uses the cached random resolvable address from
796 * a previous scan. When no cached address is available,
797 * try connecting to the identity address instead.
799 * Storing the resolvable random address is required here
800 * to handle connection failures. The address will later
801 * be resolved back into the original identity address
802 * from the connect request.
*/
804 irk = hci_find_irk_by_addr(hdev, dst, dst_type);
805 if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
807 dst_type = ADDR_LE_DEV_RANDOM;
810 conn = hci_conn_add(hdev, LE_LINK, dst, role);
812 return ERR_PTR(-ENOMEM);
814 conn->dst_type = dst_type;
815 conn->sec_level = BT_SECURITY_LOW;
816 conn->pending_sec_level = sec_level;
817 conn->conn_timeout = conn_timeout;
819 hci_req_init(&req, hdev);
821 /* Disable advertising if we're active. For master role
822 * connections most controllers will refuse to connect if
823 * advertising is enabled, and for slave role connections we
824 * anyway have to disable it in order to start directed
827 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
829 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
833 /* If requested to connect as slave use directed advertising */
834 if (conn->role == HCI_ROLE_SLAVE) {
835 /* If we're active scanning most controllers are unable
836 * to initiate advertising. Simply reject the attempt.
*/
838 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
839 hdev->le_scan_type == LE_SCAN_ACTIVE) {
840 skb_queue_purge(&req.cmd_q);
842 return ERR_PTR(-EBUSY);
845 hci_req_directed_advertising(&req, conn);
/* Prefer per-device stored connection parameters, else hdev defaults */
849 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
851 conn->le_conn_min_interval = params->conn_min_interval;
852 conn->le_conn_max_interval = params->conn_max_interval;
853 conn->le_conn_latency = params->conn_latency;
854 conn->le_supv_timeout = params->supervision_timeout;
856 conn->le_conn_min_interval = hdev->le_conn_min_interval;
857 conn->le_conn_max_interval = hdev->le_conn_max_interval;
858 conn->le_conn_latency = hdev->le_conn_latency;
859 conn->le_supv_timeout = hdev->le_supv_timeout;
862 /* If controller is scanning, we stop it since some controllers are
863 * not able to scan and connect at the same time. Also set the
864 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
865 * handler for scan disabling knows to set the correct discovery
*/
868 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
869 hci_req_add_le_scan_disable(&req);
870 set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
873 hci_req_add_le_create_conn(&req, conn);
876 err = hci_req_run(&req, create_le_conn_complete);
/* Establish (or reuse) an outgoing BR/EDR ACL connection to @dst.
 * Creates a hci_conn if none exists and kicks off Create Connection
 * when the link is not already connecting/connected.
 */
887 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
888 u8 sec_level, u8 auth_type)
890 struct hci_conn *acl;
892 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
893 return ERR_PTR(-EOPNOTSUPP);
895 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
897 acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
899 return ERR_PTR(-ENOMEM);
904 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
905 acl->sec_level = BT_SECURITY_LOW;
906 acl->pending_sec_level = sec_level;
907 acl->auth_type = auth_type;
908 hci_acl_create_connection(acl);
/* Establish a SCO/eSCO link of @type to @dst: first bring up (or reuse)
 * the underlying ACL, then create/reuse the synchronous hci_conn linked
 * to it. SCO setup is deferred until any pending mode change completes.
 */
914 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
917 struct hci_conn *acl;
918 struct hci_conn *sco;
920 acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
924 sco = hci_conn_hash_lookup_ba(hdev, type, dst);
926 sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
/* NOTE(review): the acl reference drop on this failure path appears
 * elided in this extraction — confirm against the full file.
 */
929 return ERR_PTR(-ENOMEM);
938 sco->setting = setting;
940 if (acl->state == BT_CONNECTED &&
941 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
942 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
943 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
945 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
946 /* defer SCO setup until mode change completed */
947 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
951 hci_sco_setup(acl, 0x00);
957 /* Check link security requirement */
/* Verify the link satisfies the device security policy: in SC-only mode
 * require Secure Connections with AES-CCM and a P-256 key; otherwise for
 * SSP links require encryption.
 */
958 int hci_conn_check_link_mode(struct hci_conn *conn)
960 BT_DBG("hcon %p", conn);
962 /* In Secure Connections Only mode, it is required that Secure
963 * Connections is used and the link is encrypted with AES-CCM
964 * using a P-256 authenticated combination key.
*/
966 if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
967 if (!hci_conn_sc_enabled(conn) ||
968 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
969 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
973 if (hci_conn_ssp_enabled(conn) &&
974 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
980 /* Authenticate remote device */
/* Raise the connection's security level and, if authentication is not
 * already done/pending, send Authentication Requested. Preserves any
 * existing MITM requirement bit in auth_type.
 */
981 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
983 BT_DBG("hcon %p", conn);
985 if (conn->pending_sec_level > sec_level)
986 sec_level = conn->pending_sec_level;
988 if (sec_level > conn->sec_level)
989 conn->pending_sec_level = sec_level;
990 else if (test_bit(HCI_CONN_AUTH, &conn->flags))
993 /* Make sure we preserve an existing MITM requirement*/
994 auth_type |= (conn->auth_type & 0x01);
996 conn->auth_type = auth_type;
998 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
999 struct hci_cp_auth_requested cp;
1001 cp.handle = cpu_to_le16(conn->handle);
1002 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
1005 /* If we're already encrypted set the REAUTH_PEND flag,
1006 * otherwise set the ENCRYPT_PEND.
*/
1008 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1009 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1011 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1017 /* Encrypt the link */
/* Request link-level encryption if no encryption change is already
 * pending (ENCRYPT_PEND doubles as the in-flight guard).
 */
1018 static void hci_conn_encrypt(struct hci_conn *conn)
1020 BT_DBG("hcon %p", conn);
1022 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1023 struct hci_cp_set_conn_encrypt cp;
1024 cp.handle = cpu_to_le16(conn->handle);
1026 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1031 /* Enable security */
/* Elevate @conn to @sec_level. LE links delegate to SMP. For BR/EDR,
 * decide — based on the stored link-key type — whether the existing key
 * suffices for the requested level, and otherwise (re)authenticate and
 * encrypt the link.
 */
1032 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
1035 BT_DBG("hcon %p", conn);
1037 if (conn->type == LE_LINK)
1038 return smp_conn_security(conn, sec_level);
1040 /* For sdp we don't need the link key. */
1041 if (sec_level == BT_SECURITY_SDP)
1044 /* For non 2.1 devices and low security level we don't need the link
*/
1046 if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
1049 /* For other security levels we need the link key. */
1050 if (!test_bit(HCI_CONN_AUTH, &conn->flags))
1053 /* An authenticated FIPS approved combination key has sufficient
1054 * security for security level 4. */
1055 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
1056 sec_level == BT_SECURITY_FIPS)
1059 /* An authenticated combination key has sufficient security for
1060 security level 3. */
1061 if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
1062 conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
1063 sec_level == BT_SECURITY_HIGH)
1066 /* An unauthenticated combination key has sufficient security for
1067 security level 1 and 2. */
1068 if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
1069 conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
1070 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
1073 /* A combination key has always sufficient security for the security
1074 levels 1 or 2. High security level requires the combination key
1075 is generated using maximum PIN code length (16).
1076 For pre 2.1 units. */
1077 if (conn->key_type == HCI_LK_COMBINATION &&
1078 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
1079 conn->pin_length == 16))
/* Attempt authentication; wait if an encryption change is pending */
1083 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
1087 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1089 if (!hci_conn_auth(conn, sec_level, auth_type))
1093 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1096 hci_conn_encrypt(conn);
1099 EXPORT_SYMBOL(hci_conn_security);
1101 /* Check secure link requirement */
/* Accept unless a HIGH/FIPS level is demanded and the link has not
 * already reached it.
 */
1102 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1104 BT_DBG("hcon %p", conn);
1106 /* Accept if non-secure or higher security level is required */
1107 if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1110 /* Accept if secure or higher security level is already present */
1111 if (conn->sec_level == BT_SECURITY_HIGH ||
1112 conn->sec_level == BT_SECURITY_FIPS)
1115 /* Reject not secure link */
1118 EXPORT_SYMBOL(hci_conn_check_secure);
1119 #ifdef CONFIG_TIZEN_WIP
1120 /* Change link key */
/* Tizen-only: request a link-key refresh, guarded by AUTH_PEND so only
 * one key operation is in flight at a time.
 */
1121 int hci_conn_change_link_key(struct hci_conn *conn)
1123 BT_DBG("hcon %p", conn);
1125 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1126 struct hci_cp_change_conn_link_key cp;
1127 cp.handle = cpu_to_le16(conn->handle);
1128 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
/* Switch master/slave role on @conn to @role; no-op when already in the
 * requested role, and serialized via RSWITCH_PEND.
 */
1136 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1138 BT_DBG("hcon %p", conn);
1140 if (role == conn->role)
1143 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1144 struct hci_cp_switch_role cp;
1145 bacpy(&cp.bdaddr, &conn->dst);
1147 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1152 EXPORT_SYMBOL(hci_conn_switch_role);
1154 #ifdef CONFIG_TIZEN_WIP
1155 /* Change supervision timeout */
/* Tizen-only: write a new link supervision timeout. Only valid when we
 * are master and the connection has a real handle.
 */
1156 int hci_conn_change_supervision_timeout(struct hci_conn *conn, __u16 timeout)
1158 struct hci_cp_write_link_supervision_timeout cp;
1160 if (!((get_link_mode(conn)) & HCI_LM_MASTER))
1163 if (conn->handle == 0)
1166 memset(&cp, 0, sizeof(cp));
1167 cp.handle = cpu_to_le16(conn->handle);
1168 cp.timeout = cpu_to_le16(timeout);
1170 if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_LINK_SUPERVISION_TIMEOUT,
1171 sizeof(cp), &cp) < 0)
1172 BT_ERR("HCI_OP_WRITE_LINK_SUPERVISION_TIMEOUT is failed");
/* Set the LE data length (max TX octets/time) for @conn and persist the
 * values in the peer's stored connection parameters when present.
 */
1177 int hci_le_set_data_length(struct hci_conn *conn, u16 tx_octets, u16 tx_time)
1179 struct hci_dev *hdev = conn->hdev;
1180 struct hci_conn_params *params;
1181 struct hci_cp_le_set_data_len cp;
/* NOTE(review): the matching hci_dev_lock(hdev) appears elided in this
 * extraction (only the unlock is visible) — confirm against the full file.
 */
1185 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
1187 params->max_tx_octets = tx_octets;
1188 params->max_tx_time = tx_time;
1191 hci_dev_unlock(hdev);
1193 memset(&cp, 0, sizeof(cp));
1194 cp.handle = cpu_to_le16(conn->handle);
1195 cp.tx_len = cpu_to_le16(tx_octets);
1196 cp.tx_time = cpu_to_le16(tx_time);
1198 hci_send_cmd(hdev, HCI_OP_LE_SET_DATA_LEN, sizeof(cp), &cp);
1207 /* Enter active mode */
/* Wake @conn from sniff mode (when in power-save, only if @force_active)
 * and (re)arm the idle timer that will drop it back to sniff later.
 * The Tizen variant additionally cancels a previously-queued idle work
 * before re-queuing it.
 */
1208 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1210 struct hci_dev *hdev = conn->hdev;
1212 BT_DBG("hcon %p mode %d", conn, conn->mode);
1214 if (conn->mode != HCI_CM_SNIFF)
1217 if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1220 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1221 struct hci_cp_exit_sniff_mode cp;
1222 cp.handle = cpu_to_le16(conn->handle);
1223 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1227 #ifdef CONFIG_TIZEN_WIP /* Sniff timer cancel */
1228 if (hdev->idle_timeout > 0) {
1229 cancel_delayed_work(&conn->idle_work);
1230 queue_delayed_work(hdev->workqueue, &conn->idle_work,
1231 msecs_to_jiffies(hdev->idle_timeout));
1234 if (hdev->idle_timeout > 0)
1235 queue_delayed_work(hdev->workqueue, &conn->idle_work,
1236 msecs_to_jiffies(hdev->idle_timeout));
1237 #endif /* Sniff timer cancel */
1240 /* Drop all connection on the device */
/* Walk the connection hash, mark every connection closed, and notify the
 * protocol layers with a local-host-terminated reason.
 */
1241 void hci_conn_hash_flush(struct hci_dev *hdev)
1243 struct hci_conn_hash *h = &hdev->conn_hash;
1244 struct hci_conn *c, *n;
1246 BT_DBG("hdev %s", hdev->name);
1248 list_for_each_entry_safe(c, n, &h->list, list) {
1249 c->state = BT_CLOSED;
1251 hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1256 /* Check pending connect attempts */
/* If an ACL connection is queued in BT_CONNECT2 (waiting its turn),
 * start its Create Connection now.
 */
1257 void hci_conn_check_pending(struct hci_dev *hdev)
1259 struct hci_conn *conn;
1261 BT_DBG("hdev %s", hdev->name);
1265 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1267 hci_acl_create_connection(conn);
1269 hci_dev_unlock(hdev);
/* Build the HCI_LM_* bitmask describing @conn's current link mode
 * (role, encryption, authentication, secure, FIPS). Static upstream,
 * exported (non-static) in the Tizen build so other files can use it.
 */
1272 #ifndef CONFIG_TIZEN_WIP
1273 static u32 get_link_mode(struct hci_conn *conn)
1275 u32 get_link_mode(struct hci_conn *conn)
1280 if (conn->role == HCI_ROLE_MASTER)
1281 link_mode |= HCI_LM_MASTER;
1283 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1284 link_mode |= HCI_LM_ENCRYPT;
1286 if (test_bit(HCI_CONN_AUTH, &conn->flags))
1287 link_mode |= HCI_LM_AUTH;
1289 if (test_bit(HCI_CONN_SECURE, &conn->flags))
1290 link_mode |= HCI_LM_SECURE;
1292 if (test_bit(HCI_CONN_FIPS, &conn->flags))
1293 link_mode |= HCI_LM_FIPS;
/* HCIGETCONNLIST ioctl backend: copy up to req.conn_num connection-info
 * entries for the requested device back to user space. Returns 0 or a
 * negative errno.
 */
1298 int hci_get_conn_list(void __user *arg)
1301 struct hci_conn_list_req req, *cl;
1302 struct hci_conn_info *ci;
1303 struct hci_dev *hdev;
1304 int n = 0, size, err;
1306 if (copy_from_user(&req, arg, sizeof(req)))
/* Cap conn_num so the kmalloc below stays bounded (<= 2 pages of ci) */
1309 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1312 size = sizeof(req) + req.conn_num * sizeof(*ci);
1314 cl = kmalloc(size, GFP_KERNEL);
1318 hdev = hci_dev_get(req.dev_id);
1327 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1328 bacpy(&(ci + n)->bdaddr, &c->dst);
1329 (ci + n)->handle = c->handle;
1330 (ci + n)->type = c->type;
1331 (ci + n)->out = c->out;
1332 (ci + n)->state = c->state;
1333 (ci + n)->link_mode = get_link_mode(c);
1334 if (++n >= req.conn_num)
1337 hci_dev_unlock(hdev);
1339 cl->dev_id = hdev->id;
/* Shrink the copy-out size to the number of entries actually filled */
1341 size = sizeof(req) + n * sizeof(*ci);
1345 err = copy_to_user(arg, cl, size);
1348 return err ? -EFAULT : 0;
/* HCIGETCONNINFO ioctl backend: look up the connection matching the
 * user-supplied type/bdaddr and copy its info struct back to user space.
 */
1351 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1353 struct hci_conn_info_req req;
1354 struct hci_conn_info ci;
1355 struct hci_conn *conn;
1356 char __user *ptr = arg + sizeof(req);
1358 if (copy_from_user(&req, arg, sizeof(req)))
1362 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1364 bacpy(&ci.bdaddr, &conn->dst);
1365 ci.handle = conn->handle;
1366 ci.type = conn->type;
1368 ci.state = conn->state;
1369 ci.link_mode = get_link_mode(conn);
1371 hci_dev_unlock(hdev);
1376 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
/* HCIGETAUTHINFO ioctl backend: report the auth_type of the ACL link to
 * the user-supplied bdaddr.
 */
1379 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1381 struct hci_auth_info_req req;
1382 struct hci_conn *conn;
1384 if (copy_from_user(&req, arg, sizeof(req)))
1388 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1390 req.type = conn->auth_type;
1391 hci_dev_unlock(hdev);
1396 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
/* Allocate a new hci_chan bound to @conn (taking a conn reference) and
 * add it to the connection's channel list. Refused once the connection
 * is flagged for teardown (HCI_CONN_DROP). Returns NULL on failure.
 */
1399 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1401 struct hci_dev *hdev = conn->hdev;
1402 struct hci_chan *chan;
1404 BT_DBG("%s hcon %p", hdev->name, conn);
1406 if (test_bit(HCI_CONN_DROP, &conn->flags)) {
1407 BT_DBG("Refusing to create new hci_chan");
1411 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1415 chan->conn = hci_conn_get(conn);
1416 skb_queue_head_init(&chan->data_q);
1417 chan->state = BT_CONNECTED;
1419 list_add_rcu(&chan->list, &conn->chan_list);
/* Unlink @chan from its connection (RCU), flag the connection so no new
 * channels are created on it, and purge the channel's queued data.
 */
1424 void hci_chan_del(struct hci_chan *chan)
1426 struct hci_conn *conn = chan->conn;
1427 struct hci_dev *hdev = conn->hdev;
1429 BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1431 list_del_rcu(&chan->list);
1435 /* Prevent new hci_chan's to be created for this hci_conn */
1436 set_bit(HCI_CONN_DROP, &conn->flags);
1440 skb_queue_purge(&chan->data_q);
/* Delete every channel still attached to @conn. */
1444 void hci_chan_list_flush(struct hci_conn *conn)
1446 struct hci_chan *chan, *n;
1448 BT_DBG("hcon %p", conn);
1450 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
/* Find the channel with @handle on a single connection, or NULL. */
1454 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1457 struct hci_chan *hchan;
1459 list_for_each_entry(hchan, &hcon->chan_list, list) {
1460 if (hchan->handle == handle)
1467 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1469 struct hci_conn_hash *h = &hdev->conn_hash;
1470 struct hci_conn *hcon;
1471 struct hci_chan *hchan = NULL;
1475 list_for_each_entry_rcu(hcon, &h->list, list) {
1476 hchan = __hci_chan_lookup_handle(hcon, handle);