2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI connection handling. */
28 #include <linux/export.h>
29 #include <linux/debugfs.h>
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/iso.h>
35 #include <net/bluetooth/mgmt.h>
37 #include "hci_request.h"
/* Pairs a connection with a handle so both can travel through the single
 * void *data argument of hci_cmd_sync_queue() callbacks (see
 * hci_setup_sync() / hci_enhanced_setup_sync() below).
 * NOTE(review): the 'handle' member and closing brace are missing from
 * this excerpt.
 */
48 struct conn_handle_t {
49 struct hci_conn *conn;
/* eSCO parameter sets for CVSD air mode, ordered from most to least
 * preferred (S3 down to D0); conn->attempt - 1 indexes this table so each
 * retry falls back to the next entry. Fields are pkt_type mask,
 * max_latency and retransmission effort (see struct sco_param).
 */
53 static const struct sco_param esco_param_cvsd[] = {
54 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */
55 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
56 { EDR_ESCO_MASK | ESCO_EV3, 0x0007, 0x01 }, /* S1 */
57 { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0x01 }, /* D1 */
58 { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0x01 }, /* D0 */
/* Plain SCO (non-eSCO) fallback parameters for CVSD when the peer is not
 * eSCO capable; 0xffff max latency and 0xff retrans effort mean
 * "don't care" for legacy SCO links.
 */
61 static const struct sco_param sco_param_cvsd[] = {
62 { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0xff }, /* D1 */
63 { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0xff }, /* D0 */
/* eSCO parameter sets for transparent/mSBC (wideband speech) air mode,
 * ordered T2 then T1; indexed by conn->attempt - 1 like the CVSD table.
 */
66 static const struct sco_param esco_param_msbc[] = {
67 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */
68 { EDR_ESCO_MASK | ESCO_EV3, 0x0008, 0x02 }, /* T1 */
71 /* This function requires the caller holds hdev->lock */
/* Tear down the pending-connection bookkeeping after an LE
 * connect-by-scanning attempt finishes with the given HCI status.
 * Resolves the destination back to its identity address (via IRK) before
 * looking up the matching conn params, then re-files or deletes those
 * params according to their auto_connect policy.
 * NOTE(review): this excerpt is line-sampled; several statements and the
 * surrounding braces are missing (e.g. the irk/bdaddr declarations).
 */
72 static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
74 struct hci_conn_params *params;
75 struct hci_dev *hdev = conn->hdev;
81 bdaddr_type = conn->dst_type;
83 /* Check if we need to convert to identity address */
84 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
86 bdaddr = &irk->bdaddr;
87 bdaddr_type = irk->addr_type;
/* Drop the reference the pending-connection entry held on the hcon. */
90 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
96 hci_conn_drop(params->conn);
97 hci_conn_put(params->conn);
101 if (!params->explicit_connect)
104 /* If the status indicates successful cancellation of
105 * the attempt (i.e. Unknown Connection Id) there's no point of
106 * notifying failure since we'll go back to keep trying to
107 * connect. The only exception is explicit connect requests
108 * where a timeout + cancel does indicate an actual failure.
110 if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
111 mgmt_connect_failed(hdev, &conn->dst, conn->type,
112 conn->dst_type, status);
114 /* The connection attempt was doing scan for new RPA, and is
115 * in scan phase. If params are not associated with any other
116 * autoconnect action, remove them completely. If they are, just unmark
117 * them as waiting for connection, by clearing explicit_connect field.
119 params->explicit_connect = false;
121 hci_pend_le_list_del_init(params);
123 switch (params->auto_connect) {
124 case HCI_AUTO_CONN_EXPLICIT:
125 hci_conn_params_del(hdev, bdaddr, bdaddr_type);
126 /* return instead of break to avoid duplicate scan update */
128 case HCI_AUTO_CONN_DIRECT:
129 case HCI_AUTO_CONN_ALWAYS:
130 hci_pend_le_list_add(params, &hdev->pend_le_conns);
132 case HCI_AUTO_CONN_REPORT:
133 hci_pend_le_list_add(params, &hdev->pend_le_reports);
/* Re-evaluate passive scanning now that the pending lists changed. */
139 hci_update_passive_scan(hdev);
/* Final teardown of a connection object: remove pending conn params and
 * stored link key if flagged, flush L2CAP channels, unhash the conn,
 * notify the driver, and remove sysfs/debugfs entries.
 * NOTE(review): excerpt is line-sampled; braces and some statements are
 * missing from this view.
 */
142 static void hci_conn_cleanup(struct hci_conn *conn)
144 struct hci_dev *hdev = conn->hdev;
146 if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
147 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
/* HCI_CONN_FLUSH_KEY means the link key must not outlive the link. */
149 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
150 hci_remove_link_key(hdev, &conn->dst);
152 hci_chan_list_flush(conn);
154 hci_conn_hash_del(hdev, conn);
/* Let the driver stop any (e)SCO-specific offload for voice links. */
159 if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
160 switch (conn->setting & SCO_AIRMODE_MASK) {
161 case SCO_AIRMODE_CVSD:
162 case SCO_AIRMODE_TRANSP:
164 hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
169 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
172 hci_conn_del_sysfs(conn);
174 debugfs_remove_recursive(conn->debugfs);
/* Issue HCI Create Connection for an outgoing BR/EDR ACL link, seeding
 * page-scan parameters from the inquiry cache when a fresh entry exists.
 * If an inquiry is in progress it is cancelled first and the connection
 * parked in BT_CONNECT2 until the cancel completes.
 * NOTE(review): excerpt is line-sampled; braces/some lines missing.
 */
181 static void hci_acl_create_connection(struct hci_conn *conn)
183 struct hci_dev *hdev = conn->hdev;
184 struct inquiry_entry *ie;
185 struct hci_cp_create_conn cp;
187 BT_DBG("hcon %p", conn);
189 /* Many controllers disallow HCI Create Connection while it is doing
190 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
191 * Connection. This may cause the MGMT discovering state to become false
192 * without user space's request but it is okay since the MGMT Discovery
193 * APIs do not promise that discovery should be done forever. Instead,
194 * the user space monitors the status of MGMT discovering and it may
195 * request for discovery again when this flag becomes false.
197 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
198 /* Put this connection to "pending" state so that it will be
199 * executed after the inquiry cancel command complete event.
201 conn->state = BT_CONNECT2;
202 hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
206 conn->state = BT_CONNECT;
/* Initiator of a BR/EDR connection starts out as master. */
208 conn->role = HCI_ROLE_MASTER;
212 conn->link_policy = hdev->link_policy;
214 memset(&cp, 0, sizeof(cp));
215 bacpy(&cp.bdaddr, &conn->dst);
/* 0x02 = R2 page scan repetition mode, the conservative default when no
 * inquiry-cache data is available. */
216 cp.pscan_rep_mode = 0x02;
218 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
220 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
221 cp.pscan_rep_mode = ie->data.pscan_rep_mode;
222 cp.pscan_mode = ie->data.pscan_mode;
223 cp.clock_offset = ie->data.clock_offset |
227 memcpy(conn->dev_class, ie->data.dev_class, 3);
230 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* Allow the peer to request a role switch only if we support it and the
 * local link mode does not insist on staying master. */
231 if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
232 cp.role_switch = 0x01;
234 cp.role_switch = 0x00;
236 hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
/* Disconnect @conn with @reason. As a side effect, when we are central of
 * an established ACL link, a Read Clock Offset is issued first so the
 * result can be cached for faster future reconnects (handled in the
 * clock-offset event). Returns the result of hci_abort_conn().
 * NOTE(review): excerpt is line-sampled; braces missing from this view.
 */
239 int hci_disconnect(struct hci_conn *conn, __u8 reason)
241 BT_DBG("hcon %p", conn);
243 /* When we are central of an established connection and it enters
244 * the disconnect timeout, then go ahead and try to read the
245 * current clock offset. Processing of the result is done
246 * within the event handling and hci_clock_offset_evt function.
248 if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
249 (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
250 struct hci_dev *hdev = conn->hdev;
251 struct hci_cp_read_clock_offset clkoff_cp;
253 clkoff_cp.handle = cpu_to_le16(conn->handle);
254 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
258 return hci_abort_conn(conn, reason);
/* Issue legacy HCI Add SCO Connection on top of the ACL identified by
 * @handle; used when the controller is not eSCO capable (see
 * hci_sco_setup()). Moves the conn into BT_CONNECT state.
 * NOTE(review): excerpt is line-sampled; braces missing from this view.
 */
261 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
263 struct hci_dev *hdev = conn->hdev;
264 struct hci_cp_add_sco cp;
266 BT_DBG("hcon %p", conn);
268 conn->state = BT_CONNECT;
273 cp.handle = cpu_to_le16(handle);
274 cp.pkt_type = cpu_to_le16(conn->pkt_type);
276 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
/* Advance conn->attempt to the next usable entry of @esco_param (a table
 * of @size entries). Entries requiring eSCO 2M (2-EV3) support are
 * skipped when the parent ACL's peer lacks that feature.
 * Returns true if a usable entry remains (conn->attempt <= size).
 * NOTE(review): excerpt is line-sampled; the loop's break/braces are
 * missing from this view.
 */
279 static bool find_next_esco_param(struct hci_conn *conn,
280 const struct sco_param *esco_param, int size)
285 for (; conn->attempt <= size; conn->attempt++) {
286 if (lmp_esco_2m_capable(conn->parent) ||
287 (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
289 BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
290 conn, conn->attempt);
293 return conn->attempt <= size;
/* Configure the controller's offloaded codec data path for both
 * directions (0x00 = host->controller, 0x01 = controller->host) using
 * vendor-specific codec config obtained from driver callbacks.
 * Returns 0 on success or a negative errno.
 * NOTE(review): excerpt is line-sampled; error-handling branches, kfree
 * cleanup and braces are missing from this view.
 */
296 static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
299 __u8 vnd_len, *vnd_data = NULL;
300 struct hci_op_configure_data_path *cmd = NULL;
302 err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
/* Command carries a flexible vendor-data tail of vnd_len bytes. */
307 cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
313 err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
317 cmd->vnd_len = vnd_len;
318 memcpy(cmd->vnd_data, vnd_data, vnd_len);
320 cmd->direction = 0x00;
321 __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
322 sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
324 cmd->direction = 0x01;
325 err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
326 sizeof(*cmd) + vnd_len, cmd,
/* hci_cmd_sync_queue() callback: issue HCI Enhanced Setup Synchronous
 * Connection for the conn/handle pair packed in @data (struct
 * conn_handle_t). Selects coding-format and bandwidth parameters per the
 * negotiated codec (mSBC, transparent, or CVSD default) and retries down
 * the sco_param tables via conn->attempt.
 * NOTE(review): excerpt is line-sampled; case labels, break statements,
 * fallback branches and braces are missing from this view.
 */
335 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
337 struct conn_handle_t *conn_handle = data;
338 struct hci_conn *conn = conn_handle->conn;
339 __u16 handle = conn_handle->handle;
340 struct hci_cp_enhanced_setup_sync_conn cp;
341 const struct sco_param *param;
345 bt_dev_dbg(hdev, "hcon %p", conn);
347 /* for offload use case, codec needs to configured before opening SCO */
348 if (conn->codec.data_path)
349 configure_datapath_sync(hdev, &conn->codec);
351 conn->state = BT_CONNECT;
356 memset(&cp, 0x00, sizeof(cp));
358 cp.handle = cpu_to_le16(handle);
/* 0x1f40 = 8000 (bytes/s), i.e. 64 kbit/s over-the-air bandwidth. */
360 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
361 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
363 switch (conn->codec.id) {
/* mSBC branch: coding format 0x05 over the air, 0x04 (linear PCM)
 * towards the host, 16-bit samples. */
365 if (!find_next_esco_param(conn, esco_param_msbc,
366 ARRAY_SIZE(esco_param_msbc)))
369 param = &esco_param_msbc[conn->attempt - 1];
370 cp.tx_coding_format.id = 0x05;
371 cp.rx_coding_format.id = 0x05;
372 cp.tx_codec_frame_size = __cpu_to_le16(60);
373 cp.rx_codec_frame_size = __cpu_to_le16(60);
374 cp.in_bandwidth = __cpu_to_le32(32000);
375 cp.out_bandwidth = __cpu_to_le32(32000);
376 cp.in_coding_format.id = 0x04;
377 cp.out_coding_format.id = 0x04;
378 cp.in_coded_data_size = __cpu_to_le16(16);
379 cp.out_coded_data_size = __cpu_to_le16(16);
380 cp.in_pcm_data_format = 2;
381 cp.out_pcm_data_format = 2;
382 cp.in_pcm_sample_payload_msb_pos = 0;
383 cp.out_pcm_sample_payload_msb_pos = 0;
384 cp.in_data_path = conn->codec.data_path;
385 cp.out_data_path = conn->codec.data_path;
386 cp.in_transport_unit_size = 1;
387 cp.out_transport_unit_size = 1;
/* Transparent branch: coding format 0x03 both over the air and
 * host-side — controller passes the stream through untouched. */
390 case BT_CODEC_TRANSPARENT:
391 if (!find_next_esco_param(conn, esco_param_msbc,
392 ARRAY_SIZE(esco_param_msbc)))
394 param = &esco_param_msbc[conn->attempt - 1];
395 cp.tx_coding_format.id = 0x03;
396 cp.rx_coding_format.id = 0x03;
397 cp.tx_codec_frame_size = __cpu_to_le16(60);
398 cp.rx_codec_frame_size = __cpu_to_le16(60);
399 cp.in_bandwidth = __cpu_to_le32(0x1f40);
400 cp.out_bandwidth = __cpu_to_le32(0x1f40);
401 cp.in_coding_format.id = 0x03;
402 cp.out_coding_format.id = 0x03;
403 cp.in_coded_data_size = __cpu_to_le16(16);
404 cp.out_coded_data_size = __cpu_to_le16(16);
405 cp.in_pcm_data_format = 2;
406 cp.out_pcm_data_format = 2;
407 cp.in_pcm_sample_payload_msb_pos = 0;
408 cp.out_pcm_sample_payload_msb_pos = 0;
409 cp.in_data_path = conn->codec.data_path;
410 cp.out_data_path = conn->codec.data_path;
411 cp.in_transport_unit_size = 1;
412 cp.out_transport_unit_size = 1;
/* CVSD branch: prefer eSCO tables when the peer is eSCO capable,
 * otherwise fall back to the legacy SCO table. */
416 if (conn->parent && lmp_esco_capable(conn->parent)) {
417 if (!find_next_esco_param(conn, esco_param_cvsd,
418 ARRAY_SIZE(esco_param_cvsd)))
420 param = &esco_param_cvsd[conn->attempt - 1];
422 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
424 param = &sco_param_cvsd[conn->attempt - 1];
426 cp.tx_coding_format.id = 2;
427 cp.rx_coding_format.id = 2;
428 cp.tx_codec_frame_size = __cpu_to_le16(60);
429 cp.rx_codec_frame_size = __cpu_to_le16(60);
430 cp.in_bandwidth = __cpu_to_le32(16000);
431 cp.out_bandwidth = __cpu_to_le32(16000);
432 cp.in_coding_format.id = 4;
433 cp.out_coding_format.id = 4;
434 cp.in_coded_data_size = __cpu_to_le16(16);
435 cp.out_coded_data_size = __cpu_to_le16(16);
436 cp.in_pcm_data_format = 2;
437 cp.out_pcm_data_format = 2;
438 cp.in_pcm_sample_payload_msb_pos = 0;
439 cp.out_pcm_sample_payload_msb_pos = 0;
440 cp.in_data_path = conn->codec.data_path;
441 cp.out_data_path = conn->codec.data_path;
442 cp.in_transport_unit_size = 16;
443 cp.out_transport_unit_size = 16;
/* Retry/latency parameters come from the selected sco_param entry. */
449 cp.retrans_effort = param->retrans_effort;
450 cp.pkt_type = __cpu_to_le16(param->pkt_type);
451 cp.max_latency = __cpu_to_le16(param->max_latency);
453 if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
/* Issue plain HCI Setup Synchronous Connection (non-enhanced variant)
 * for conn on ACL @handle. Parameters are chosen from the air-mode bits
 * of conn->setting: mSBC table for transparent mode, eSCO/SCO CVSD
 * tables otherwise. Returns true when the command was sent.
 * NOTE(review): excerpt is line-sampled; break statements, failure
 * returns and braces are missing from this view.
 */
459 static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
461 struct hci_dev *hdev = conn->hdev;
462 struct hci_cp_setup_sync_conn cp;
463 const struct sco_param *param;
465 bt_dev_dbg(hdev, "hcon %p", conn);
467 conn->state = BT_CONNECT;
472 cp.handle = cpu_to_le16(handle);
/* 0x1f40 = 8000 bytes/s = 64 kbit/s, the standard SCO bandwidth. */
474 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
475 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
476 cp.voice_setting = cpu_to_le16(conn->setting);
478 switch (conn->setting & SCO_AIRMODE_MASK) {
479 case SCO_AIRMODE_TRANSP:
480 if (!find_next_esco_param(conn, esco_param_msbc,
481 ARRAY_SIZE(esco_param_msbc)))
483 param = &esco_param_msbc[conn->attempt - 1];
485 case SCO_AIRMODE_CVSD:
486 if (conn->parent && lmp_esco_capable(conn->parent)) {
487 if (!find_next_esco_param(conn, esco_param_cvsd,
488 ARRAY_SIZE(esco_param_cvsd)))
490 param = &esco_param_cvsd[conn->attempt - 1];
/* Peer lacks eSCO: fall back to the legacy SCO parameter table. */
492 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
494 param = &sco_param_cvsd[conn->attempt - 1];
501 cp.retrans_effort = param->retrans_effort;
502 cp.pkt_type = __cpu_to_le16(param->pkt_type);
503 cp.max_latency = __cpu_to_le16(param->max_latency);
505 if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
/* Entry point for setting up a synchronous (SCO/eSCO) link on ACL
 * @handle. Uses the enhanced command path (queued via hci_cmd_sync_queue
 * with a heap-allocated conn_handle_t) when the controller supports it,
 * otherwise falls back to the legacy hci_setup_sync_conn().
 * NOTE(review): excerpt is line-sampled; allocation-failure handling and
 * the result check/return are missing from this view.
 */
511 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
514 struct conn_handle_t *conn_handle;
516 if (enhanced_sync_conn_capable(conn->hdev)) {
517 conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
522 conn_handle->conn = conn;
523 conn_handle->handle = handle;
524 result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
532 return hci_setup_sync_conn(conn, handle);
/* Request an LE connection parameter update (interval min/max, latency,
 * supervision timeout). Stored conn params for the peer, if any, are
 * refreshed first under hdev->lock, then HCI LE Connection Update is
 * sent with zero CE length bounds.
 * NOTE(review): excerpt is line-sampled; the hci_dev_lock() call and the
 * function's return value computation are missing from this view.
 */
535 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
538 struct hci_dev *hdev = conn->hdev;
539 struct hci_conn_params *params;
540 struct hci_cp_le_conn_update cp;
544 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
546 params->conn_min_interval = min;
547 params->conn_max_interval = max;
548 params->conn_latency = latency;
549 params->supervision_timeout = to_multiplier;
552 hci_dev_unlock(hdev);
554 memset(&cp, 0, sizeof(cp));
555 cp.handle = cpu_to_le16(conn->handle);
556 cp.conn_interval_min = cpu_to_le16(min);
557 cp.conn_interval_max = cpu_to_le16(max);
558 cp.conn_latency = cpu_to_le16(latency);
559 cp.supervision_timeout = cpu_to_le16(to_multiplier);
/* CE length 0: let the controller pick the connection-event length. */
560 cp.min_ce_len = cpu_to_le16(0x0000);
561 cp.max_ce_len = cpu_to_le16(0x0000);
563 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
/* Start LE link-layer encryption on @conn using the given EDIV, Rand and
 * long-term key (only the first @key_size bytes of @ltk are copied; the
 * cp struct was zeroed so the remainder stays zero-padded).
 * NOTE(review): excerpt is line-sampled; the rand/ediv assignments and
 * braces are missing from this view.
 */
571 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
572 __u8 ltk[16], __u8 key_size)
574 struct hci_dev *hdev = conn->hdev;
575 struct hci_cp_le_start_enc cp;
577 BT_DBG("hcon %p", conn);
579 memset(&cp, 0, sizeof(cp));
581 cp.handle = cpu_to_le16(conn->handle);
584 memcpy(cp.ltk, ltk, key_size);
586 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
589 /* Device _must_ be locked */
/* After the parent ACL completes with @status, kick off setup of the
 * first linked SCO/eSCO child: eSCO-capable controllers use
 * hci_setup_sync(), others the legacy hci_add_sco(). On ACL failure the
 * child is failed via hci_connect_cfm() and deleted.
 * NOTE(review): excerpt is line-sampled; the status check and braces are
 * missing from this view.
 */
590 void hci_sco_setup(struct hci_conn *conn, __u8 status)
592 struct hci_link *link;
594 link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
595 if (!link || !link->conn)
598 BT_DBG("hcon %p", conn);
601 if (lmp_esco_capable(conn->hdev))
602 hci_setup_sync(link->conn, conn->handle);
604 hci_add_sco(link->conn, conn->handle);
606 hci_connect_cfm(link->conn, status);
607 hci_conn_del(link->conn);
/* Delayed-work handler (conn->disc_work): fires when a connection has
 * been idle past its disconnect timeout; aborts the connection with the
 * reason supplied by the protocol layer.
 * NOTE(review): excerpt is line-sampled; the refcnt guard that returns
 * early when refcnt > 0 is missing from this view.
 */
611 static void hci_conn_timeout(struct work_struct *work)
613 struct hci_conn *conn = container_of(work, struct hci_conn,
615 int refcnt = atomic_read(&conn->refcnt);
617 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
621 /* FIXME: It was observed that in pairing failed scenario, refcnt
622 * drops below 0. Probably this is because l2cap_conn_del calls
623 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
624 * dropped. After that loop hci_chan_del is called which also drops
625 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
631 hci_abort_conn(conn, hci_proto_disconn_ind(conn));
634 /* Enter sniff mode */
/* Delayed-work handler (conn->idle_work): put an idle ACL link into
 * sniff mode, optionally configuring sniff subrating first when both
 * sides support it. No-op unless the link is ACTIVE and the link policy
 * allows sniff.
 * NOTE(review): excerpt is line-sampled; early 'return' statements and
 * braces are missing from this view.
 */
635 static void hci_conn_idle(struct work_struct *work)
637 struct hci_conn *conn = container_of(work, struct hci_conn,
639 struct hci_dev *hdev = conn->hdev;
641 BT_DBG("hcon %p mode %d", conn, conn->mode);
643 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
646 if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
649 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
650 struct hci_cp_sniff_subrate cp;
651 cp.handle = cpu_to_le16(conn->handle);
652 cp.max_latency = cpu_to_le16(0);
653 cp.min_remote_timeout = cpu_to_le16(0);
654 cp.min_local_timeout = cpu_to_le16(0);
655 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
/* MODE_CHANGE_PEND guards against queueing a second mode change while
 * one is already outstanding. */
658 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
659 struct hci_cp_sniff_mode cp;
660 cp.handle = cpu_to_le16(conn->handle);
661 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
662 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
663 cp.attempt = cpu_to_le16(4);
664 cp.timeout = cpu_to_le16(1);
665 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
/* Delayed-work handler (conn->auto_accept_work): automatically confirm a
 * pending user-confirmation pairing request for this peer by sending
 * HCI User Confirmation Request Reply with the peer address.
 */
669 static void hci_conn_auto_accept(struct work_struct *work)
671 struct hci_conn *conn = container_of(work, struct hci_conn,
672 auto_accept_work.work);
674 hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
/* Disable LE advertising, using the extended command when the controller
 * supports extended advertising and the legacy Set Advertising Enable
 * otherwise.
 * NOTE(review): excerpt is line-sampled; cp.enable assignment, the
 * legacy-path 'enable' variable and braces are missing from this view.
 */
678 static void le_disable_advertising(struct hci_dev *hdev)
680 if (ext_adv_capable(hdev)) {
681 struct hci_cp_le_set_ext_adv_enable cp;
684 cp.num_of_sets = 0x00;
686 hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
690 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
/* Delayed-work handler (conn->le_conn_timeout): an LE connection attempt
 * took too long. As peripheral (directed advertising) we stop
 * advertising and fail the conn with ADVERTISING_TIMEOUT; as central we
 * abort with REMOTE_USER_TERM.
 * NOTE(review): excerpt is line-sampled; the hci_dev_lock() call and a
 * return after the peripheral branch are missing from this view.
 */
695 static void le_conn_timeout(struct work_struct *work)
697 struct hci_conn *conn = container_of(work, struct hci_conn,
698 le_conn_timeout.work);
699 struct hci_dev *hdev = conn->hdev;
703 /* We could end up here due to having done directed advertising,
704 * so clean up the state if necessary. This should however only
705 * happen with broken hardware or if low duty cycle was used
706 * (which doesn't have a timeout of its own).
708 if (conn->role == HCI_ROLE_SLAVE) {
709 /* Disable LE Advertising */
710 le_disable_advertising(hdev);
712 hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
713 hci_dev_unlock(hdev);
717 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
/* LE Set CIG Parameters command buffer with room for the maximum of 0x1f
 * CIS entries trailing the fixed header.
 */
720 struct iso_cig_params {
721 struct hci_cp_le_set_cig_params cp;
722 struct hci_cis_params cis[0x1f];
/* Scratch data passed through hci_conn_hash_list_state() callbacks and
 * hci_cmd_sync_queue() for ISO (CIG/BIG) housekeeping.
 * NOTE(review): the member list is missing from this excerpt; usage below
 * shows at least big, bis, cig, count, sync_handle, big_term and
 * big_sync_term fields.
 */
725 struct iso_list_data {
/* hci_conn_hash iterator callback: count/match broadcast (BIS)
 * connections belonging to the BIG/BIS identifiers in @data.
 * NOTE(review): excerpt is line-sampled; the accumulating statement and
 * braces are missing from this view.
 */
740 static void bis_list(struct hci_conn *conn, void *data)
742 struct iso_list_data *d = data;
744 /* Skip if not broadcast/ANY address */
745 if (bacmp(&conn->dst, BDADDR_ANY))
748 if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
749 d->bis != conn->iso_qos.bcast.bis)
/* hci_cmd_sync_queue() callback: tear down a broadcaster BIG. Removes the
 * periodic-advertising instance for the BIS first, then terminates the
 * BIG itself — but only if it was actually created (d->big_term).
 */
755 static int terminate_big_sync(struct hci_dev *hdev, void *data)
757 struct iso_list_data *d = data;
759 bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);
761 hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
763 /* Only terminate BIG if it has been created */
767 return hci_le_terminate_big_sync(hdev, d->big,
768 HCI_ERROR_LOCAL_HOST_TERM);
/* Destroy callback paired with the BIG/PA terminate sync requests above:
 * frees the iso_list_data allocated by the queueing function.
 * NOTE(review): body is missing from this excerpt.
 */
771 static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
/* Queue asynchronous termination of the BIG/BIS that @conn belongs to.
 * Allocates an iso_list_data snapshot (freed by terminate_big_destroy)
 * and records, via HCI_CONN_BIG_CREATED, whether the BIG itself must be
 * terminated. Returns the hci_cmd_sync_queue() result.
 * NOTE(review): excerpt is line-sampled; allocation-failure handling and
 * the error-path kfree are missing from this view.
 */
776 static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
778 struct iso_list_data *d;
781 bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
782 conn->iso_qos.bcast.bis);
784 d = kzalloc(sizeof(*d), GFP_KERNEL);
788 d->big = conn->iso_qos.bcast.big;
789 d->bis = conn->iso_qos.bcast.bis;
790 d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);
792 ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
793 terminate_big_destroy);
/* hci_cmd_sync_queue() callback for the broadcast-receiver side:
 * terminate BIG sync if it had been established (d->big_sync_term), then
 * terminate the periodic-advertising sync train.
 */
800 static int big_terminate_sync(struct hci_dev *hdev, void *data)
802 struct iso_list_data *d = data;
804 bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
807 if (d->big_sync_term)
808 hci_le_big_terminate_sync(hdev, d->big)
810 return hci_le_pa_terminate_sync(hdev, d->sync_handle);
/* Queue asynchronous BIG-sync/PA-sync termination for receiver @conn on
 * BIG @big. Snapshot of sync_handle and the HCI_CONN_BIG_SYNC flag goes
 * into a heap iso_list_data freed by terminate_big_destroy.
 * NOTE(review): excerpt is line-sampled; allocation-failure handling,
 * d->big assignment and the error-path kfree are missing from this view.
 */
813 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
815 struct iso_list_data *d;
818 bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);
820 d = kzalloc(sizeof(*d), GFP_KERNEL);
825 d->sync_handle = conn->sync_handle;
826 d->big_sync_term = test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags);
828 ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
829 terminate_big_destroy);
836 /* Cleanup BIS connection
838 * Detects if there any BIS left connected in a BIG
839 * broadcaster: Remove advertising instance and terminate BIG.
840 * broadcaster receiver: Teminate BIG sync and terminate PA sync.
/* conn->cleanup hook installed by hci_conn_add() for broadcast ISO links
 * (BDADDR_ANY destination). Master role = broadcaster, otherwise
 * broadcast receiver.
 * NOTE(review): excerpt is line-sampled; the early-return checks on the
 * 'bis' lookups and braces are missing from this view.
 */
842 static void bis_cleanup(struct hci_conn *conn)
844 struct hci_dev *hdev = conn->hdev;
845 struct hci_conn *bis;
847 bt_dev_dbg(hdev, "conn %p", conn);
849 if (conn->role == HCI_ROLE_MASTER) {
850 if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
853 /* Check if ISO connection is a BIS and terminate advertising
854 * set and BIG if there are no other connections using it.
856 bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
860 hci_le_terminate_big(hdev, conn);
862 bis = hci_conn_hash_lookup_big_any_dst(hdev,
863 conn->iso_qos.bcast.big);
868 hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
/* hci_cmd_sync_queue() callback: remove the CIG whose handle was packed
 * into the data pointer with UINT_PTR().
 */
873 static int remove_cig_sync(struct hci_dev *hdev, void *data)
875 u8 handle = PTR_UINT(data);
877 return hci_le_remove_cig_sync(hdev, handle);
/* Queue asynchronous removal of CIG @handle; the handle travels through
 * the void *data argument via UINT_PTR/PTR_UINT, so no allocation or
 * destroy callback is needed.
 */
880 static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
882 bt_dev_dbg(hdev, "handle 0x%2.2x", handle);
884 return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
/* hci_conn_hash iterator callback: count CIS connections that belong to
 * the CIG recorded in @data, skipping broadcast links.
 * NOTE(review): excerpt is line-sampled; the counter increment is missing
 * from this view.
 */
888 static void find_cis(struct hci_conn *conn, void *data)
890 struct iso_list_data *d = data;
892 /* Ignore broadcast or if CIG don't match */
893 if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
899 /* Cleanup CIS connection:
901 * Detects if there any CIS left connected in a CIG and remove it.
/* conn->cleanup hook for unicast ISO (CIS) links: counts remaining CIS
 * in the same CIG across bound/connecting/connected states and removes
 * the CIG once no other connection uses it.
 * NOTE(review): excerpt is line-sampled; the d.count check before the
 * removal and braces are missing from this view.
 */
903 static void cis_cleanup(struct hci_conn *conn)
905 struct hci_dev *hdev = conn->hdev;
906 struct iso_list_data d;
908 if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
911 memset(&d, 0, sizeof(d));
912 d.cig = conn->iso_qos.ucast.cig;
914 /* Check if ISO connection is a CIS and remove CIG if there are
915 * no other connections using it.
917 hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
918 hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
919 hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
923 hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
/* Allocate a placeholder handle above HCI_CONN_HANDLE_MAX for a conn
 * whose real handle is not yet known; scans the (handle-sorted) conn
 * hash for the first unused value.
 * NOTE(review): excerpt is line-sampled; the rcu lock/unlock, the
 * handle++ step and the return are missing from this view.
 */
926 static u16 hci_conn_hash_alloc_unset(struct hci_dev *hdev)
928 struct hci_conn_hash *h = &hdev->conn_hash;
930 u16 handle = HCI_CONN_HANDLE_MAX + 1;
934 list_for_each_entry_rcu(c, &h->list, list) {
935 /* Find the first unused handle */
936 if (handle == 0xffff || c->handle != handle)
/* Allocate and initialize a new hci_conn of @type to @dst, assign a
 * placeholder handle, set per-type packet types and cleanup hooks,
 * initialize work items and queues, add it to the conn hash, notify the
 * driver (except for SCO/eSCO, which notify on setup completion) and
 * create the sysfs entry. Returns the new conn or NULL on allocation
 * failure (allocation check itself is outside this excerpt).
 * NOTE(review): excerpt is line-sampled; the switch (type) labels, role
 * assignment and several statements are missing from this view.
 */
945 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
948 struct hci_conn *conn;
950 BT_DBG("%s dst %pMR", hdev->name, dst);
952 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
956 bacpy(&conn->dst, dst);
957 bacpy(&conn->src, &hdev->bdaddr);
/* Real handle arrives later from the controller; start with a value
 * above HCI_CONN_HANDLE_MAX so it is recognizably "unset". */
958 conn->handle = hci_conn_hash_alloc_unset(hdev);
962 conn->mode = HCI_CM_ACTIVE;
963 conn->state = BT_OPEN;
964 conn->auth_type = HCI_AT_GENERAL_BONDING;
965 conn->io_capability = hdev->io_capability;
/* 0xff sentinels: remote auth requirement / key type not yet known. */
966 conn->remote_auth = 0xff;
967 conn->key_type = 0xff;
968 conn->rssi = HCI_RSSI_INVALID;
969 conn->tx_power = HCI_TX_POWER_INVALID;
970 conn->max_tx_power = HCI_TX_POWER_INVALID;
972 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
973 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
975 /* Set Default Authenticated payload timeout to 30s */
976 conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
978 if (conn->role == HCI_ROLE_MASTER)
/* Per-type packet type and source-address setup (ACL / LE / ISO /
 * SCO / eSCO branches of the original switch). */
983 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
986 /* conn->src should reflect the local identity address */
987 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
990 /* conn->src should reflect the local identity address */
991 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
993 /* set proper cleanup function */
994 if (!bacmp(dst, BDADDR_ANY))
995 conn->cleanup = bis_cleanup;
996 else if (conn->role == HCI_ROLE_MASTER)
997 conn->cleanup = cis_cleanup;
1001 if (lmp_esco_capable(hdev))
1002 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
1003 (hdev->esco_type & EDR_ESCO_MASK);
1005 conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
1008 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
1012 skb_queue_head_init(&conn->data_q);
1014 INIT_LIST_HEAD(&conn->chan_list);
1015 INIT_LIST_HEAD(&conn->link_list);
1017 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
1018 INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
1019 INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
1020 INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
1022 atomic_set(&conn->refcnt, 0);
1026 hci_conn_hash_add(hdev, conn);
1028 /* The SCO and eSCO connections will only be notified when their
1029 * setup has been completed. This is different to ACL links which
1030 * can be notified right away.
1032 if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
1034 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
1037 hci_conn_init_sysfs(conn);
/* Detach @conn from its parent/child link relationships. For a parent
 * (no conn->parent) it recursively unlinks every child, deleting
 * not-yet-established SCO/eSCO children so they cannot get stuck. For a
 * child it removes its link entry and drops the references held on the
 * parent.
 * NOTE(review): excerpt is line-sampled; the early return, the link-free
 * statements and braces are missing from this view.
 */
1042 static void hci_conn_unlink(struct hci_conn *conn)
1044 struct hci_dev *hdev = conn->hdev;
1046 bt_dev_dbg(hdev, "hcon %p", conn);
1048 if (!conn->parent) {
1049 struct hci_link *link, *t;
1051 list_for_each_entry_safe(link, t, &conn->link_list, list) {
1052 struct hci_conn *child = link->conn;
1054 hci_conn_unlink(child);
1056 /* If hdev is down it means
1057 * hci_dev_close_sync/hci_conn_hash_flush is in progress
1058 * and links don't need to be cleanup as all connections
1061 if (!test_bit(HCI_UP, &hdev->flags))
1064 /* Due to race, SCO connection might be not established
1065 * yet at this point. Delete it now, otherwise it is
1066 * possible for it to be stuck and can't be deleted.
1068 if ((child->type == SCO_LINK ||
1069 child->type == ESCO_LINK) &&
1070 HCI_CONN_HANDLE_UNSET(child->handle))
1071 hci_conn_del(child);
/* Child path: remove our link node and release the parent refs the
 * link held. */
1080 list_del_rcu(&conn->link->list);
1083 hci_conn_drop(conn->parent);
1084 hci_conn_put(conn->parent);
1085 conn->parent = NULL;
/* Fully delete a connection: unlink from parent/children, cancel all of
 * its delayed work, return unacked frame credits to the right controller
 * buffer pool (ACL / LE / ISO), purge the data queue, and finish with
 * hci_conn_cleanup().
 * NOTE(review): excerpt is line-sampled; braces and some else-branches of
 * the credit accounting are missing from this view.
 */
1091 void hci_conn_del(struct hci_conn *conn)
1093 struct hci_dev *hdev = conn->hdev;
1095 BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
1097 hci_conn_unlink(conn);
/* _sync variants: the work handlers must not run concurrently with (or
 * after) deletion. */
1099 cancel_delayed_work_sync(&conn->disc_work);
1100 cancel_delayed_work_sync(&conn->auto_accept_work);
1101 cancel_delayed_work_sync(&conn->idle_work);
1103 if (conn->type == ACL_LINK) {
1104 /* Unacked frames */
1105 hdev->acl_cnt += conn->sent;
1106 } else if (conn->type == LE_LINK) {
1107 cancel_delayed_work(&conn->le_conn_timeout);
/* Controllers without a dedicated LE buffer pool share ACL credits. */
1110 hdev->le_cnt += conn->sent;
1112 hdev->acl_cnt += conn->sent;
1114 /* Unacked ISO frames */
1115 if (conn->type == ISO_LINK) {
1117 hdev->iso_cnt += conn->sent;
1118 else if (hdev->le_pkts)
1119 hdev->le_cnt += conn->sent;
1121 hdev->acl_cnt += conn->sent;
1126 amp_mgr_put(conn->amp_mgr);
1128 skb_queue_purge(&conn->data_q);
1130 /* Remove the connection from the list and cleanup its remaining
1131 * state. This is a separate function since for some cases like
1132 * BT_CONNECT_SCAN we *only* want the cleanup part without the
1133 * rest of hci_conn_del.
1135 hci_conn_cleanup(conn);
/* Pick the local controller to use for reaching @dst. With a concrete
 * @src, match the interface whose (identity) address and type equal
 * src/src_type; with BDADDR_ANY, pick any up, non-user-channel primary
 * controller whose address differs from @dst. Returns a held hci_dev
 * reference (caller must hci_dev_put()) or NULL.
 * NOTE(review): excerpt is line-sampled; the id_addr declarations, the
 * 'continue'/'break' statements and braces are missing from this view.
 */
1138 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
1140 int use_src = bacmp(src, BDADDR_ANY);
1141 struct hci_dev *hdev = NULL, *d;
1143 BT_DBG("%pMR -> %pMR", src, dst);
1145 read_lock(&hci_dev_list_lock);
1147 list_for_each_entry(d, &hci_dev_list, list) {
1148 if (!test_bit(HCI_UP, &d->flags) ||
1149 hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
1150 d->dev_type != HCI_PRIMARY)
1154 * No source address - find interface with bdaddr != dst
1155 * Source address - find interface with bdaddr == src
1162 if (src_type == BDADDR_BREDR) {
1163 if (!lmp_bredr_capable(d))
1165 bacpy(&id_addr, &d->bdaddr);
1166 id_addr_type = BDADDR_BREDR;
1168 if (!lmp_le_capable(d))
1171 hci_copy_identity_address(d, &id_addr,
1174 /* Convert from HCI to three-value type */
1175 if (id_addr_type == ADDR_LE_DEV_PUBLIC)
1176 id_addr_type = BDADDR_LE_PUBLIC;
1178 id_addr_type = BDADDR_LE_RANDOM;
1181 if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
1185 if (bacmp(&d->bdaddr, dst)) {
1192 hdev = hci_dev_hold(hdev);
1194 read_unlock(&hci_dev_list_lock);
1197 EXPORT_SYMBOL(hci_get_route);
1199 /* This function requires the caller holds hdev->lock */
/* Handle a failed outgoing LE connection: clean up the
 * connect-by-scanning state, then re-enable advertising in case we were
 * acting as peripheral when the attempt failed.
 */
1200 static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
1202 struct hci_dev *hdev = conn->hdev;
1204 hci_connect_le_scan_cleanup(conn, status);
1206 /* Enable advertising in case this was a failed connection
1207 * attempt as a peripheral.
1209 hci_enable_advertising(hdev);
1212 /* This function requires the caller holds hdev->lock */
/* Mark a connection attempt as failed with HCI @status: LE links get the
 * LE-specific cleanup, ACL links report the failure to the management
 * interface; in all cases the conn is closed and protocol layers
 * notified via hci_connect_cfm().
 * NOTE(review): excerpt is line-sampled; the case labels and break
 * statements of the switch are missing from this view.
 */
1213 void hci_conn_failed(struct hci_conn *conn, u8 status)
1215 struct hci_dev *hdev = conn->hdev;
1217 bt_dev_dbg(hdev, "status 0x%2.2x", status);
1219 switch (conn->type) {
1221 hci_le_conn_failed(conn, status);
1224 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1225 conn->dst_type, status);
1229 conn->state = BT_CLOSED;
1230 hci_connect_cfm(conn, status);
1234 /* This function requires the caller holds hdev->lock */
/* Install the controller-assigned @handle on @conn. Returns 0 when the
 * handle is already set to this value, HCI_ERROR_INVALID_PARAMETERS when
 * it exceeds HCI_CONN_HANDLE_MAX, and the pending abort reason when the
 * connection is already being aborted (handle must not change then).
 * NOTE(review): excerpt is line-sampled; the 'return 0' statements are
 * missing from this view.
 */
1235 u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
1237 struct hci_dev *hdev = conn->hdev;
1239 bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);
1241 if (conn->handle == handle)
1244 if (handle > HCI_CONN_HANDLE_MAX) {
1245 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
1246 handle, HCI_CONN_HANDLE_MAX);
1247 return HCI_ERROR_INVALID_PARAMETERS;
1250 /* If abort_reason has been sent it means the connection is being
1251 * aborted and the handle shall not be changed.
1253 if (conn->abort_reason)
1254 return conn->abort_reason;
1256 conn->handle = handle;
/* Completion callback for the queued hci_connect_le_sync() request. On
 * success only the scan-cleanup runs; on error, if the connection is
 * still the pending LE connect, the conn-timeout work is flushed (so a
 * create-conn-cancel goes out if needed) and the conn is failed with the
 * translated status.
 * NOTE(review): excerpt is line-sampled; the hci_dev_lock(), the success
 * branch structure and goto/done labels are missing from this view.
 */
1261 static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
1263 struct hci_conn *conn;
1264 u16 handle = PTR_UINT(data);
1266 conn = hci_conn_hash_lookup_handle(hdev, handle);
1270 bt_dev_dbg(hdev, "err %d", err);
1275 hci_connect_le_scan_cleanup(conn, 0x00);
1279 /* Check if connection is still pending */
1280 if (conn != hci_lookup_le_connect(hdev))
1283 /* Flush to make sure we send create conn cancel command if needed */
1284 flush_delayed_work(&conn->le_conn_timeout);
1285 hci_conn_failed(conn, bt_status(err));
1288 hci_dev_unlock(hdev);
/* hci_cmd_sync_queue() callback: look the conn up again by its handle
 * (it may have been freed while queued), move it to BT_CONNECT and issue
 * the LE create-connection sequence synchronously.
 * NOTE(review): excerpt is line-sampled; the !conn early return is
 * missing from this view.
 */
1291 static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
1293 struct hci_conn *conn;
1294 u16 handle = PTR_UINT(data);
1296 conn = hci_conn_hash_lookup_handle(hdev, handle);
1300 bt_dev_dbg(hdev, "conn %p", conn);
1302 conn->state = BT_CONNECT;
1304 return hci_le_create_conn_sync(hdev, conn);
/* Initiate an outgoing LE connection to @dst. Fails fast when LE is
 * disabled (-EOPNOTSUPP/-ECONNREFUSED) or another LE connect attempt is
 * already running (-EBUSY, only one at a time per controller). If the
 * controller did not resolve @dst, an IRK with a known RPA redirects the
 * attempt to the resolvable random address. A conn object is reused or
 * created, configured, and the create-connection sequence is queued with
 * create_le_conn_complete as completion. Returns the conn or ERR_PTR.
 * NOTE(review): excerpt is line-sampled; the existing-conn reuse path,
 * error-path hci_conn_del and the final 'return conn' are missing from
 * this view.
 */
1307 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1308 u8 dst_type, bool dst_resolved, u8 sec_level,
1309 u16 conn_timeout, u8 role)
1311 struct hci_conn *conn;
1312 struct smp_irk *irk;
1315 /* Let's make sure that le is enabled.*/
1316 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1317 if (lmp_le_capable(hdev))
1318 return ERR_PTR(-ECONNREFUSED);
1320 return ERR_PTR(-EOPNOTSUPP);
1323 /* Since the controller supports only one LE connection attempt at a
1324 * time, we return -EBUSY if there is any connection attempt running.
1326 if (hci_lookup_le_connect(hdev))
1327 return ERR_PTR(-EBUSY);
1329 /* If there's already a connection object but it's not in
1330 * scanning state it means it must already be established, in
1331 * which case we can't do anything else except report a failure
1334 conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1335 if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1336 return ERR_PTR(-EBUSY);
1339 /* Check if the destination address has been resolved by the controller
1340 * since if it did then the identity address shall be used.
1342 if (!dst_resolved) {
1343 /* When given an identity address with existing identity
1344 * resolving key, the connection needs to be established
1345 * to a resolvable random address.
1347 * Storing the resolvable random address is required here
1348 * to handle connection failures. The address will later
1349 * be resolved back into the original identity address
1350 * from the connect request.
1352 irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1353 if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1355 dst_type = ADDR_LE_DEV_RANDOM;
1360 bacpy(&conn->dst, dst);
1362 conn = hci_conn_add(hdev, LE_LINK, dst, role);
1364 return ERR_PTR(-ENOMEM);
1365 hci_conn_hold(conn);
1366 conn->pending_sec_level = sec_level;
1369 conn->dst_type = dst_type;
1370 conn->sec_level = BT_SECURITY_LOW;
1371 conn->conn_timeout = conn_timeout;
1373 clear_bit(HCI_CONN_SCANNING, &conn->flags);
/* The conn's handle (not the pointer) is queued so the callback can
 * detect if the conn was freed in the meantime. */
1375 err = hci_cmd_sync_queue(hdev, hci_connect_le_sync,
1376 UINT_PTR(conn->handle),
1377 create_le_conn_complete);
1380 return ERR_PTR(err);
/* Return true when an LE connection to @addr/@type exists and has
 * reached BT_CONNECTED state.
 */
1386 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1388 struct hci_conn *conn;
1390 conn = hci_conn_hash_lookup_le(hdev, addr, type);
1394 if (conn->state != BT_CONNECTED)
1400 /* This function requires the caller holds hdev->lock */
/* Create or update the conn_params entry used by connect-by-scan so
 * the device is placed on pend_le_conns and explicit_connect is set.
 * Newly created params get HCI_AUTO_CONN_EXPLICIT so the cleanup path
 * can delete them again; pre-existing disabled params are kept.
 */
1401 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1402 bdaddr_t *addr, u8 addr_type)
1404 struct hci_conn_params *params;
/* Nothing to do if we are already connected to this address */
1406 if (is_connected(hdev, addr, addr_type))
1409 params = hci_conn_params_lookup(hdev, addr, addr_type);
1411 params = hci_conn_params_add(hdev, addr, addr_type);
1415 /* If we created new params, mark them to be deleted in
1416 * hci_connect_le_scan_cleanup. It's different case than
1417 * existing disabled params, those will stay after cleanup.
1419 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1422 /* We're trying to connect, so make sure params are at pend_le_conns */
1423 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1424 params->auto_connect == HCI_AUTO_CONN_REPORT ||
1425 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1426 hci_pend_le_list_del_init(params);
1427 hci_pend_le_list_add(params, &hdev->pend_le_conns);
1430 params->explicit_connect = true;
1432 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1433 params->auto_connect);
/* Allocate the first free BIG handle (0x00..0xee) into qos->bcast.big
 * when it is still BT_ISO_QOS_BIG_UNSET; -EADDRNOTAVAIL if none free.
 */
1438 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1440 struct hci_conn *conn;
1443 /* Allocate a BIG if not set */
1444 if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
1445 for (big = 0x00; big < 0xef; big++) {
1447 conn = hci_conn_hash_lookup_big(hdev, big);
1453 return -EADDRNOTAVAIL;
1456 qos->bcast.big = big;
/* Allocate a free advertising set handle for the BIS into
 * qos->bcast.bis when still BT_ISO_QOS_BIS_UNSET. Instance 0x00 is
 * skipped (reserved for general-purpose advertising); fail with
 * -EADDRNOTAVAIL when all adv sets are taken.
 */
1462 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1464 struct hci_conn *conn;
1467 /* Allocate BIS if not set */
1468 if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1469 /* Find an unused adv set to advertise BIS, skip instance 0x00
1470 * since it is reserved as general purpose set.
1472 for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
1475 conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
1480 if (bis == hdev->le_num_of_adv_sets)
1481 return -EADDRNOTAVAIL;
1484 qos->bcast.bis = bis;
1490 /* This function requires the caller holds hdev->lock */
/* Create a broadcast ISO (BIS) hci_conn: allocate BIG/BIS handles,
 * refuse if LE Create BIG was already sent for this BIG, and enforce
 * that all BISes bound to the same BIG share identical QoS and BASE
 * data. Returns the held connection in BT_CONNECT state.
 */
1491 static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
1492 struct bt_iso_qos *qos, __u8 base_len,
1495 struct hci_conn *conn;
1498 /* Let's make sure that le is enabled.*/
1499 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1500 if (lmp_le_capable(hdev))
1501 return ERR_PTR(-ECONNREFUSED);
1502 return ERR_PTR(-EOPNOTSUPP);
1505 err = qos_set_big(hdev, qos);
1507 return ERR_PTR(err);
1509 err = qos_set_bis(hdev, qos);
1511 return ERR_PTR(err);
1513 /* Check if the LE Create BIG command has already been sent */
1514 conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
1517 return ERR_PTR(-EADDRINUSE);
1519 /* Check BIS settings against other bound BISes, since all
1520 * BISes in a BIG must have the same value for all parameters
1522 conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1524 if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
1525 base_len != conn->le_per_adv_data_len ||
1526 memcmp(conn->le_per_adv_data, base, base_len)))
1527 return ERR_PTR(-EADDRINUSE);
1529 conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1531 return ERR_PTR(-ENOMEM);
1533 conn->state = BT_CONNECT;
1535 hci_conn_hold(conn);
1539 /* This function requires the caller holds hdev->lock */
/* Connect to an LE device indirectly: instead of issuing LE Create
 * Connection right away, add the peer to the pending-connections list
 * and let passive scanning trigger the connection when the device is
 * seen advertising. Returns a held hci_conn (existing or new).
 */
1540 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1541 u8 dst_type, u8 sec_level,
1543 enum conn_reasons conn_reason)
1545 struct hci_conn *conn;
1547 /* Let's make sure that le is enabled.*/
1548 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1549 if (lmp_le_capable(hdev))
1550 return ERR_PTR(-ECONNREFUSED);
1552 return ERR_PTR(-EOPNOTSUPP);
1555 /* Some devices send ATT messages as soon as the physical link is
1556 * established. To be able to handle these ATT messages, the user-
1557 * space first establishes the connection and then starts the pairing
1560 * So if a hci_conn object already exists for the following connection
1561 * attempt, we simply update pending_sec_level and auth_type fields
1562 * and return the object found.
1564 conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
/* Only ever raise the pending security level, never lower it */
1566 if (conn->pending_sec_level < sec_level)
1567 conn->pending_sec_level = sec_level;
1571 BT_DBG("requesting refresh of dst_addr");
1573 conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1575 return ERR_PTR(-ENOMEM);
1577 if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1579 return ERR_PTR(-EBUSY);
1582 conn->state = BT_CONNECT;
1583 set_bit(HCI_CONN_SCANNING, &conn->flags);
1584 conn->dst_type = dst_type;
1585 conn->sec_level = BT_SECURITY_LOW;
1586 conn->pending_sec_level = sec_level;
1587 conn->conn_timeout = conn_timeout;
1588 conn->conn_reason = conn_reason;
/* Re-evaluate passive scanning so the new pend_le_conns entry is used */
1590 hci_update_passive_scan(hdev);
1593 hci_conn_hold(conn);
/* Create (or reuse) a BR/EDR ACL connection to @dst and kick off the
 * Create Connection procedure when the link is not yet in progress.
 * Security/auth parameters are recorded on the connection first.
 */
1597 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1598 u8 sec_level, u8 auth_type,
1599 enum conn_reasons conn_reason)
1601 struct hci_conn *acl;
1603 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1604 if (lmp_bredr_capable(hdev))
1605 return ERR_PTR(-ECONNREFUSED);
1607 return ERR_PTR(-EOPNOTSUPP);
1610 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1612 acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1614 return ERR_PTR(-ENOMEM);
1619 acl->conn_reason = conn_reason;
/* Only start a new Create Connection when the link is idle or closed */
1620 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1621 acl->sec_level = BT_SECURITY_LOW;
1622 acl->pending_sec_level = sec_level;
1623 acl->auth_type = auth_type;
1624 hci_acl_create_connection(acl);
/* Attach @conn as a child of @parent (e.g. SCO under ACL, CIS under
 * LE): allocate a hci_link that holds a reference on the child and
 * gives the child a reference on the parent, then append it to the
 * parent's RCU-protected link list.
 */
1630 static struct hci_link *hci_conn_link(struct hci_conn *parent,
1631 struct hci_conn *conn)
1633 struct hci_dev *hdev = parent->hdev;
1634 struct hci_link *link;
1636 bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);
1644 link = kzalloc(sizeof(*link), GFP_KERNEL);
/* The link owns a hold on the child connection */
1648 link->conn = hci_conn_hold(conn);
1650 conn->parent = hci_conn_get(parent);
1652 /* Use list_add_tail_rcu append to the list */
1653 list_add_tail_rcu(&link->list, &parent->link_list);
/* Set up a SCO/eSCO connection on top of an ACL link to @dst: the ACL
 * is created (or reused) first, the SCO conn is linked under it, and
 * SCO setup is issued once the ACL is connected — deferred when a mode
 * change is still pending on the ACL.
 */
1658 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1659 __u16 setting, struct bt_codec *codec)
1661 struct hci_conn *acl;
1662 struct hci_conn *sco;
1663 struct hci_link *link;
1665 acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
1666 CONN_REASON_SCO_CONNECT);
1670 sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1672 sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
1675 return ERR_PTR(-ENOMEM);
1679 link = hci_conn_link(acl, sco);
1683 return ERR_PTR(-ENOLINK);
1686 sco->setting = setting;
1687 sco->codec = *codec;
1689 if (acl->state == BT_CONNECTED &&
1690 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
/* Audio links need the ACL out of sniff mode */
1691 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1692 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1694 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1695 /* defer SCO setup until mode change completed */
1696 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1700 hci_sco_setup(acl, 0x00);
/* Build and send the HCI LE Create BIG command from the broadcast QoS:
 * counts the bound BISes of this BIG for num_bis and copies the SDU
 * interval/size, latency, RTN, PHY, packing, framing, encryption and
 * broadcast code into the command parameters.
 */
1706 static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
1708 struct hci_dev *hdev = conn->hdev;
1709 struct hci_cp_le_create_big cp;
1710 struct iso_list_data data;
1712 memset(&cp, 0, sizeof(cp));
1714 data.big = qos->bcast.big;
1715 data.bis = qos->bcast.bis;
1718 /* Create a BIS for each bound connection */
1719 hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1722 cp.handle = qos->bcast.big;
1723 cp.adv_handle = qos->bcast.bis;
1724 cp.num_bis = data.count;
/* Intervals are 24-bit little-endian on the wire */
1725 hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
1726 cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
1727 cp.bis.latency = cpu_to_le16(qos->bcast.out.latency);
1728 cp.bis.rtn = qos->bcast.out.rtn;
1729 cp.bis.phy = qos->bcast.out.phy;
1730 cp.bis.packing = qos->bcast.packing;
1731 cp.bis.framing = qos->bcast.framing;
1732 cp.bis.encryption = qos->bcast.encryption;
1733 memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));
1735 return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
/* hci_cmd_sync worker: rebuild and send LE Set CIG Parameters for the
 * CIG identified by @data. Every CIS currently belonging to the CIG is
 * re-programmed, so the command always carries the full CIS set.
 */
1738 static int set_cig_params_sync(struct hci_dev *hdev, void *data)
1740 u8 cig_id = PTR_UINT(data);
1741 struct hci_conn *conn;
1742 struct bt_iso_qos *qos;
1743 struct iso_cig_params pdu;
1746 conn = hci_conn_hash_lookup_cig(hdev, cig_id);
1750 memset(&pdu, 0, sizeof(pdu));
/* CIG-level parameters come from the first connection found */
1752 qos = &conn->iso_qos;
1753 pdu.cp.cig_id = cig_id;
1754 hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval);
1755 hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval);
1756 pdu.cp.sca = qos->ucast.sca;
1757 pdu.cp.packing = qos->ucast.packing;
1758 pdu.cp.framing = qos->ucast.framing;
1759 pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
1760 pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);
1762 /* Reprogram all CIS(s) with the same CIG, valid range are:
1763 * num_cis: 0x00 to 0x1F
1764 * cis_id: 0x00 to 0xEF
1766 for (cis_id = 0x00; cis_id < 0xf0 &&
1767 pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) {
1768 struct hci_cis_params *cis;
1770 conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
1774 qos = &conn->iso_qos;
1776 cis = &pdu.cis[pdu.cp.num_cis++];
1777 cis->cis_id = cis_id;
1778 cis->c_sdu = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
1779 cis->p_sdu = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
/* Fall back to a default PHY when none was requested */
1780 cis->c_phy = qos->ucast.out.phy ? qos->ucast.out.phy :
1782 cis->p_phy = qos->ucast.in.phy ? qos->ucast.in.phy :
1784 cis->c_rtn = qos->ucast.out.rtn;
1785 cis->p_rtn = qos->ucast.in.rtn;
/* Nothing to program if the CIG turned out to be empty */
1788 if (!pdu.cp.num_cis)
1791 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
1793 pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu,
/* Pick CIG/CIS identifiers for @conn (allocating the first free ones
 * when unset) and queue the Set CIG Parameters command. Returns false
 * when no identifier could be allocated or queueing failed.
 */
1797 static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
1799 struct hci_dev *hdev = conn->hdev;
1800 struct iso_list_data data;
1802 memset(&data, 0, sizeof(data));
1804 /* Allocate first still reconfigurable CIG if not set */
1805 if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
1806 for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
/* A CIG with bound or connected CIS(s) can no longer be reused */
1809 hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1814 hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1815 BT_CONNECTED, &data);
1820 if (data.cig == 0xf0)
1824 qos->ucast.cig = data.cig;
/* An explicitly requested CIS id must not already be taken */
1827 if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
1828 if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
1834 /* Allocate first available CIS if not set */
1835 for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
1837 if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
1840 qos->ucast.cis = data.cis;
1845 if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
1849 if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
1850 UINT_PTR(qos->ucast.cig), NULL) < 0)
/* Bind a unicast ISO (CIS) connection to @dst with the given QoS:
 * reuse an existing matching CIS, otherwise create one, fill in any
 * unset direction parameters from the opposite direction, program the
 * CIG and move the connection to BT_BOUND.
 */
1856 struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
1857 __u8 dst_type, struct bt_iso_qos *qos)
1859 struct hci_conn *cis;
1861 cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
1864 cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1866 return ERR_PTR(-ENOMEM);
1867 cis->cleanup = cis_cleanup;
1868 cis->dst_type = dst_type;
1869 cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
1870 cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
1873 if (cis->state == BT_CONNECTED)
1876 /* Check if CIS has been set and the settings matches */
1877 if (cis->state == BT_BOUND &&
1878 !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
1881 /* Update LINK PHYs according to QoS preference */
1882 cis->le_tx_phy = qos->ucast.out.phy;
1883 cis->le_rx_phy = qos->ucast.in.phy;
1885 /* If output interval is not set use the input interval as it cannot be
1888 if (!qos->ucast.out.interval)
1889 qos->ucast.out.interval = qos->ucast.in.interval;
1891 /* If input interval is not set use the output interval as it cannot be
1894 if (!qos->ucast.in.interval)
1895 qos->ucast.in.interval = qos->ucast.out.interval;
1897 /* If output latency is not set use the input latency as it cannot be
1900 if (!qos->ucast.out.latency)
1901 qos->ucast.out.latency = qos->ucast.in.latency;
1903 /* If input latency is not set use the output latency as it cannot be
1906 if (!qos->ucast.in.latency)
1907 qos->ucast.in.latency = qos->ucast.out.latency;
1909 if (!hci_le_set_cig_params(cis, qos)) {
1911 return ERR_PTR(-EINVAL);
/* Record the (possibly completed) QoS on the connection */
1916 cis->iso_qos = *qos;
1917 cis->state = BT_BOUND;
/* Configure the ISO data paths over HCI for each direction that has a
 * non-zero SDU size: direction 0x00 = input (host to controller),
 * 0x01 = output (controller to host), both with transparent codec.
 */
1922 bool hci_iso_setup_path(struct hci_conn *conn)
1924 struct hci_dev *hdev = conn->hdev;
1925 struct hci_cp_le_setup_iso_path cmd;
1927 memset(&cmd, 0, sizeof(cmd));
1929 if (conn->iso_qos.ucast.out.sdu) {
1930 cmd.handle = cpu_to_le16(conn->handle);
1931 cmd.direction = 0x00; /* Input (Host to Controller) */
1932 cmd.path = 0x00; /* HCI path if enabled */
1933 cmd.codec = 0x03; /* Transparent Data */
1935 if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1940 if (conn->iso_qos.ucast.in.sdu) {
1941 cmd.handle = cpu_to_le16(conn->handle);
1942 cmd.direction = 0x01; /* Output (Controller to Host) */
1943 cmd.path = 0x00; /* HCI path if enabled */
1944 cmd.codec = 0x03; /* Transparent Data */
1946 if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
/* Check whether @conn is a unicast ISO link ready for LE Create CIS:
 * it must have a real peer address, a connected parent ACL, be in
 * BT_CONNECT state and already have a CIS handle assigned.
 */
1954 int hci_conn_check_create_cis(struct hci_conn *conn)
1956 if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
1959 if (!conn->parent || conn->parent->state != BT_CONNECTED ||
1960 conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
/* hci_cmd_sync adapter: the queued data is unused, just run the
 * synchronous LE Create CIS sequence.
 */
1966 static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
1968 return hci_le_create_cis_sync(hdev);
/* Scan the connection hash for CIS connections awaiting creation and,
 * when at least one is ready, queue the LE Create CIS command work.
 */
1971 int hci_le_create_cis_pending(struct hci_dev *hdev)
1973 struct hci_conn *conn;
1974 bool pending = false;
1978 list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
/* A CIS with CREATE_CIS already set means a request is in flight */
1979 if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
1984 if (!hci_conn_check_create_cis(conn))
1993 /* Queue Create CIS */
1994 return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
/* Fill in unset ISO QoS fields for one direction: default the SDU size
 * to the best available MTU (ISO, then LE, then ACL), resolve
 * BT_ISO_PHY_ANY to @phy, and derive interval/latency from the LE ACL
 * connection parameters when not provided.
 */
1997 static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
1998 struct bt_iso_io_qos *qos, __u8 phy)
2000 /* Only set MTU if PHY is enabled */
2001 if (!qos->sdu && qos->phy) {
2002 if (hdev->iso_mtu > 0)
2003 qos->sdu = hdev->iso_mtu;
2004 else if (hdev->le_mtu > 0)
2005 qos->sdu = hdev->le_mtu;
2007 qos->sdu = hdev->acl_mtu;
2010 /* Use the same PHY as ACL if set to any */
2011 if (qos->phy == BT_ISO_PHY_ANY)
2014 /* Use LE ACL connection interval if not set */
2016 /* ACL interval unit in 1.25 ms to us */
2017 qos->interval = conn->le_conn_interval * 1250;
2019 /* Use LE ACL connection latency if not set */
2021 qos->latency = conn->le_conn_latency;
/* hci_cmd_sync worker for broadcast setup: start periodic advertising
 * carrying the BASE, then issue LE Create BIG. Advertising interval is
 * derived from the SDU interval (us -> 1.25 ms units) scaled by
 * sync_factor; 2M PHY requests secondary-PHY 2M advertising.
 */
2024 static int create_big_sync(struct hci_dev *hdev, void *data)
2026 struct hci_conn *conn = data;
2027 struct bt_iso_qos *qos = &conn->iso_qos;
2028 u16 interval, sync_interval = 0;
2032 if (qos->bcast.out.phy == 0x02)
2033 flags |= MGMT_ADV_FLAG_SEC_2M;
2035 /* Align intervals */
2036 interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;
2039 sync_interval = interval * 4;
2041 err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
2042 conn->le_per_adv_data, flags, interval,
2043 interval, sync_interval);
2047 return hci_le_create_big(conn, &conn->iso_qos);
/* Completion callback for the queued PA Create Sync request; logs a
 * failure to establish periodic advertising synchronization.
 */
2050 static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
2052 struct hci_cp_le_pa_create_sync *cp = data;
2054 bt_dev_dbg(hdev, "");
2057 bt_dev_err(hdev, "Unable to create PA: %d", err);
/* hci_cmd_sync worker: send LE PA Create Sync; clear the HCI_PA_SYNC
 * busy flag on failure and re-enable passive scanning so the sync can
 * be established.
 */
2062 static int create_pa_sync(struct hci_dev *hdev, void *data)
2064 struct hci_cp_le_pa_create_sync *cp = data;
2067 err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
2068 sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2070 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2074 return hci_update_passive_scan_sync(hdev);
/* Request synchronization to a periodic advertising train from
 * @dst/@sid. Only one PA sync attempt may run at a time, guarded by
 * the HCI_PA_SYNC flag; the command parameters are heap-allocated and
 * handed to the sync-queue worker.
 */
2077 int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
2078 __u8 sid, struct bt_iso_qos *qos)
2080 struct hci_cp_le_pa_create_sync *cp;
2082 if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
2085 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
/* Allocation failed: release the busy flag before bailing out */
2087 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2091 cp->options = qos->bcast.options;
2093 cp->addr_type = dst_type;
2094 bacpy(&cp->addr, dst);
2095 cp->skip = cpu_to_le16(qos->bcast.skip);
2096 cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
2097 cp->sync_cte_type = qos->bcast.sync_cte_type;
2099 /* Queue start pa_create_sync and scan */
2100 return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
/* Synchronize to a remote BIG: build LE BIG Create Sync from the
 * broadcast QoS and the caller-supplied BIS index list. The command is
 * variable-length, so only sizeof(cp) + num_bis bytes are sent.
 */
2103 int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos,
2104 __u16 sync_handle, __u8 num_bis, __u8 bis[])
2107 struct hci_cp_le_big_create_sync cp;
2112 if (num_bis > sizeof(pdu.bis))
2115 err = qos_set_big(hdev, qos);
2119 memset(&pdu, 0, sizeof(pdu));
2120 pdu.cp.handle = qos->bcast.big;
2121 pdu.cp.sync_handle = cpu_to_le16(sync_handle);
2122 pdu.cp.encryption = qos->bcast.encryption;
2123 memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
2124 pdu.cp.mse = qos->bcast.mse;
2125 pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
2126 pdu.cp.num_bis = num_bis;
2127 memcpy(pdu.bis, bis, num_bis);
2129 return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
2130 sizeof(pdu.cp) + num_bis, &pdu);
/* Completion callback for the queued Create BIG work: on error, log it
 * and notify the connection via hci_connect_cfm().
 */
2133 static void create_big_complete(struct hci_dev *hdev, void *data, int err)
2135 struct hci_conn *conn = data;
2137 bt_dev_dbg(hdev, "conn %p", conn);
2140 bt_dev_err(hdev, "Unable to create BIG: %d", err);
2141 hci_connect_cfm(conn, err);
/* Bind a broadcast ISO (BIS) source connection: wrap the BASE in a
 * Broadcast Audio Announcement (UUID 0x1851) service-data AD element,
 * create the BIS hci_conn, record PHY preference and periodic adv
 * data, finalize the outgoing QoS and move it to BT_BOUND.
 */
2146 struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
2147 struct bt_iso_qos *qos,
2148 __u8 base_len, __u8 *base)
2150 struct hci_conn *conn;
2151 __u8 eir[HCI_MAX_PER_AD_LENGTH];
2153 if (base_len && base)
2154 base_len = eir_append_service_data(eir, 0, 0x1851,
2157 /* We need hci_conn object using the BDADDR_ANY as dst */
2158 conn = hci_add_bis(hdev, dst, qos, base_len, eir);
2162 /* Update LINK PHYs according to QoS preference */
2163 conn->le_tx_phy = qos->bcast.out.phy;
/* NOTE(review): the line below duplicates the le_tx_phy assignment;
 * by analogy with hci_bind_cis() it presumably should set le_rx_phy
 * from qos->bcast.in.phy — confirm against upstream before changing.
 */
2164 conn->le_tx_phy = qos->bcast.out.phy;
2166 /* Add Basic Announcement into Periodic Adv Data if BASE is set */
2167 if (base_len && base) {
2168 memcpy(conn->le_per_adv_data, eir, sizeof(eir));
2169 conn->le_per_adv_data_len = base_len;
2172 hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
2173 conn->le_tx_phy ? conn->le_tx_phy :
2174 hdev->le_tx_def_phys);
2176 conn->iso_qos = *qos;
2177 conn->state = BT_BOUND;
/* hci_conn_hash_list_state() callback: set HCI_CONN_PER_ADV on every
 * broadcast (BDADDR_ANY) connection matching the BIG/BIS in @data.
 */
2182 static void bis_mark_per_adv(struct hci_conn *conn, void *data)
2184 struct iso_list_data *d = data;
2186 /* Skip if not broadcast/ANY address */
2187 if (bacmp(&conn->dst, BDADDR_ANY))
2190 if (d->big != conn->iso_qos.bcast.big ||
2191 d->bis == BT_ISO_QOS_BIS_UNSET ||
2192 d->bis != conn->iso_qos.bcast.bis)
2195 set_bit(HCI_CONN_PER_ADV, &conn->flags);
/* Create and start a broadcast ISO source: bind the BIS, mark all
 * bound connections of this BIG as periodic-advertising, then queue
 * the periodic-advertising + Create BIG work on the sync queue.
 */
2198 struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
2199 __u8 dst_type, struct bt_iso_qos *qos,
2200 __u8 base_len, __u8 *base)
2202 struct hci_conn *conn;
2204 struct iso_list_data data;
2206 conn = hci_bind_bis(hdev, dst, qos, base_len, base);
2210 data.big = qos->bcast.big;
2211 data.bis = qos->bcast.bis;
2213 /* Set HCI_CONN_PER_ADV for all bound connections, to mark that
2214 * the start periodic advertising and create BIG commands have
2217 hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
2220 /* Queue start periodic advertising and create BIG */
2221 err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
2222 create_big_complete);
/* Queueing failed: drop the hold taken when the BIS was bound */
2224 hci_conn_drop(conn);
2225 return ERR_PTR(err);
/* Establish a unicast ISO (CIS) connection: first bring up the LE ACL
 * (directly when advertising, otherwise via connect-by-scan), derive
 * ISO QoS defaults from the ACL PHYs, bind the CIS under the ACL and
 * trigger pending CIS creation.
 */
2231 struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
2232 __u8 dst_type, struct bt_iso_qos *qos)
2234 struct hci_conn *le;
2235 struct hci_conn *cis;
2236 struct hci_link *link;
2238 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2239 le = hci_connect_le(hdev, dst, dst_type, false,
2241 HCI_LE_CONN_TIMEOUT,
2244 le = hci_connect_le_scan(hdev, dst, dst_type,
2246 HCI_LE_CONN_TIMEOUT,
2247 CONN_REASON_ISO_CONNECT);
2251 hci_iso_qos_setup(hdev, le, &qos->ucast.out,
2252 le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
2253 hci_iso_qos_setup(hdev, le, &qos->ucast.in,
2254 le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);
2256 cis = hci_bind_cis(hdev, dst, dst_type, qos);
2262 link = hci_conn_link(le, cis);
2266 return ERR_PTR(-ENOLINK);
2269 /* Link takes the refcount */
2272 cis->state = BT_CONNECT;
2274 hci_le_create_cis_pending(hdev);
2279 /* Check link security requirement */
/* Validate that the link's current security matches policy: SC-only
 * mode requires Secure Connections with AES-CCM and a P-256 key,
 * FIPS level requires AES-CCM, and SSP links must be encrypted.
 */
2280 int hci_conn_check_link_mode(struct hci_conn *conn)
2282 BT_DBG("hcon %p", conn);
2284 /* In Secure Connections Only mode, it is required that Secure
2285 * Connections is used and the link is encrypted with AES-CCM
2286 * using a P-256 authenticated combination key.
2288 if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2289 if (!hci_conn_sc_enabled(conn) ||
2290 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2291 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2295 /* AES encryption is required for Level 4:
2297 * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2300 * 128-bit equivalent strength for link and encryption keys
2301 * required using FIPS approved algorithms (E0 not allowed,
2302 * SAFER+ not allowed, and P-192 not allowed; encryption key
2305 if (conn->sec_level == BT_SECURITY_FIPS &&
2306 !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2307 bt_dev_err(conn->hdev,
2308 "Invalid security: Missing AES-CCM usage");
2312 if (hci_conn_ssp_enabled(conn) &&
2313 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2319 /* Authenticate remote device */
/* Raise the connection's pending security level as needed and, unless
 * an authentication request is already outstanding, send HCI
 * Authentication Requested, preserving any existing MITM requirement.
 */
2320 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
2322 BT_DBG("hcon %p", conn);
2324 if (conn->pending_sec_level > sec_level)
2325 sec_level = conn->pending_sec_level;
2327 if (sec_level > conn->sec_level)
2328 conn->pending_sec_level = sec_level;
2329 else if (test_bit(HCI_CONN_AUTH, &conn->flags))
2332 /* Make sure we preserve an existing MITM requirement*/
2333 auth_type |= (conn->auth_type & 0x01);
2335 conn->auth_type = auth_type;
2337 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2338 struct hci_cp_auth_requested cp;
2340 cp.handle = cpu_to_le16(conn->handle);
2341 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
2344 /* If we're already encrypted set the REAUTH_PEND flag,
2345 * otherwise set the ENCRYPT_PEND.
2347 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2348 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2350 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2356 /* Encrypt the link */
/* Request link encryption via HCI Set Connection Encryption, unless
 * an encryption change is already pending on this connection.
 */
2357 static void hci_conn_encrypt(struct hci_conn *conn)
2359 BT_DBG("hcon %p", conn);
2361 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2362 struct hci_cp_set_conn_encrypt cp;
2363 cp.handle = cpu_to_le16(conn->handle);
2365 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2370 /* Enable security */
/* Ensure the connection meets @sec_level. LE links delegate to SMP;
 * for BR/EDR the stored link-key type is checked against the requested
 * level, authenticating and/or encrypting the link as required.
 */
2371 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
2374 BT_DBG("hcon %p", conn);
2376 if (conn->type == LE_LINK)
2377 return smp_conn_security(conn, sec_level);
2379 /* For sdp we don't need the link key. */
2380 if (sec_level == BT_SECURITY_SDP)
2383 /* For non 2.1 devices and low security level we don't need the link
2385 if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
2388 /* For other security levels we need the link key. */
2389 if (!test_bit(HCI_CONN_AUTH, &conn->flags))
2392 /* An authenticated FIPS approved combination key has sufficient
2393 * security for security level 4. */
2394 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
2395 sec_level == BT_SECURITY_FIPS)
2398 /* An authenticated combination key has sufficient security for
2399 security level 3. */
2400 if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
2401 conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
2402 sec_level == BT_SECURITY_HIGH)
2405 /* An unauthenticated combination key has sufficient security for
2406 security level 1 and 2. */
2407 if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
2408 conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
2409 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
2412 /* A combination key has always sufficient security for the security
2413 levels 1 or 2. High security level requires the combination key
2414 is generated using maximum PIN code length (16).
2415 For pre 2.1 units. */
2416 if (conn->key_type == HCI_LK_COMBINATION &&
2417 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
2418 conn->pin_length == 16))
/* An encryption change is in flight; caller must wait for it */
2422 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
2426 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2428 if (!hci_conn_auth(conn, sec_level, auth_type))
2432 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
2433 /* Ensure that the encryption key size has been read,
2434 * otherwise stall the upper layer responses.
2436 if (!conn->enc_key_size)
2439 /* Nothing else needed, all requirements are met */
2443 hci_conn_encrypt(conn);
2446 EXPORT_SYMBOL(hci_conn_security);
2448 /* Check secure link requirement */
/* Accept unless a HIGH/FIPS level is requested and the link has not
 * already reached at least HIGH security.
 */
2449 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2451 BT_DBG("hcon %p", conn);
2453 /* Accept if non-secure or higher security level is required */
2454 if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2457 /* Accept if secure or higher security level is already present */
2458 if (conn->sec_level == BT_SECURITY_HIGH ||
2459 conn->sec_level == BT_SECURITY_FIPS)
2462 /* Reject not secure link */
2465 EXPORT_SYMBOL(hci_conn_check_secure);
/* Request a master/slave role switch on the connection; no-op when the
 * desired role is already held or a switch is already pending.
 */
2468 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2470 BT_DBG("hcon %p", conn);
2472 if (role == conn->role)
2475 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2476 struct hci_cp_switch_role cp;
2477 bacpy(&cp.bdaddr, &conn->dst);
2479 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2484 EXPORT_SYMBOL(hci_conn_switch_role);
2486 /* Enter active mode */
/* Leave sniff mode when data needs to flow: issue Exit Sniff Mode
 * (unless a mode change is already pending) and re-arm the idle timer.
 * @force_active overrides the per-connection power-save preference.
 */
2487 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2489 struct hci_dev *hdev = conn->hdev;
2491 BT_DBG("hcon %p mode %d", conn, conn->mode);
2493 if (conn->mode != HCI_CM_SNIFF)
2496 if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2499 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2500 struct hci_cp_exit_sniff_mode cp;
2501 cp.handle = cpu_to_le16(conn->handle);
2502 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2506 if (hdev->idle_timeout > 0)
2507 queue_delayed_work(hdev->workqueue, &conn->idle_work,
2508 msecs_to_jiffies(hdev->idle_timeout));
2511 /* Drop all connection on the device */
/* Tear down every connection: repeatedly take the first list entry
 * (not a list traversal — see comment below), mark it closed and
 * notify upper layers with a local-host-terminated reason.
 */
2512 void hci_conn_hash_flush(struct hci_dev *hdev)
2514 struct list_head *head = &hdev->conn_hash.list;
2515 struct hci_conn *conn;
2517 BT_DBG("hdev %s", hdev->name);
2519 /* We should not traverse the list here, because hci_conn_del
2520 * can remove extra links, which may cause the list traversal
2521 * to hit items that have already been released.
2523 while ((conn = list_first_entry_or_null(head,
2526 conn->state = BT_CLOSED;
2527 hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
2532 /* Check pending connect attempts */
/* If an ACL connection is parked in BT_CONNECT2 (waiting its turn),
 * start its Create Connection now. Called under hdev lock.
 */
2533 void hci_conn_check_pending(struct hci_dev *hdev)
2535 struct hci_conn *conn;
2537 BT_DBG("hdev %s", hdev->name);
2541 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
2543 hci_acl_create_connection(conn);
2545 hci_dev_unlock(hdev);
/* Translate connection role and flag bits into the HCI_LM_* bitmask
 * reported to userspace via the conn-info ioctls.
 */
2548 static u32 get_link_mode(struct hci_conn *conn)
2552 if (conn->role == HCI_ROLE_MASTER)
2553 link_mode |= HCI_LM_MASTER;
2555 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2556 link_mode |= HCI_LM_ENCRYPT;
2558 if (test_bit(HCI_CONN_AUTH, &conn->flags))
2559 link_mode |= HCI_LM_AUTH;
2561 if (test_bit(HCI_CONN_SECURE, &conn->flags))
2562 link_mode |= HCI_LM_SECURE;
2564 if (test_bit(HCI_CONN_FIPS, &conn->flags))
2565 link_mode |= HCI_LM_FIPS;
/* HCIGETCONNLIST ioctl backend: copy the request from userspace,
 * bound-check conn_num, snapshot up to conn_num connections under the
 * device lock and copy the (possibly shortened) list back out.
 */
2570 int hci_get_conn_list(void __user *arg)
2573 struct hci_conn_list_req req, *cl;
2574 struct hci_conn_info *ci;
2575 struct hci_dev *hdev;
2576 int n = 0, size, err;
2578 if (copy_from_user(&req, arg, sizeof(req)))
/* Reject zero or absurdly large counts before sizing the buffer */
2581 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
2584 size = sizeof(req) + req.conn_num * sizeof(*ci);
2586 cl = kmalloc(size, GFP_KERNEL);
2590 hdev = hci_dev_get(req.dev_id);
2599 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2600 bacpy(&(ci + n)->bdaddr, &c->dst);
2601 (ci + n)->handle = c->handle;
2602 (ci + n)->type = c->type;
2603 (ci + n)->out = c->out;
2604 (ci + n)->state = c->state;
2605 (ci + n)->link_mode = get_link_mode(c);
2606 if (++n >= req.conn_num)
2609 hci_dev_unlock(hdev);
2611 cl->dev_id = hdev->id;
/* Shrink the copy-out size to the number actually gathered */
2613 size = sizeof(req) + n * sizeof(*ci);
2617 err = copy_to_user(arg, cl, size);
2620 return err ? -EFAULT : 0;
/* HCIGETCONNINFO ioctl backend: look up the connection by type and
 * address, fill a hci_conn_info snapshot under the device lock, and
 * copy it to userspace just past the request header.
 */
2623 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2625 struct hci_conn_info_req req;
2626 struct hci_conn_info ci;
2627 struct hci_conn *conn;
2628 char __user *ptr = arg + sizeof(req);
2630 if (copy_from_user(&req, arg, sizeof(req)))
2634 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2636 bacpy(&ci.bdaddr, &conn->dst);
2637 ci.handle = conn->handle;
2638 ci.type = conn->type;
2640 ci.state = conn->state;
2641 ci.link_mode = get_link_mode(conn);
2643 hci_dev_unlock(hdev);
2648 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
/* HCIGETAUTHINFO ioctl backend: report the auth_type of the ACL
 * connection to the requested address back to userspace.
 */
2651 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2653 struct hci_auth_info_req req;
2654 struct hci_conn *conn;
2656 if (copy_from_user(&req, arg, sizeof(req)))
2660 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2662 req.type = conn->auth_type;
2663 hci_dev_unlock(hdev);
2668 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
/* Allocate a new hci_chan on @conn (refused once the connection is
 * marked for dropping), take a reference on the connection and add the
 * channel to the connection's RCU channel list.
 */
2671 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2673 struct hci_dev *hdev = conn->hdev;
2674 struct hci_chan *chan;
2676 BT_DBG("%s hcon %p", hdev->name, conn);
2678 if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2679 BT_DBG("Refusing to create new hci_chan");
2683 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2687 chan->conn = hci_conn_get(conn);
2688 skb_queue_head_init(&chan->data_q);
2689 chan->state = BT_CONNECTED;
2691 list_add_rcu(&chan->list, &conn->chan_list);
/* Remove a channel from its connection: unlink it (RCU), forbid new
 * channels on the connection via HCI_CONN_DROP, and purge any queued
 * data buffers.
 */
2696 void hci_chan_del(struct hci_chan *chan)
2698 struct hci_conn *conn = chan->conn;
2699 struct hci_dev *hdev = conn->hdev;
2701 BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
2703 list_del_rcu(&chan->list);
2707 /* Prevent new hci_chan's to be created for this hci_conn */
2708 set_bit(HCI_CONN_DROP, &conn->flags);
2712 skb_queue_purge(&chan->data_q);
/* Delete every channel on the connection; _safe iteration because
 * hci_chan_del unlinks the current entry.
 */
2716 void hci_chan_list_flush(struct hci_conn *conn)
2718 struct hci_chan *chan, *n;
2720 BT_DBG("hcon %p", conn);
2722 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
/* Find the channel with @handle on a single connection's channel list;
 * helper for hci_chan_lookup_handle() below.
 */
2726 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2729 struct hci_chan *hchan;
2731 list_for_each_entry(hchan, &hcon->chan_list, list) {
2732 if (hchan->handle == handle)
/* Search every connection on the device (RCU-protected) for a channel
 * with the given handle.
 */
2739 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2741 struct hci_conn_hash *h = &hdev->conn_hash;
2742 struct hci_conn *hcon;
2743 struct hci_chan *hchan = NULL;
2747 list_for_each_entry_rcu(hcon, &h->list, list) {
2748 hchan = __hci_chan_lookup_handle(hcon, handle);
2758 u32 hci_conn_get_phy(struct hci_conn *conn)
2762 /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
2763 * Table 6.2: Packets defined for synchronous, asynchronous, and
2764 * CPB logical transport types.
2766 switch (conn->type) {
2768 /* SCO logical transport (1 Mb/s):
2769 * HV1, HV2, HV3 and DV.
2771 phys |= BT_PHY_BR_1M_1SLOT;
2776 /* ACL logical transport (1 Mb/s) ptt=0:
2777 * DH1, DM3, DH3, DM5 and DH5.
2779 phys |= BT_PHY_BR_1M_1SLOT;
2781 if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
2782 phys |= BT_PHY_BR_1M_3SLOT;
2784 if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
2785 phys |= BT_PHY_BR_1M_5SLOT;
2787 /* ACL logical transport (2 Mb/s) ptt=1:
2788 * 2-DH1, 2-DH3 and 2-DH5.
2790 if (!(conn->pkt_type & HCI_2DH1))
2791 phys |= BT_PHY_EDR_2M_1SLOT;
2793 if (!(conn->pkt_type & HCI_2DH3))
2794 phys |= BT_PHY_EDR_2M_3SLOT;
2796 if (!(conn->pkt_type & HCI_2DH5))
2797 phys |= BT_PHY_EDR_2M_5SLOT;
2799 /* ACL logical transport (3 Mb/s) ptt=1:
2800 * 3-DH1, 3-DH3 and 3-DH5.
2802 if (!(conn->pkt_type & HCI_3DH1))
2803 phys |= BT_PHY_EDR_3M_1SLOT;
2805 if (!(conn->pkt_type & HCI_3DH3))
2806 phys |= BT_PHY_EDR_3M_3SLOT;
2808 if (!(conn->pkt_type & HCI_3DH5))
2809 phys |= BT_PHY_EDR_3M_5SLOT;
2814 /* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
2815 phys |= BT_PHY_BR_1M_1SLOT;
2817 if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
2818 phys |= BT_PHY_BR_1M_3SLOT;
2820 /* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
2821 if (!(conn->pkt_type & ESCO_2EV3))
2822 phys |= BT_PHY_EDR_2M_1SLOT;
2824 if (!(conn->pkt_type & ESCO_2EV5))
2825 phys |= BT_PHY_EDR_2M_3SLOT;
2827 /* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
2828 if (!(conn->pkt_type & ESCO_3EV3))
2829 phys |= BT_PHY_EDR_3M_1SLOT;
2831 if (!(conn->pkt_type & ESCO_3EV5))
2832 phys |= BT_PHY_EDR_3M_3SLOT;
2837 if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
2838 phys |= BT_PHY_LE_1M_TX;
2840 if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
2841 phys |= BT_PHY_LE_1M_RX;
2843 if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
2844 phys |= BT_PHY_LE_2M_TX;
2846 if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
2847 phys |= BT_PHY_LE_2M_RX;
2849 if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
2850 phys |= BT_PHY_LE_CODED_TX;
2852 if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
2853 phys |= BT_PHY_LE_CODED_RX;
2861 static int abort_conn_sync(struct hci_dev *hdev, void *data)
2863 struct hci_conn *conn;
2864 u16 handle = PTR_UINT(data);
2866 conn = hci_conn_hash_lookup_handle(hdev, handle);
2870 return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
2873 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2875 struct hci_dev *hdev = conn->hdev;
2877 /* If abort_reason has already been set it means the connection is
2878 * already being aborted so don't attempt to overwrite it.
2880 if (conn->abort_reason)
2883 bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);
2885 conn->abort_reason = reason;
2887 /* If the connection is pending check the command opcode since that
2888 * might be blocking on hci_cmd_sync_work while waiting its respective
2889 * event so we need to hci_cmd_sync_cancel to cancel it.
2891 * hci_connect_le serializes the connection attempts so only one
2892 * connection can be in BT_CONNECT at time.
2894 if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
2895 switch (hci_skb_event(hdev->sent_cmd)) {
2896 case HCI_EV_LE_CONN_COMPLETE:
2897 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
2898 case HCI_EVT_LE_CIS_ESTABLISHED:
2899 hci_cmd_sync_cancel(hdev, -ECANCELED);
2904 return hci_cmd_sync_queue(hdev, abort_conn_sync, UINT_PTR(conn->handle),