net/bluetooth/hci_conn.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023 NXP
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI connection handling. */
27
28 #include <linux/export.h>
29 #include <linux/debugfs.h>
30
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/iso.h>
35 #include <net/bluetooth/mgmt.h>
36
37 #include "hci_request.h"
38 #include "smp.h"
39 #include "a2mp.h"
40 #include "eir.h"
41
42 struct sco_param {
43         u16 pkt_type;
44         u16 max_latency;
45         u8  retrans_effort;
46 };
47
48 struct conn_handle_t {
49         struct hci_conn *conn;
50         __u16 handle;
51 };
52
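/* SCO/eSCO air-mode parameter tables, indexed by conn->attempt - 1 and
 * tried in order until the controller accepts one (the most robust
 * settings come last as fallbacks). The S1-S3, D0/D1 and T1/T2 labels
 * refer to the HFP audio parameter sets; max_latency is in milliseconds
 * and retrans_effort uses the HCI Setup Synchronous Connection values
 * (0x01 = at least one retransmission/power optimized, 0x02 = link
 * quality optimized, 0xff = don't care).
 */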
53 static const struct sco_param esco_param_cvsd[] = {
54         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,   0x01 }, /* S3 */
55         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,   0x01 }, /* S2 */
56         { EDR_ESCO_MASK | ESCO_EV3,   0x0007,   0x01 }, /* S1 */
57         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0x01 }, /* D1 */
58         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0x01 }, /* D0 */
59 };
60
61 static const struct sco_param sco_param_cvsd[] = {
62         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0xff }, /* D1 */
63         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0xff }, /* D0 */
64 };
65
66 static const struct sco_param esco_param_msbc[] = {
67         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,   0x02 }, /* T2 */
68         { EDR_ESCO_MASK | ESCO_EV3,   0x0008,   0x02 }, /* T1 */
69 };
70
71 /* This function requires the caller holds hdev->lock */
72 static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
73 {
74         struct hci_conn_params *params;
75         struct hci_dev *hdev = conn->hdev;
76         struct smp_irk *irk;
77         bdaddr_t *bdaddr;
78         u8 bdaddr_type;
79
80         bdaddr = &conn->dst;
81         bdaddr_type = conn->dst_type;
82
83         /* Check if we need to convert to identity address */
84         irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
85         if (irk) {
86                 bdaddr = &irk->bdaddr;
87                 bdaddr_type = irk->addr_type;
88         }
89
90         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
91                                            bdaddr_type);
92         if (!params)
93                 return;
94
95         if (params->conn) {
96                 hci_conn_drop(params->conn);
97                 hci_conn_put(params->conn);
98                 params->conn = NULL;
99         }
100
101         if (!params->explicit_connect)
102                 return;
103
104         /* If the status indicates successful cancellation of
105          * the attempt (i.e. Unknown Connection Id) there's no point in
106          * notifying failure since we'll go back to keep trying to
107          * connect. The only exception is explicit connect requests
108          * where a timeout + cancel does indicate an actual failure.
109          */
110         if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
111                 mgmt_connect_failed(hdev, &conn->dst, conn->type,
112                                     conn->dst_type, status);
113
114         /* The connection attempt was doing a scan for a new RPA, and is
115          * in the scan phase. If params are not associated with any other
116          * autoconnect action, remove them completely. If they are, just unmark
117          * them as waiting for a connection by clearing the explicit_connect field.
118          */
119         params->explicit_connect = false;
120
121         hci_pend_le_list_del_init(params);
122
123         switch (params->auto_connect) {
124         case HCI_AUTO_CONN_EXPLICIT:
125                 hci_conn_params_del(hdev, bdaddr, bdaddr_type);
126                 /* return instead of break to avoid duplicate scan update */
127                 return;
128         case HCI_AUTO_CONN_DIRECT:
129         case HCI_AUTO_CONN_ALWAYS:
130                 hci_pend_le_list_add(params, &hdev->pend_le_conns);
131                 break;
132         case HCI_AUTO_CONN_REPORT:
133                 hci_pend_le_list_add(params, &hdev->pend_le_reports);
134                 break;
135         default:
136                 break;
137         }
138
139         hci_update_passive_scan(hdev);
140 }
141
142 static void hci_conn_cleanup(struct hci_conn *conn)
143 {
144         struct hci_dev *hdev = conn->hdev;
145
146         if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
147                 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
148
149         if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
150                 hci_remove_link_key(hdev, &conn->dst);
151
152         hci_chan_list_flush(conn);
153
154         hci_conn_hash_del(hdev, conn);
155
156         if (conn->cleanup)
157                 conn->cleanup(conn);
158
159         if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
160                 switch (conn->setting & SCO_AIRMODE_MASK) {
161                 case SCO_AIRMODE_CVSD:
162                 case SCO_AIRMODE_TRANSP:
163                         if (hdev->notify)
164                                 hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
165                         break;
166                 }
167         } else {
168                 if (hdev->notify)
169                         hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
170         }
171
172         hci_conn_del_sysfs(conn);
173
174         debugfs_remove_recursive(conn->debugfs);
175
176         hci_dev_put(hdev);
177
178         hci_conn_put(conn);
179 }
180
181 static void hci_acl_create_connection(struct hci_conn *conn)
182 {
183         struct hci_dev *hdev = conn->hdev;
184         struct inquiry_entry *ie;
185         struct hci_cp_create_conn cp;
186
187         BT_DBG("hcon %p", conn);
188
189         /* Many controllers disallow HCI Create Connection while they are doing
190          * HCI Inquiry, so we cancel the Inquiry first before issuing HCI Create
191          * Connection. This may cause the MGMT discovering state to become false
192          * without user space's request, but it is okay since the MGMT Discovery
193          * APIs do not promise that discovery runs forever. Instead, user space
194          * monitors the status of MGMT discovering and may request discovery
195          * again when this flag becomes false.
196          */
197         if (test_bit(HCI_INQUIRY, &hdev->flags)) {
198                 /* Put this connection into the "pending" state so that it will be
199                  * executed after the inquiry cancel command complete event.
200                  */
201                 conn->state = BT_CONNECT2;
202                 hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
203                 return;
204         }
205
206         conn->state = BT_CONNECT;
207         conn->out = true;
208         conn->role = HCI_ROLE_MASTER;
209
210         conn->attempt++;
211
212         conn->link_policy = hdev->link_policy;
213
214         memset(&cp, 0, sizeof(cp));
215         bacpy(&cp.bdaddr, &conn->dst);
216         cp.pscan_rep_mode = 0x02;
217
218         ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
219         if (ie) {
220                 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
221                         cp.pscan_rep_mode = ie->data.pscan_rep_mode;
222                         cp.pscan_mode     = ie->data.pscan_mode;
223                         cp.clock_offset   = ie->data.clock_offset |
224                                             cpu_to_le16(0x8000);
225                 }
226
227                 memcpy(conn->dev_class, ie->data.dev_class, 3);
228         }
229
230         cp.pkt_type = cpu_to_le16(conn->pkt_type);
231         if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
232                 cp.role_switch = 0x01;
233         else
234                 cp.role_switch = 0x00;
235
236         hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
237 }
238
239 int hci_disconnect(struct hci_conn *conn, __u8 reason)
240 {
241         BT_DBG("hcon %p", conn);
242
243         /* When we are the central of an established connection and it enters
244          * the disconnect timeout, then go ahead and try to read the
245          * current clock offset.  Processing of the result is done
246          * within the event handling and hci_clock_offset_evt function.
247          */
248         if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
249             (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
250                 struct hci_dev *hdev = conn->hdev;
251                 struct hci_cp_read_clock_offset clkoff_cp;
252
253                 clkoff_cp.handle = cpu_to_le16(conn->handle);
254                 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
255                              &clkoff_cp);
256         }
257
258         return hci_abort_conn(conn, reason);
259 }
260
261 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
262 {
263         struct hci_dev *hdev = conn->hdev;
264         struct hci_cp_add_sco cp;
265
266         BT_DBG("hcon %p", conn);
267
268         conn->state = BT_CONNECT;
269         conn->out = true;
270
271         conn->attempt++;
272
273         cp.handle   = cpu_to_le16(handle);
274         cp.pkt_type = cpu_to_le16(conn->pkt_type);
275
276         hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
277 }
278
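/* Advance conn->attempt through an eSCO parameter table, skipping
 * entries that rely on 2-EV3 packets when the parent ACL's remote
 * device does not support eSCO 2M. Returns true if a usable entry
 * (at index conn->attempt - 1) remains, false if the table is
 * exhausted or the parent link is missing.
 */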
279 static bool find_next_esco_param(struct hci_conn *conn,
280                                  const struct sco_param *esco_param, int size)
281 {
282         if (!conn->parent)
283                 return false;
284
285         for (; conn->attempt <= size; conn->attempt++) {
286                 if (lmp_esco_2m_capable(conn->parent) ||
287                     (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
288                         break;
289                 BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
290                        conn, conn->attempt);
291         }
292
293         return conn->attempt <= size;
294 }
295
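/* Configure the codec offload data path before opening the SCO/eSCO
 * link: the driver callbacks supply the vendor-specific codec config
 * and the data path id, and HCI Configure Data Path is then issued
 * once per direction (0x00 = input, 0x01 = output).
 */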
296 static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
297 {
298         int err;
299         __u8 vnd_len, *vnd_data = NULL;
300         struct hci_op_configure_data_path *cmd = NULL;
301
302         err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
303                                           &vnd_data);
304         if (err < 0)
305                 goto error;
306
307         cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
308         if (!cmd) {
309                 err = -ENOMEM;
310                 goto error;
311         }
312
313         err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
314         if (err < 0)
315                 goto error;
316
317         cmd->vnd_len = vnd_len;
318         memcpy(cmd->vnd_data, vnd_data, vnd_len);
319
320         cmd->direction = 0x00;
321         __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
322                               sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
323
324         cmd->direction = 0x01;
325         err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
326                                     sizeof(*cmd) + vnd_len, cmd,
327                                     HCI_CMD_TIMEOUT);
328 error:
329
330         kfree(cmd);
331         kfree(vnd_data);
332         return err;
333 }
334
335 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
336 {
337         struct conn_handle_t *conn_handle = data;
338         struct hci_conn *conn = conn_handle->conn;
339         __u16 handle = conn_handle->handle;
340         struct hci_cp_enhanced_setup_sync_conn cp;
341         const struct sco_param *param;
342
343         kfree(conn_handle);
344
345         bt_dev_dbg(hdev, "hcon %p", conn);
346
347         /* For the offload use case, the codec needs to be configured before opening SCO */
348         if (conn->codec.data_path)
349                 configure_datapath_sync(hdev, &conn->codec);
350
351         conn->state = BT_CONNECT;
352         conn->out = true;
353
354         conn->attempt++;
355
356         memset(&cp, 0x00, sizeof(cp));
357
358         cp.handle   = cpu_to_le16(handle);
359
360         cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
361         cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
362
363         switch (conn->codec.id) {
364         case BT_CODEC_MSBC:
365                 if (!find_next_esco_param(conn, esco_param_msbc,
366                                           ARRAY_SIZE(esco_param_msbc)))
367                         return -EINVAL;
368
369                 param = &esco_param_msbc[conn->attempt - 1];
370                 cp.tx_coding_format.id = 0x05;
371                 cp.rx_coding_format.id = 0x05;
372                 cp.tx_codec_frame_size = __cpu_to_le16(60);
373                 cp.rx_codec_frame_size = __cpu_to_le16(60);
374                 cp.in_bandwidth = __cpu_to_le32(32000);
375                 cp.out_bandwidth = __cpu_to_le32(32000);
376                 cp.in_coding_format.id = 0x04;
377                 cp.out_coding_format.id = 0x04;
378                 cp.in_coded_data_size = __cpu_to_le16(16);
379                 cp.out_coded_data_size = __cpu_to_le16(16);
380                 cp.in_pcm_data_format = 2;
381                 cp.out_pcm_data_format = 2;
382                 cp.in_pcm_sample_payload_msb_pos = 0;
383                 cp.out_pcm_sample_payload_msb_pos = 0;
384                 cp.in_data_path = conn->codec.data_path;
385                 cp.out_data_path = conn->codec.data_path;
386                 cp.in_transport_unit_size = 1;
387                 cp.out_transport_unit_size = 1;
388                 break;
389
390         case BT_CODEC_TRANSPARENT:
391                 if (!find_next_esco_param(conn, esco_param_msbc,
392                                           ARRAY_SIZE(esco_param_msbc)))
393                         return -EINVAL;
394                 param = &esco_param_msbc[conn->attempt - 1];
395                 cp.tx_coding_format.id = 0x03;
396                 cp.rx_coding_format.id = 0x03;
397                 cp.tx_codec_frame_size = __cpu_to_le16(60);
398                 cp.rx_codec_frame_size = __cpu_to_le16(60);
399                 cp.in_bandwidth = __cpu_to_le32(0x1f40);
400                 cp.out_bandwidth = __cpu_to_le32(0x1f40);
401                 cp.in_coding_format.id = 0x03;
402                 cp.out_coding_format.id = 0x03;
403                 cp.in_coded_data_size = __cpu_to_le16(16);
404                 cp.out_coded_data_size = __cpu_to_le16(16);
405                 cp.in_pcm_data_format = 2;
406                 cp.out_pcm_data_format = 2;
407                 cp.in_pcm_sample_payload_msb_pos = 0;
408                 cp.out_pcm_sample_payload_msb_pos = 0;
409                 cp.in_data_path = conn->codec.data_path;
410                 cp.out_data_path = conn->codec.data_path;
411                 cp.in_transport_unit_size = 1;
412                 cp.out_transport_unit_size = 1;
413                 break;
414
415         case BT_CODEC_CVSD:
416                 if (conn->parent && lmp_esco_capable(conn->parent)) {
417                         if (!find_next_esco_param(conn, esco_param_cvsd,
418                                                   ARRAY_SIZE(esco_param_cvsd)))
419                                 return -EINVAL;
420                         param = &esco_param_cvsd[conn->attempt - 1];
421                 } else {
422                         if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
423                                 return -EINVAL;
424                         param = &sco_param_cvsd[conn->attempt - 1];
425                 }
426                 cp.tx_coding_format.id = 2;
427                 cp.rx_coding_format.id = 2;
428                 cp.tx_codec_frame_size = __cpu_to_le16(60);
429                 cp.rx_codec_frame_size = __cpu_to_le16(60);
430                 cp.in_bandwidth = __cpu_to_le32(16000);
431                 cp.out_bandwidth = __cpu_to_le32(16000);
432                 cp.in_coding_format.id = 4;
433                 cp.out_coding_format.id = 4;
434                 cp.in_coded_data_size = __cpu_to_le16(16);
435                 cp.out_coded_data_size = __cpu_to_le16(16);
436                 cp.in_pcm_data_format = 2;
437                 cp.out_pcm_data_format = 2;
438                 cp.in_pcm_sample_payload_msb_pos = 0;
439                 cp.out_pcm_sample_payload_msb_pos = 0;
440                 cp.in_data_path = conn->codec.data_path;
441                 cp.out_data_path = conn->codec.data_path;
442                 cp.in_transport_unit_size = 16;
443                 cp.out_transport_unit_size = 16;
444                 break;
445         default:
446                 return -EINVAL;
447         }
448
449         cp.retrans_effort = param->retrans_effort;
450         cp.pkt_type = __cpu_to_le16(param->pkt_type);
451         cp.max_latency = __cpu_to_le16(param->max_latency);
452
453         if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
454                 return -EIO;
455
456         return 0;
457 }
458
459 static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
460 {
461         struct hci_dev *hdev = conn->hdev;
462         struct hci_cp_setup_sync_conn cp;
463         const struct sco_param *param;
464
465         bt_dev_dbg(hdev, "hcon %p", conn);
466
467         conn->state = BT_CONNECT;
468         conn->out = true;
469
470         conn->attempt++;
471
472         cp.handle   = cpu_to_le16(handle);
473
474         cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
475         cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
476         cp.voice_setting  = cpu_to_le16(conn->setting);
477
478         switch (conn->setting & SCO_AIRMODE_MASK) {
479         case SCO_AIRMODE_TRANSP:
480                 if (!find_next_esco_param(conn, esco_param_msbc,
481                                           ARRAY_SIZE(esco_param_msbc)))
482                         return false;
483                 param = &esco_param_msbc[conn->attempt - 1];
484                 break;
485         case SCO_AIRMODE_CVSD:
486                 if (conn->parent && lmp_esco_capable(conn->parent)) {
487                         if (!find_next_esco_param(conn, esco_param_cvsd,
488                                                   ARRAY_SIZE(esco_param_cvsd)))
489                                 return false;
490                         param = &esco_param_cvsd[conn->attempt - 1];
491                 } else {
492                         if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
493                                 return false;
494                         param = &sco_param_cvsd[conn->attempt - 1];
495                 }
496                 break;
497         default:
498                 return false;
499         }
500
501         cp.retrans_effort = param->retrans_effort;
502         cp.pkt_type = __cpu_to_le16(param->pkt_type);
503         cp.max_latency = __cpu_to_le16(param->max_latency);
504
505         if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
506                 return false;
507
508         return true;
509 }
510
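/* Set up a synchronous (SCO/eSCO) connection on top of an existing ACL
 * identified by @handle, as done from hci_sco_setup() below. When the
 * controller supports Enhanced Setup Synchronous Connection the request
 * is queued via hci_cmd_sync_queue() so the offload data path can be
 * configured first; otherwise the legacy Setup Synchronous Connection
 * command is used. Returns true if the request was sent or queued.
 */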
511 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
512 {
513         int result;
514         struct conn_handle_t *conn_handle;
515
516         if (enhanced_sync_conn_capable(conn->hdev)) {
517                 conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
518
519                 if (!conn_handle)
520                         return false;
521
522                 conn_handle->conn = conn;
523                 conn_handle->handle = handle;
524                 result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
525                                             conn_handle, NULL);
526                 if (result < 0)
527                         kfree(conn_handle);
528
529                 return result == 0;
530         }
531
532         return hci_setup_sync_conn(conn, handle);
533 }
534
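/* Request an LE connection parameter update. If stored connection
 * parameters exist for the destination, they are refreshed with the
 * new values. Returns 0x01 when stored parameters were found and
 * updated, 0x00 otherwise.
 */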
535 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
536                       u16 to_multiplier)
537 {
538         struct hci_dev *hdev = conn->hdev;
539         struct hci_conn_params *params;
540         struct hci_cp_le_conn_update cp;
541
542         hci_dev_lock(hdev);
543
544         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
545         if (params) {
546                 params->conn_min_interval = min;
547                 params->conn_max_interval = max;
548                 params->conn_latency = latency;
549                 params->supervision_timeout = to_multiplier;
550         }
551
552         hci_dev_unlock(hdev);
553
554         memset(&cp, 0, sizeof(cp));
555         cp.handle               = cpu_to_le16(conn->handle);
556         cp.conn_interval_min    = cpu_to_le16(min);
557         cp.conn_interval_max    = cpu_to_le16(max);
558         cp.conn_latency         = cpu_to_le16(latency);
559         cp.supervision_timeout  = cpu_to_le16(to_multiplier);
560         cp.min_ce_len           = cpu_to_le16(0x0000);
561         cp.max_ce_len           = cpu_to_le16(0x0000);
562
563         hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
564
565         if (params)
566                 return 0x01;
567
568         return 0x00;
569 }
570
571 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
572                       __u8 ltk[16], __u8 key_size)
573 {
574         struct hci_dev *hdev = conn->hdev;
575         struct hci_cp_le_start_enc cp;
576
577         BT_DBG("hcon %p", conn);
578
579         memset(&cp, 0, sizeof(cp));
580
581         cp.handle = cpu_to_le16(conn->handle);
582         cp.rand = rand;
583         cp.ediv = ediv;
584         memcpy(cp.ltk, ltk, key_size);
585
586         hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
587 }
588
589 /* Device _must_ be locked */
590 void hci_sco_setup(struct hci_conn *conn, __u8 status)
591 {
592         struct hci_link *link;
593
594         link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
595         if (!link || !link->conn)
596                 return;
597
598         BT_DBG("hcon %p", conn);
599
600         if (!status) {
601                 if (lmp_esco_capable(conn->hdev))
602                         hci_setup_sync(link->conn, conn->handle);
603                 else
604                         hci_add_sco(link->conn, conn->handle);
605         } else {
606                 hci_connect_cfm(link->conn, status);
607                 hci_conn_del(link->conn);
608         }
609 }
610
611 static void hci_conn_timeout(struct work_struct *work)
612 {
613         struct hci_conn *conn = container_of(work, struct hci_conn,
614                                              disc_work.work);
615         int refcnt = atomic_read(&conn->refcnt);
616
617         BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
618
619         WARN_ON(refcnt < 0);
620
621         /* FIXME: It was observed that in pairing failed scenario, refcnt
622          * drops below 0. Probably this is because l2cap_conn_del calls
623          * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
624          * dropped. After that loop hci_chan_del is called which also drops
625          * conn. For now make sure that ACL is alive if refcnt is higher than 0,
626          * otherwise drop it.
627          */
628         if (refcnt > 0)
629                 return;
630
631         hci_abort_conn(conn, hci_proto_disconn_ind(conn));
632 }
633
634 /* Enter sniff mode */
635 static void hci_conn_idle(struct work_struct *work)
636 {
637         struct hci_conn *conn = container_of(work, struct hci_conn,
638                                              idle_work.work);
639         struct hci_dev *hdev = conn->hdev;
640
641         BT_DBG("hcon %p mode %d", conn, conn->mode);
642
643         if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
644                 return;
645
646         if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
647                 return;
648
649         if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
650                 struct hci_cp_sniff_subrate cp;
651                 cp.handle             = cpu_to_le16(conn->handle);
652                 cp.max_latency        = cpu_to_le16(0);
653                 cp.min_remote_timeout = cpu_to_le16(0);
654                 cp.min_local_timeout  = cpu_to_le16(0);
655                 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
656         }
657
658         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
659                 struct hci_cp_sniff_mode cp;
660                 cp.handle       = cpu_to_le16(conn->handle);
661                 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
662                 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
663                 cp.attempt      = cpu_to_le16(4);
664                 cp.timeout      = cpu_to_le16(1);
665                 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
666         }
667 }
668
669 static void hci_conn_auto_accept(struct work_struct *work)
670 {
671         struct hci_conn *conn = container_of(work, struct hci_conn,
672                                              auto_accept_work.work);
673
674         hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
675                      &conn->dst);
676 }
677
678 static void le_disable_advertising(struct hci_dev *hdev)
679 {
680         if (ext_adv_capable(hdev)) {
681                 struct hci_cp_le_set_ext_adv_enable cp;
682
683                 cp.enable = 0x00;
684                 cp.num_of_sets = 0x00;
685
686                 hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
687                              &cp);
688         } else {
689                 u8 enable = 0x00;
690                 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
691                              &enable);
692         }
693 }
694
695 static void le_conn_timeout(struct work_struct *work)
696 {
697         struct hci_conn *conn = container_of(work, struct hci_conn,
698                                              le_conn_timeout.work);
699         struct hci_dev *hdev = conn->hdev;
700
701         BT_DBG("");
702
703         /* We could end up here due to having done directed advertising,
704          * so clean up the state if necessary. This should however only
705          * happen with broken hardware or if low duty cycle was used
706          * (which doesn't have a timeout of its own).
707          */
708         if (conn->role == HCI_ROLE_SLAVE) {
709                 /* Disable LE Advertising */
710                 le_disable_advertising(hdev);
711                 hci_dev_lock(hdev);
712                 hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
713                 hci_dev_unlock(hdev);
714                 return;
715         }
716
717         hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
718 }
719
720 struct iso_cig_params {
721         struct hci_cp_le_set_cig_params cp;
722         struct hci_cis_params cis[0x1f];
723 };
724
725 struct iso_list_data {
726         union {
727                 u8  cig;
728                 u8  big;
729         };
730         union {
731                 u8  cis;
732                 u8  bis;
733                 u16 sync_handle;
734         };
735         int count;
736         bool big_term;
737         bool big_sync_term;
738 };
739
740 static void bis_list(struct hci_conn *conn, void *data)
741 {
742         struct iso_list_data *d = data;
743
744         /* Skip if not broadcast/ANY address */
745         if (bacmp(&conn->dst, BDADDR_ANY))
746                 return;
747
748         if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
749             d->bis != conn->iso_qos.bcast.bis)
750                 return;
751
752         d->count++;
753 }
754
755 static int terminate_big_sync(struct hci_dev *hdev, void *data)
756 {
757         struct iso_list_data *d = data;
758
759         bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);
760
761         hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
762
763         /* Only terminate BIG if it has been created */
764         if (!d->big_term)
765                 return 0;
766
767         return hci_le_terminate_big_sync(hdev, d->big,
768                                          HCI_ERROR_LOCAL_HOST_TERM);
769 }
770
771 static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
772 {
773         kfree(data);
774 }
775
776 static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
777 {
778         struct iso_list_data *d;
779         int ret;
780
781         bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
782                    conn->iso_qos.bcast.bis);
783
784         d = kzalloc(sizeof(*d), GFP_KERNEL);
785         if (!d)
786                 return -ENOMEM;
787
788         d->big = conn->iso_qos.bcast.big;
789         d->bis = conn->iso_qos.bcast.bis;
790         d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);
791
792         ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
793                                  terminate_big_destroy);
794         if (ret)
795                 kfree(d);
796
797         return ret;
798 }
799
800 static int big_terminate_sync(struct hci_dev *hdev, void *data)
801 {
802         struct iso_list_data *d = data;
803
804         bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
805                    d->sync_handle);
806
807         if (d->big_sync_term)
808                 hci_le_big_terminate_sync(hdev, d->big);
809
810         return hci_le_pa_terminate_sync(hdev, d->sync_handle);
811 }
812
813 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
814 {
815         struct iso_list_data *d;
816         int ret;
817
818         bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);
819
820         d = kzalloc(sizeof(*d), GFP_KERNEL);
821         if (!d)
822                 return -ENOMEM;
823
824         d->big = big;
825         d->sync_handle = conn->sync_handle;
826         d->big_sync_term = test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags);
827
828         ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
829                                  terminate_big_destroy);
830         if (ret)
831                 kfree(d);
832
833         return ret;
834 }
835
836 /* Cleanup BIS connection
837  *
838  * Detects if there are any BIS left connected in a BIG.
839  * broadcaster: Remove advertising instance and terminate BIG.
840  * broadcast receiver: Terminate BIG sync and terminate PA sync.
841  */
842 static void bis_cleanup(struct hci_conn *conn)
843 {
844         struct hci_dev *hdev = conn->hdev;
845         struct hci_conn *bis;
846
847         bt_dev_dbg(hdev, "conn %p", conn);
848
849         if (conn->role == HCI_ROLE_MASTER) {
850                 if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
851                         return;
852
853                 /* Check if ISO connection is a BIS and terminate advertising
854                  * set and BIG if there are no other connections using it.
855                  */
856                 bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
857                 if (bis)
858                         return;
859
860                 hci_le_terminate_big(hdev, conn);
861         } else {
862                 bis = hci_conn_hash_lookup_big_any_dst(hdev,
863                                                        conn->iso_qos.bcast.big);
864
865                 if (bis)
866                         return;
867
868                 hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
869                                      conn);
870         }
871 }
872
873 static int remove_cig_sync(struct hci_dev *hdev, void *data)
874 {
875         u8 handle = PTR_UINT(data);
876
877         return hci_le_remove_cig_sync(hdev, handle);
878 }
879
880 static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
881 {
882         bt_dev_dbg(hdev, "handle 0x%2.2x", handle);
883
884         return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
885                                   NULL);
886 }
887
888 static void find_cis(struct hci_conn *conn, void *data)
889 {
890         struct iso_list_data *d = data;
891
892         /* Ignore broadcast or if the CIG doesn't match */
893         if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
894                 return;
895
896         d->count++;
897 }
898
899 /* Cleanup CIS connection:
900  *
901  * Detects if there are any CIS left connected in a CIG and, if not, removes the CIG.
902  */
903 static void cis_cleanup(struct hci_conn *conn)
904 {
905         struct hci_dev *hdev = conn->hdev;
906         struct iso_list_data d;
907
908         if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
909                 return;
910
911         memset(&d, 0, sizeof(d));
912         d.cig = conn->iso_qos.ucast.cig;
913
914         /* Check if ISO connection is a CIS and remove CIG if there are
915          * no other connections using it.
916          */
917         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
918         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
919         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
920         if (d.count)
921                 return;
922
923         hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
924 }
925
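/* Pick a temporary "unset" handle (above HCI_CONN_HANDLE_MAX) for a
 * connection that does not yet have a controller-assigned handle; the
 * real handle is set later through hci_conn_set_handle().
 */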
926 static u16 hci_conn_hash_alloc_unset(struct hci_dev *hdev)
927 {
928         struct hci_conn_hash *h = &hdev->conn_hash;
929         struct hci_conn  *c;
930         u16 handle = HCI_CONN_HANDLE_MAX + 1;
931
932         rcu_read_lock();
933
934         list_for_each_entry_rcu(c, &h->list, list) {
935                 /* Find the first unused handle */
936                 if (handle == 0xffff || c->handle != handle)
937                         break;
938                 handle++;
939         }
940         rcu_read_unlock();
941
942         return handle;
943 }
944
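/* Allocate and initialise a new hci_conn of the given type: assign a
 * temporary unset handle, apply per-type defaults (packet types, source
 * address, ISO cleanup callback), add it to the connection hash and
 * register its sysfs entry. Returns NULL if allocation fails.
 */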
945 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
946                               u8 role)
947 {
948         struct hci_conn *conn;
949
950         BT_DBG("%s dst %pMR", hdev->name, dst);
951
952         conn = kzalloc(sizeof(*conn), GFP_KERNEL);
953         if (!conn)
954                 return NULL;
955
956         bacpy(&conn->dst, dst);
957         bacpy(&conn->src, &hdev->bdaddr);
958         conn->handle = hci_conn_hash_alloc_unset(hdev);
959         conn->hdev  = hdev;
960         conn->type  = type;
961         conn->role  = role;
962         conn->mode  = HCI_CM_ACTIVE;
963         conn->state = BT_OPEN;
964         conn->auth_type = HCI_AT_GENERAL_BONDING;
965         conn->io_capability = hdev->io_capability;
966         conn->remote_auth = 0xff;
967         conn->key_type = 0xff;
968         conn->rssi = HCI_RSSI_INVALID;
969         conn->tx_power = HCI_TX_POWER_INVALID;
970         conn->max_tx_power = HCI_TX_POWER_INVALID;
971
972         set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
973         conn->disc_timeout = HCI_DISCONN_TIMEOUT;
974
975         /* Set Default Authenticated payload timeout to 30s */
976         conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
977
978         if (conn->role == HCI_ROLE_MASTER)
979                 conn->out = true;
980
981         switch (type) {
982         case ACL_LINK:
983                 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
984                 break;
985         case LE_LINK:
986                 /* conn->src should reflect the local identity address */
987                 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
988                 break;
989         case ISO_LINK:
990                 /* conn->src should reflect the local identity address */
991                 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
992
993                 /* set proper cleanup function */
994                 if (!bacmp(dst, BDADDR_ANY))
995                         conn->cleanup = bis_cleanup;
996                 else if (conn->role == HCI_ROLE_MASTER)
997                         conn->cleanup = cis_cleanup;
998
999                 break;
1000         case SCO_LINK:
1001                 if (lmp_esco_capable(hdev))
1002                         conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
1003                                         (hdev->esco_type & EDR_ESCO_MASK);
1004                 else
1005                         conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
1006                 break;
1007         case ESCO_LINK:
1008                 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
1009                 break;
1010         }
1011
1012         skb_queue_head_init(&conn->data_q);
1013
1014         INIT_LIST_HEAD(&conn->chan_list);
1015         INIT_LIST_HEAD(&conn->link_list);
1016
1017         INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
1018         INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
1019         INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
1020         INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
1021
1022         atomic_set(&conn->refcnt, 0);
1023
1024         hci_dev_hold(hdev);
1025
1026         hci_conn_hash_add(hdev, conn);
1027
1028         /* The SCO and eSCO connections will only be notified when their
1029          * setup has been completed. This is different from ACL links, which
1030          * can be notified right away.
1031          */
1032         if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
1033                 if (hdev->notify)
1034                         hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
1035         }
1036
1037         hci_conn_init_sysfs(conn);
1038
1039         return conn;
1040 }
1041
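/* Detach a connection from its parent/child links. For a parent ACL/LE
 * connection this recursively unlinks every child link (and deletes
 * half-established SCO/eSCO children while the device is still up);
 * for a child connection it removes the link entry and drops the hold
 * and reference taken on the parent.
 */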
1042 static void hci_conn_unlink(struct hci_conn *conn)
1043 {
1044         struct hci_dev *hdev = conn->hdev;
1045
1046         bt_dev_dbg(hdev, "hcon %p", conn);
1047
1048         if (!conn->parent) {
1049                 struct hci_link *link, *t;
1050
1051                 list_for_each_entry_safe(link, t, &conn->link_list, list) {
1052                         struct hci_conn *child = link->conn;
1053
1054                         hci_conn_unlink(child);
1055
1056                         /* If hdev is down it means
1057                          * hci_dev_close_sync/hci_conn_hash_flush is in progress
1058                          * and links don't need to be cleaned up as all connections
1059                          * will be cleaned up.
1060                          */
1061                         if (!test_bit(HCI_UP, &hdev->flags))
1062                                 continue;
1063
1064                         /* Due to a race, the SCO connection might not be established
1065                          * yet at this point. Delete it now, otherwise it is
1066                          * possible for it to be stuck and can't be deleted.
1067                          */
1068                         if ((child->type == SCO_LINK ||
1069                              child->type == ESCO_LINK) &&
1070                             HCI_CONN_HANDLE_UNSET(child->handle))
1071                                 hci_conn_del(child);
1072                 }
1073
1074                 return;
1075         }
1076
1077         if (!conn->link)
1078                 return;
1079
1080         list_del_rcu(&conn->link->list);
1081         synchronize_rcu();
1082
1083         hci_conn_drop(conn->parent);
1084         hci_conn_put(conn->parent);
1085         conn->parent = NULL;
1086
1087         kfree(conn->link);
1088         conn->link = NULL;
1089 }
1090
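/* Fully tear down a connection: unlink it from parent/children, cancel
 * its delayed works, return any unacknowledged packet credits to the
 * matching hdev counters and finish with hci_conn_cleanup().
 */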
1091 void hci_conn_del(struct hci_conn *conn)
1092 {
1093         struct hci_dev *hdev = conn->hdev;
1094
1095         BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
1096
1097         hci_conn_unlink(conn);
1098
1099         cancel_delayed_work_sync(&conn->disc_work);
1100         cancel_delayed_work_sync(&conn->auto_accept_work);
1101         cancel_delayed_work_sync(&conn->idle_work);
1102
1103         if (conn->type == ACL_LINK) {
1104                 /* Unacked frames */
1105                 hdev->acl_cnt += conn->sent;
1106         } else if (conn->type == LE_LINK) {
1107                 cancel_delayed_work(&conn->le_conn_timeout);
1108
1109                 if (hdev->le_pkts)
1110                         hdev->le_cnt += conn->sent;
1111                 else
1112                         hdev->acl_cnt += conn->sent;
1113         } else {
1114                 /* Unacked ISO frames */
1115                 if (conn->type == ISO_LINK) {
1116                         if (hdev->iso_pkts)
1117                                 hdev->iso_cnt += conn->sent;
1118                         else if (hdev->le_pkts)
1119                                 hdev->le_cnt += conn->sent;
1120                         else
1121                                 hdev->acl_cnt += conn->sent;
1122                 }
1123         }
1124
1125         if (conn->amp_mgr)
1126                 amp_mgr_put(conn->amp_mgr);
1127
1128         skb_queue_purge(&conn->data_q);
1129
1130         /* Remove the connection from the list and cleanup its remaining
1131          * state. This is a separate function since for some cases like
1132          * BT_CONNECT_SCAN we *only* want the cleanup part without the
1133          * rest of hci_conn_del.
1134          */
1135         hci_conn_cleanup(conn);
1136 }
1137
1138 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
1139 {
1140         int use_src = bacmp(src, BDADDR_ANY);
1141         struct hci_dev *hdev = NULL, *d;
1142
1143         BT_DBG("%pMR -> %pMR", src, dst);
1144
1145         read_lock(&hci_dev_list_lock);
1146
1147         list_for_each_entry(d, &hci_dev_list, list) {
1148                 if (!test_bit(HCI_UP, &d->flags) ||
1149                     hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
1150                     d->dev_type != HCI_PRIMARY)
1151                         continue;
1152
1153                 /* Simple routing:
1154                  *   No source address - find interface with bdaddr != dst
1155                  *   Source address    - find interface with bdaddr == src
1156                  */
1157
1158                 if (use_src) {
1159                         bdaddr_t id_addr;
1160                         u8 id_addr_type;
1161
1162                         if (src_type == BDADDR_BREDR) {
1163                                 if (!lmp_bredr_capable(d))
1164                                         continue;
1165                                 bacpy(&id_addr, &d->bdaddr);
1166                                 id_addr_type = BDADDR_BREDR;
1167                         } else {
1168                                 if (!lmp_le_capable(d))
1169                                         continue;
1170
1171                                 hci_copy_identity_address(d, &id_addr,
1172                                                           &id_addr_type);
1173
1174                                 /* Convert from HCI to three-value type */
1175                                 if (id_addr_type == ADDR_LE_DEV_PUBLIC)
1176                                         id_addr_type = BDADDR_LE_PUBLIC;
1177                                 else
1178                                         id_addr_type = BDADDR_LE_RANDOM;
1179                         }
1180
1181                         if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
1182                                 hdev = d; break;
1183                         }
1184                 } else {
1185                         if (bacmp(&d->bdaddr, dst)) {
1186                                 hdev = d; break;
1187                         }
1188                 }
1189         }
1190
1191         if (hdev)
1192                 hdev = hci_dev_hold(hdev);
1193
1194         read_unlock(&hci_dev_list_lock);
1195         return hdev;
1196 }
1197 EXPORT_SYMBOL(hci_get_route);
1198
1199 /* This function requires the caller holds hdev->lock */
1200 static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
1201 {
1202         struct hci_dev *hdev = conn->hdev;
1203
1204         hci_connect_le_scan_cleanup(conn, status);
1205
1206         /* Enable advertising in case this was a failed connection
1207          * attempt as a peripheral.
1208          */
1209         hci_enable_advertising(hdev);
1210 }
1211
1212 /* This function requires the caller holds hdev->lock */
1213 void hci_conn_failed(struct hci_conn *conn, u8 status)
1214 {
1215         struct hci_dev *hdev = conn->hdev;
1216
1217         bt_dev_dbg(hdev, "status 0x%2.2x", status);
1218
1219         switch (conn->type) {
1220         case LE_LINK:
1221                 hci_le_conn_failed(conn, status);
1222                 break;
1223         case ACL_LINK:
1224                 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1225                                     conn->dst_type, status);
1226                 break;
1227         }
1228
1229         conn->state = BT_CLOSED;
1230         hci_connect_cfm(conn, status);
1231         hci_conn_del(conn);
1232 }
1233
1234 /* This function requires the caller holds hdev->lock */
1235 u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
1236 {
1237         struct hci_dev *hdev = conn->hdev;
1238
1239         bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);
1240
1241         if (conn->handle == handle)
1242                 return 0;
1243
1244         if (handle > HCI_CONN_HANDLE_MAX) {
1245                 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
1246                            handle, HCI_CONN_HANDLE_MAX);
1247                 return HCI_ERROR_INVALID_PARAMETERS;
1248         }
1249
1250         /* If abort_reason has been sent it means the connection is being
1251          * aborted and the handle shall not be changed.
1252          */
1253         if (conn->abort_reason)
1254                 return conn->abort_reason;
1255
1256         conn->handle = handle;
1257
1258         return 0;
1259 }
1260
1261 static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
1262 {
1263         struct hci_conn *conn;
1264         u16 handle = PTR_UINT(data);
1265
1266         conn = hci_conn_hash_lookup_handle(hdev, handle);
1267         if (!conn)
1268                 return;
1269
1270         bt_dev_dbg(hdev, "err %d", err);
1271
1272         hci_dev_lock(hdev);
1273
1274         if (!err) {
1275                 hci_connect_le_scan_cleanup(conn, 0x00);
1276                 goto done;
1277         }
1278
1279         /* Check if connection is still pending */
1280         if (conn != hci_lookup_le_connect(hdev))
1281                 goto done;
1282
1283         /* Flush to make sure we send create conn cancel command if needed */
1284         flush_delayed_work(&conn->le_conn_timeout);
1285         hci_conn_failed(conn, bt_status(err));
1286
1287 done:
1288         hci_dev_unlock(hdev);
1289 }
1290
1291 static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
1292 {
1293         struct hci_conn *conn;
1294         u16 handle = PTR_UINT(data);
1295
1296         conn = hci_conn_hash_lookup_handle(hdev, handle);
1297         if (!conn)
1298                 return 0;
1299
1300         bt_dev_dbg(hdev, "conn %p", conn);
1301
1302         conn->state = BT_CONNECT;
1303
1304         return hci_le_create_conn_sync(hdev, conn);
1305 }
1306
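/* Initiate a direct LE connection. Fails with -EBUSY if another LE
 * connection attempt is already running, or if a connection object to
 * the destination already exists and is not merely being scanned for;
 * if an IRK with a stored RPA is known for an unresolved identity
 * address, the connection is made to that RPA instead. The actual LE
 * Create Connection is queued via hci_cmd_sync_queue().
 */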
1307 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1308                                 u8 dst_type, bool dst_resolved, u8 sec_level,
1309                                 u16 conn_timeout, u8 role)
1310 {
1311         struct hci_conn *conn;
1312         struct smp_irk *irk;
1313         int err;
1314
1315         /* Let's make sure that LE is enabled. */
1316         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1317                 if (lmp_le_capable(hdev))
1318                         return ERR_PTR(-ECONNREFUSED);
1319
1320                 return ERR_PTR(-EOPNOTSUPP);
1321         }
1322
1323         /* Since the controller supports only one LE connection attempt at a
1324          * time, we return -EBUSY if there is any connection attempt running.
1325          */
1326         if (hci_lookup_le_connect(hdev))
1327                 return ERR_PTR(-EBUSY);
1328
1329         /* If there's already a connection object but it's not in
1330          * scanning state it means it must already be established, in
1331          * which case we can't do anything else except report a failure
1332          * to connect.
1333          */
1334         conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1335         if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1336                 return ERR_PTR(-EBUSY);
1337         }
1338
1339         /* Check if the destination address has been resolved by the controller
1340          * since, if it has, the identity address shall be used.
1341          */
1342         if (!dst_resolved) {
1343                 /* When given an identity address with existing identity
1344                  * resolving key, the connection needs to be established
1345                  * to a resolvable random address.
1346                  *
1347                  * Storing the resolvable random address is required here
1348                  * to handle connection failures. The address will later
1349                  * be resolved back into the original identity address
1350                  * from the connect request.
1351                  */
1352                 irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1353                 if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1354                         dst = &irk->rpa;
1355                         dst_type = ADDR_LE_DEV_RANDOM;
1356                 }
1357         }
1358
1359         if (conn) {
1360                 bacpy(&conn->dst, dst);
1361         } else {
1362                 conn = hci_conn_add(hdev, LE_LINK, dst, role);
1363                 if (!conn)
1364                         return ERR_PTR(-ENOMEM);
1365                 hci_conn_hold(conn);
1366                 conn->pending_sec_level = sec_level;
1367         }
1368
1369         conn->dst_type = dst_type;
1370         conn->sec_level = BT_SECURITY_LOW;
1371         conn->conn_timeout = conn_timeout;
1372
1373         clear_bit(HCI_CONN_SCANNING, &conn->flags);
1374
1375         err = hci_cmd_sync_queue(hdev, hci_connect_le_sync,
1376                                  UINT_PTR(conn->handle),
1377                                  create_le_conn_complete);
1378         if (err) {
1379                 hci_conn_del(conn);
1380                 return ERR_PTR(err);
1381         }
1382
1383         return conn;
1384 }
1385
1386 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1387 {
1388         struct hci_conn *conn;
1389
1390         conn = hci_conn_hash_lookup_le(hdev, addr, type);
1391         if (!conn)
1392                 return false;
1393
1394         if (conn->state != BT_CONNECTED)
1395                 return false;
1396
1397         return true;
1398 }
1399
1400 /* This function requires the caller holds hdev->lock */
1401 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1402                                         bdaddr_t *addr, u8 addr_type)
1403 {
1404         struct hci_conn_params *params;
1405
1406         if (is_connected(hdev, addr, addr_type))
1407                 return -EISCONN;
1408
1409         params = hci_conn_params_lookup(hdev, addr, addr_type);
1410         if (!params) {
1411                 params = hci_conn_params_add(hdev, addr, addr_type);
1412                 if (!params)
1413                         return -ENOMEM;
1414
1415                 /* If we created new params, mark them to be deleted in
1416                  * hci_connect_le_scan_cleanup. It's a different case from
1417                  * existing disabled params; those will stay after cleanup.
1418                  */
1419                 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1420         }
1421
1422         /* We're trying to connect, so make sure params are at pend_le_conns */
1423         if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1424             params->auto_connect == HCI_AUTO_CONN_REPORT ||
1425             params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1426                 hci_pend_le_list_del_init(params);
1427                 hci_pend_le_list_add(params, &hdev->pend_le_conns);
1428         }
1429
1430         params->explicit_connect = true;
1431
1432         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1433                params->auto_connect);
1434
1435         return 0;
1436 }
1437
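/* Allocate the first unused BIG handle (0x00-0xEE) when the caller left
 * qos->bcast.big unset; returns -EADDRNOTAVAIL if all handles are in
 * use.
 */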
1438 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1439 {
1440         struct hci_conn *conn;
1441         u8  big;
1442
1443         /* Allocate a BIG if not set */
1444         if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
1445                 for (big = 0x00; big < 0xef; big++) {
1446
1447                         conn = hci_conn_hash_lookup_big(hdev, big);
1448                         if (!conn)
1449                                 break;
1450                 }
1451
1452                 if (big == 0xef)
1453                         return -EADDRNOTAVAIL;
1454
1455                 /* Update BIG */
1456                 qos->bcast.big = big;
1457         }
1458
1459         return 0;
1460 }
1461
1462 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1463 {
1464         struct hci_conn *conn;
1465         u8  bis;
1466
1467         /* Allocate BIS if not set */
1468         if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1469                 /* Find an unused adv set to advertise BIS, skip instance 0x00
1470                  * since it is reserved as the general purpose set.
1471                  */
1472                 for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
1473                      bis++) {
1474
1475                         conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
1476                         if (!conn)
1477                                 break;
1478                 }
1479
1480                 if (bis == hdev->le_num_of_adv_sets)
1481                         return -EADDRNOTAVAIL;
1482
1483                 /* Update BIS */
1484                 qos->bcast.bis = bis;
1485         }
1486
1487         return 0;
1488 }
1489
1490 /* This function requires the caller holds hdev->lock */
1491 static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
1492                                     struct bt_iso_qos *qos, __u8 base_len,
1493                                     __u8 *base)
1494 {
1495         struct hci_conn *conn;
1496         int err;
1497
1498         /* Let's make sure that LE is enabled. */
1499         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1500                 if (lmp_le_capable(hdev))
1501                         return ERR_PTR(-ECONNREFUSED);
1502                 return ERR_PTR(-EOPNOTSUPP);
1503         }
1504
1505         err = qos_set_big(hdev, qos);
1506         if (err)
1507                 return ERR_PTR(err);
1508
1509         err = qos_set_bis(hdev, qos);
1510         if (err)
1511                 return ERR_PTR(err);
1512
1513         /* Check if the LE Create BIG command has already been sent */
1514         conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
1515                                                 qos->bcast.big);
1516         if (conn)
1517                 return ERR_PTR(-EADDRINUSE);
1518
1519         /* Check BIS settings against other bound BISes, since all
1520          * BISes in a BIG must have the same value for all parameters
1521          */
1522         conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1523
1524         if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
1525                      base_len != conn->le_per_adv_data_len ||
1526                      memcmp(conn->le_per_adv_data, base, base_len)))
1527                 return ERR_PTR(-EADDRINUSE);
1528
1529         conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1530         if (!conn)
1531                 return ERR_PTR(-ENOMEM);
1532
1533         conn->state = BT_CONNECT;
1534
1535         hci_conn_hold(conn);
1536         return conn;
1537 }
1538
1539 /* This function requires the caller holds hdev->lock */
1540 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1541                                      u8 dst_type, u8 sec_level,
1542                                      u16 conn_timeout,
1543                                      enum conn_reasons conn_reason)
1544 {
1545         struct hci_conn *conn;
1546
1547         /* Let's make sure that LE is enabled. */
1548         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1549                 if (lmp_le_capable(hdev))
1550                         return ERR_PTR(-ECONNREFUSED);
1551
1552                 return ERR_PTR(-EOPNOTSUPP);
1553         }
1554
1555         /* Some devices send ATT messages as soon as the physical link is
1556          * established. To be able to handle these ATT messages, the user-
1557          * space first establishes the connection and then starts the pairing
1558          * process.
1559          *
1560          * So if a hci_conn object already exists for the following connection
1561          * attempt, we simply update pending_sec_level and auth_type fields
1562          * and return the object found.
1563          */
1564         conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1565         if (conn) {
1566                 if (conn->pending_sec_level < sec_level)
1567                         conn->pending_sec_level = sec_level;
1568                 goto done;
1569         }
1570
1571         BT_DBG("requesting refresh of dst_addr");
1572
1573         conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1574         if (!conn)
1575                 return ERR_PTR(-ENOMEM);
1576
1577         if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1578                 hci_conn_del(conn);
1579                 return ERR_PTR(-EBUSY);
1580         }
1581
1582         conn->state = BT_CONNECT;
1583         set_bit(HCI_CONN_SCANNING, &conn->flags);
1584         conn->dst_type = dst_type;
1585         conn->sec_level = BT_SECURITY_LOW;
1586         conn->pending_sec_level = sec_level;
1587         conn->conn_timeout = conn_timeout;
1588         conn->conn_reason = conn_reason;
1589
1590         hci_update_passive_scan(hdev);
1591
1592 done:
1593         hci_conn_hold(conn);
1594         return conn;
1595 }
1596
1597 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1598                                  u8 sec_level, u8 auth_type,
1599                                  enum conn_reasons conn_reason)
1600 {
1601         struct hci_conn *acl;
1602
1603         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1604                 if (lmp_bredr_capable(hdev))
1605                         return ERR_PTR(-ECONNREFUSED);
1606
1607                 return ERR_PTR(-EOPNOTSUPP);
1608         }
1609
1610         acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1611         if (!acl) {
1612                 acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1613                 if (!acl)
1614                         return ERR_PTR(-ENOMEM);
1615         }
1616
1617         hci_conn_hold(acl);
1618
1619         acl->conn_reason = conn_reason;
1620         if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1621                 acl->sec_level = BT_SECURITY_LOW;
1622                 acl->pending_sec_level = sec_level;
1623                 acl->auth_type = auth_type;
1624                 hci_acl_create_connection(acl);
1625         }
1626
1627         return acl;
1628 }
1629
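/* Link conn to its parent connection: allocate a hci_link that holds a
 * reference to conn, take a reference on the parent and append the link to
 * parent->link_list. Returns the existing link if conn is already linked,
 * or NULL if conn already has a parent or the allocation fails.
 */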
1630 static struct hci_link *hci_conn_link(struct hci_conn *parent,
1631                                       struct hci_conn *conn)
1632 {
1633         struct hci_dev *hdev = parent->hdev;
1634         struct hci_link *link;
1635
1636         bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);
1637
1638         if (conn->link)
1639                 return conn->link;
1640
1641         if (conn->parent)
1642                 return NULL;
1643
1644         link = kzalloc(sizeof(*link), GFP_KERNEL);
1645         if (!link)
1646                 return NULL;
1647
1648         link->conn = hci_conn_hold(conn);
1649         conn->link = link;
1650         conn->parent = hci_conn_get(parent);
1651
1652         /* Use list_add_tail_rcu to append to the list */
1653         list_add_tail_rcu(&link->list, &parent->link_list);
1654
1655         return link;
1656 }
1657
1658 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1659                                  __u16 setting, struct bt_codec *codec)
1660 {
1661         struct hci_conn *acl;
1662         struct hci_conn *sco;
1663         struct hci_link *link;
1664
1665         acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
1666                               CONN_REASON_SCO_CONNECT);
1667         if (IS_ERR(acl))
1668                 return acl;
1669
1670         sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1671         if (!sco) {
1672                 sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
1673                 if (!sco) {
1674                         hci_conn_drop(acl);
1675                         return ERR_PTR(-ENOMEM);
1676                 }
1677         }
1678
1679         link = hci_conn_link(acl, sco);
1680         if (!link) {
1681                 hci_conn_drop(acl);
1682                 hci_conn_drop(sco);
1683                 return ERR_PTR(-ENOLINK);
1684         }
1685
1686         sco->setting = setting;
1687         sco->codec = *codec;
1688
1689         if (acl->state == BT_CONNECTED &&
1690             (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1691                 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1692                 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1693
1694                 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1695                         /* defer SCO setup until mode change completed */
1696                         set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1697                         return sco;
1698                 }
1699
1700                 hci_sco_setup(acl, 0x00);
1701         }
1702
1703         return sco;
1704 }
1705
1706 static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
1707 {
1708         struct hci_dev *hdev = conn->hdev;
1709         struct hci_cp_le_create_big cp;
1710         struct iso_list_data data;
1711
1712         memset(&cp, 0, sizeof(cp));
1713
1714         data.big = qos->bcast.big;
1715         data.bis = qos->bcast.bis;
1716         data.count = 0;
1717
1718         /* Create a BIS for each bound connection */
1719         hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1720                                  BT_BOUND, &data);
1721
1722         cp.handle = qos->bcast.big;
1723         cp.adv_handle = qos->bcast.bis;
1724         cp.num_bis  = data.count;
1725         hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
1726         cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
1727         cp.bis.latency =  cpu_to_le16(qos->bcast.out.latency);
1728         cp.bis.rtn  = qos->bcast.out.rtn;
1729         cp.bis.phy  = qos->bcast.out.phy;
1730         cp.bis.packing = qos->bcast.packing;
1731         cp.bis.framing = qos->bcast.framing;
1732         cp.bis.encryption = qos->bcast.encryption;
1733         memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));
1734
1735         return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
1736 }
1737
1738 static int set_cig_params_sync(struct hci_dev *hdev, void *data)
1739 {
1740         u8 cig_id = PTR_UINT(data);
1741         struct hci_conn *conn;
1742         struct bt_iso_qos *qos;
1743         struct iso_cig_params pdu;
1744         u8 cis_id;
1745
1746         conn = hci_conn_hash_lookup_cig(hdev, cig_id);
1747         if (!conn)
1748                 return 0;
1749
1750         memset(&pdu, 0, sizeof(pdu));
1751
1752         qos = &conn->iso_qos;
1753         pdu.cp.cig_id = cig_id;
1754         hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval);
1755         hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval);
1756         pdu.cp.sca = qos->ucast.sca;
1757         pdu.cp.packing = qos->ucast.packing;
1758         pdu.cp.framing = qos->ucast.framing;
1759         pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
1760         pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);
1761
1762         /* Reprogram all CIS(s) with the same CIG; the valid ranges are:
1763          * num_cis: 0x00 to 0x1F
1764          * cis_id: 0x00 to 0xEF
1765          */
1766         for (cis_id = 0x00; cis_id < 0xf0 &&
1767              pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) {
1768                 struct hci_cis_params *cis;
1769
1770                 conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
1771                 if (!conn)
1772                         continue;
1773
1774                 qos = &conn->iso_qos;
1775
1776                 cis = &pdu.cis[pdu.cp.num_cis++];
1777                 cis->cis_id = cis_id;
1778                 cis->c_sdu  = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
1779                 cis->p_sdu  = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
1780                 cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy :
1781                               qos->ucast.in.phy;
1782                 cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy :
1783                               qos->ucast.out.phy;
1784                 cis->c_rtn  = qos->ucast.out.rtn;
1785                 cis->p_rtn  = qos->ucast.in.rtn;
1786         }
1787
1788         if (!pdu.cp.num_cis)
1789                 return 0;
1790
1791         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
1792                                      sizeof(pdu.cp) +
1793                                      pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu,
1794                                      HCI_CMD_TIMEOUT);
1795 }
1796
1797 static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
1798 {
1799         struct hci_dev *hdev = conn->hdev;
1800         struct iso_list_data data;
1801
1802         memset(&data, 0, sizeof(data));
1803
1804         /* Allocate the first still-reconfigurable CIG if not set */
1805         if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
1806                 for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
1807                         data.count = 0;
1808
1809                         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1810                                                  BT_CONNECT, &data);
1811                         if (data.count)
1812                                 continue;
1813
1814                         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1815                                                  BT_CONNECTED, &data);
1816                         if (!data.count)
1817                                 break;
1818                 }
1819
1820                 if (data.cig == 0xf0)
1821                         return false;
1822
1823                 /* Update CIG */
1824                 qos->ucast.cig = data.cig;
1825         }
1826
1827         if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
1828                 if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
1829                                              qos->ucast.cis))
1830                         return false;
1831                 goto done;
1832         }
1833
1834         /* Allocate first available CIS if not set */
1835         for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
1836              data.cis++) {
1837                 if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
1838                                               data.cis)) {
1839                         /* Update CIS */
1840                         qos->ucast.cis = data.cis;
1841                         break;
1842                 }
1843         }
1844
1845         if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
1846                 return false;
1847
1848 done:
1849         if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
1850                                UINT_PTR(qos->ucast.cig), NULL) < 0)
1851                 return false;
1852
1853         return true;
1854 }
1855
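/* Look up or create the hci_conn for a CIS, fill in any missing QoS values
 * from the opposite direction, program the CIG parameters and return the
 * connection in BT_BOUND state.
 */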
1856 struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
1857                               __u8 dst_type, struct bt_iso_qos *qos)
1858 {
1859         struct hci_conn *cis;
1860
1861         cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
1862                                        qos->ucast.cis);
1863         if (!cis) {
1864                 cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1865                 if (!cis)
1866                         return ERR_PTR(-ENOMEM);
1867                 cis->cleanup = cis_cleanup;
1868                 cis->dst_type = dst_type;
1869                 cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
1870                 cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
1871         }
1872
1873         if (cis->state == BT_CONNECTED)
1874                 return cis;
1875
1876         /* Check if the CIS has been set and the settings match */
1877         if (cis->state == BT_BOUND &&
1878             !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
1879                 return cis;
1880
1881         /* Update LINK PHYs according to QoS preference */
1882         cis->le_tx_phy = qos->ucast.out.phy;
1883         cis->le_rx_phy = qos->ucast.in.phy;
1884
1885         /* If output interval is not set use the input interval as it cannot be
1886          * 0x000000.
1887          */
1888         if (!qos->ucast.out.interval)
1889                 qos->ucast.out.interval = qos->ucast.in.interval;
1890
1891         /* If input interval is not set use the output interval as it cannot be
1892          * 0x000000.
1893          */
1894         if (!qos->ucast.in.interval)
1895                 qos->ucast.in.interval = qos->ucast.out.interval;
1896
1897         /* If output latency is not set use the input latency as it cannot be
1898          * 0x0000.
1899          */
1900         if (!qos->ucast.out.latency)
1901                 qos->ucast.out.latency = qos->ucast.in.latency;
1902
1903         /* If input latency is not set use the output latency as it cannot be
1904          * 0x0000.
1905          */
1906         if (!qos->ucast.in.latency)
1907                 qos->ucast.in.latency = qos->ucast.out.latency;
1908
1909         if (!hci_le_set_cig_params(cis, qos)) {
1910                 hci_conn_drop(cis);
1911                 return ERR_PTR(-EINVAL);
1912         }
1913
1914         hci_conn_hold(cis);
1915
1916         cis->iso_qos = *qos;
1917         cis->state = BT_BOUND;
1918
1919         return cis;
1920 }
1921
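/* Set up the HCI ISO data path for each direction that has a non-zero SDU
 * size, using the HCI transport (path 0x00) and the transparent codec
 * (0x03). Returns false if sending either command fails.
 */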
1922 bool hci_iso_setup_path(struct hci_conn *conn)
1923 {
1924         struct hci_dev *hdev = conn->hdev;
1925         struct hci_cp_le_setup_iso_path cmd;
1926
1927         memset(&cmd, 0, sizeof(cmd));
1928
1929         if (conn->iso_qos.ucast.out.sdu) {
1930                 cmd.handle = cpu_to_le16(conn->handle);
1931                 cmd.direction = 0x00; /* Input (Host to Controller) */
1932                 cmd.path = 0x00; /* HCI path if enabled */
1933                 cmd.codec = 0x03; /* Transparent Data */
1934
1935                 if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1936                                  &cmd) < 0)
1937                         return false;
1938         }
1939
1940         if (conn->iso_qos.ucast.in.sdu) {
1941                 cmd.handle = cpu_to_le16(conn->handle);
1942                 cmd.direction = 0x01; /* Output (Controller to Host) */
1943                 cmd.path = 0x00; /* HCI path if enabled */
1944                 cmd.codec = 0x03; /* Transparent Data */
1945
1946                 if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1947                                  &cmd) < 0)
1948                         return false;
1949         }
1950
1951         return true;
1952 }
1953
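/* Check whether LE Create CIS can be issued for conn: returns -EINVAL if
 * this is not a unicast ISO link, 1 if the CIS is not ready yet (parent ACL
 * not connected, not in BT_CONNECT or no handle assigned) and 0 if it is
 * ready.
 */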
1954 int hci_conn_check_create_cis(struct hci_conn *conn)
1955 {
1956         if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
1957                 return -EINVAL;
1958
1959         if (!conn->parent || conn->parent->state != BT_CONNECTED ||
1960             conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
1961                 return 1;
1962
1963         return 0;
1964 }
1965
1966 static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
1967 {
1968         return hci_le_create_cis_sync(hdev);
1969 }
1970
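/* Queue LE Create CIS for any unicast ISO connections that are ready to be
 * established. Returns -EBUSY if a previous Create CIS has not completed
 * yet and 0 if there is nothing to do.
 */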
1971 int hci_le_create_cis_pending(struct hci_dev *hdev)
1972 {
1973         struct hci_conn *conn;
1974         bool pending = false;
1975
1976         rcu_read_lock();
1977
1978         list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
1979                 if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
1980                         rcu_read_unlock();
1981                         return -EBUSY;
1982                 }
1983
1984                 if (!hci_conn_check_create_cis(conn))
1985                         pending = true;
1986         }
1987
1988         rcu_read_unlock();
1989
1990         if (!pending)
1991                 return 0;
1992
1993         /* Queue Create CIS */
1994         return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
1995 }
1996
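/* Fill in any unset ISO QoS fields: use the controller MTU as the SDU size,
 * the given PHY when set to BT_ISO_PHY_ANY, and the LE ACL connection
 * interval and latency when those are not set.
 */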
1997 static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
1998                               struct bt_iso_io_qos *qos, __u8 phy)
1999 {
2000         /* Only set MTU if PHY is enabled */
2001         if (!qos->sdu && qos->phy) {
2002                 if (hdev->iso_mtu > 0)
2003                         qos->sdu = hdev->iso_mtu;
2004                 else if (hdev->le_mtu > 0)
2005                         qos->sdu = hdev->le_mtu;
2006                 else
2007                         qos->sdu = hdev->acl_mtu;
2008         }
2009
2010         /* Use the same PHY as ACL if set to any */
2011         if (qos->phy == BT_ISO_PHY_ANY)
2012                 qos->phy = phy;
2013
2014         /* Use LE ACL connection interval if not set */
2015         if (!qos->interval)
2016                 /* Convert the ACL interval from 1.25 ms units to microseconds */
2017                 qos->interval = conn->le_conn_interval * 1250;
2018
2019         /* Use LE ACL connection latency if not set */
2020         if (!qos->latency)
2021                 qos->latency = conn->le_conn_latency;
2022 }
2023
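/* Start periodic advertising carrying the BASE and then issue LE Create BIG
 * for the bound BISes.
 */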
2024 static int create_big_sync(struct hci_dev *hdev, void *data)
2025 {
2026         struct hci_conn *conn = data;
2027         struct bt_iso_qos *qos = &conn->iso_qos;
2028         u16 interval, sync_interval = 0;
2029         u32 flags = 0;
2030         int err;
2031
2032         if (qos->bcast.out.phy == 0x02)
2033                 flags |= MGMT_ADV_FLAG_SEC_2M;
2034
2035         /* Align intervals */
2036         interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;
2037
2038         if (qos->bcast.bis)
2039                 sync_interval = interval * 4;
2040
2041         err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
2042                                      conn->le_per_adv_data, flags, interval,
2043                                      interval, sync_interval);
2044         if (err)
2045                 return err;
2046
2047         return hci_le_create_big(conn, &conn->iso_qos);
2048 }
2049
2050 static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
2051 {
2052         struct hci_cp_le_pa_create_sync *cp = data;
2053
2054         bt_dev_dbg(hdev, "");
2055
2056         if (err)
2057                 bt_dev_err(hdev, "Unable to create PA: %d", err);
2058
2059         kfree(cp);
2060 }
2061
2062 static int create_pa_sync(struct hci_dev *hdev, void *data)
2063 {
2064         struct hci_cp_le_pa_create_sync *cp = data;
2065         int err;
2066
2067         err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
2068                                     sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2069         if (err) {
2070                 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2071                 return err;
2072         }
2073
2074         return hci_update_passive_scan_sync(hdev);
2075 }
2076
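/* Queue LE Periodic Advertising Create Sync; only one PA sync attempt may
 * be pending at a time, which is tracked with the HCI_PA_SYNC flag.
 */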
2077 int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
2078                        __u8 sid, struct bt_iso_qos *qos)
2079 {
2080         struct hci_cp_le_pa_create_sync *cp;
2081
2082         if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
2083                 return -EBUSY;
2084
2085         cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2086         if (!cp) {
2087                 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2088                 return -ENOMEM;
2089         }
2090
2091         cp->options = qos->bcast.options;
2092         cp->sid = sid;
2093         cp->addr_type = dst_type;
2094         bacpy(&cp->addr, dst);
2095         cp->skip = cpu_to_le16(qos->bcast.skip);
2096         cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
2097         cp->sync_cte_type = qos->bcast.sync_cte_type;
2098
2099         /* Queue pa_create_sync followed by the passive scan update */
2100         return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
2101 }
2102
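/* Send LE BIG Create Sync to synchronize to up to 0x11 BISes of the
 * periodic advertising train identified by sync_handle.
 */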
2103 int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos,
2104                            __u16 sync_handle, __u8 num_bis, __u8 bis[])
2105 {
2106         struct __packed {
2107                 struct hci_cp_le_big_create_sync cp;
2108                 __u8  bis[0x11];
2109         } pdu;
2110         int err;
2111
2112         if (num_bis > sizeof(pdu.bis))
2113                 return -EINVAL;
2114
2115         err = qos_set_big(hdev, qos);
2116         if (err)
2117                 return err;
2118
2119         memset(&pdu, 0, sizeof(pdu));
2120         pdu.cp.handle = qos->bcast.big;
2121         pdu.cp.sync_handle = cpu_to_le16(sync_handle);
2122         pdu.cp.encryption = qos->bcast.encryption;
2123         memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
2124         pdu.cp.mse = qos->bcast.mse;
2125         pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
2126         pdu.cp.num_bis = num_bis;
2127         memcpy(pdu.bis, bis, num_bis);
2128
2129         return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
2130                             sizeof(pdu.cp) + num_bis, &pdu);
2131 }
2132
2133 static void create_big_complete(struct hci_dev *hdev, void *data, int err)
2134 {
2135         struct hci_conn *conn = data;
2136
2137         bt_dev_dbg(hdev, "conn %p", conn);
2138
2139         if (err) {
2140                 bt_dev_err(hdev, "Unable to create BIG: %d", err);
2141                 hci_connect_cfm(conn, err);
2142                 hci_conn_del(conn);
2143         }
2144 }
2145
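/* Bind a BIS: create the hci_conn, wrap the BASE in a Basic Announcement
 * service data field for the periodic advertising data and leave the
 * connection in BT_BOUND state.
 */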
2146 struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
2147                               struct bt_iso_qos *qos,
2148                               __u8 base_len, __u8 *base)
2149 {
2150         struct hci_conn *conn;
2151         __u8 eir[HCI_MAX_PER_AD_LENGTH];
2152
2153         if (base_len && base)
2154                 base_len = eir_append_service_data(eir, 0,  0x1851,
2155                                                    base, base_len);
2156
2157         /* We need a hci_conn object using BDADDR_ANY as dst */
2158         conn = hci_add_bis(hdev, dst, qos, base_len, eir);
2159         if (IS_ERR(conn))
2160                 return conn;
2161
2162         /* Update LINK PHYs according to QoS preference */
2163         conn->le_tx_phy = qos->bcast.out.phy;
2165
2166         /* Add Basic Announcement into Periodic Adv Data if BASE is set */
2167         if (base_len && base) {
2168                 memcpy(conn->le_per_adv_data,  eir, sizeof(eir));
2169                 conn->le_per_adv_data_len = base_len;
2170         }
2171
2172         hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
2173                           conn->le_tx_phy ? conn->le_tx_phy :
2174                           hdev->le_tx_def_phys);
2175
2176         conn->iso_qos = *qos;
2177         conn->state = BT_BOUND;
2178
2179         return conn;
2180 }
2181
2182 static void bis_mark_per_adv(struct hci_conn *conn, void *data)
2183 {
2184         struct iso_list_data *d = data;
2185
2186         /* Skip if not broadcast/ANY address */
2187         if (bacmp(&conn->dst, BDADDR_ANY))
2188                 return;
2189
2190         if (d->big != conn->iso_qos.bcast.big ||
2191             d->bis == BT_ISO_QOS_BIS_UNSET ||
2192             d->bis != conn->iso_qos.bcast.bis)
2193                 return;
2194
2195         set_bit(HCI_CONN_PER_ADV, &conn->flags);
2196 }
2197
2198 struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
2199                                  __u8 dst_type, struct bt_iso_qos *qos,
2200                                  __u8 base_len, __u8 *base)
2201 {
2202         struct hci_conn *conn;
2203         int err;
2204         struct iso_list_data data;
2205
2206         conn = hci_bind_bis(hdev, dst, qos, base_len, base);
2207         if (IS_ERR(conn))
2208                 return conn;
2209
2210         data.big = qos->bcast.big;
2211         data.bis = qos->bcast.bis;
2212
2213         /* Set HCI_CONN_PER_ADV for all bound connections, to mark that
2214          * the start periodic advertising and create BIG commands have
2215          * been queued
2216          */
2217         hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
2218                                  BT_BOUND, &data);
2219
2220         /* Queue start periodic advertising and create BIG */
2221         err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
2222                                  create_big_complete);
2223         if (err < 0) {
2224                 hci_conn_drop(conn);
2225                 return ERR_PTR(err);
2226         }
2227
2228         return conn;
2229 }
2230
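/* Connect a CIS: establish (or reuse) the LE ACL to the peer, bind the CIS
 * to it and queue LE Create CIS once everything is in place.
 */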
2231 struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
2232                                  __u8 dst_type, struct bt_iso_qos *qos)
2233 {
2234         struct hci_conn *le;
2235         struct hci_conn *cis;
2236         struct hci_link *link;
2237
2238         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2239                 le = hci_connect_le(hdev, dst, dst_type, false,
2240                                     BT_SECURITY_LOW,
2241                                     HCI_LE_CONN_TIMEOUT,
2242                                     HCI_ROLE_SLAVE);
2243         else
2244                 le = hci_connect_le_scan(hdev, dst, dst_type,
2245                                          BT_SECURITY_LOW,
2246                                          HCI_LE_CONN_TIMEOUT,
2247                                          CONN_REASON_ISO_CONNECT);
2248         if (IS_ERR(le))
2249                 return le;
2250
2251         hci_iso_qos_setup(hdev, le, &qos->ucast.out,
2252                           le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
2253         hci_iso_qos_setup(hdev, le, &qos->ucast.in,
2254                           le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);
2255
2256         cis = hci_bind_cis(hdev, dst, dst_type, qos);
2257         if (IS_ERR(cis)) {
2258                 hci_conn_drop(le);
2259                 return cis;
2260         }
2261
2262         link = hci_conn_link(le, cis);
2263         if (!link) {
2264                 hci_conn_drop(le);
2265                 hci_conn_drop(cis);
2266                 return ERR_PTR(-ENOLINK);
2267         }
2268
2269         /* Link takes the refcount */
2270         hci_conn_drop(cis);
2271
2272         cis->state = BT_CONNECT;
2273
2274         hci_le_create_cis_pending(hdev);
2275
2276         return cis;
2277 }
2278
2279 /* Check link security requirement */
2280 int hci_conn_check_link_mode(struct hci_conn *conn)
2281 {
2282         BT_DBG("hcon %p", conn);
2283
2284         /* In Secure Connections Only mode, it is required that Secure
2285          * Connections is used and the link is encrypted with AES-CCM
2286          * using a P-256 authenticated combination key.
2287          */
2288         if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2289                 if (!hci_conn_sc_enabled(conn) ||
2290                     !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2291                     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2292                         return 0;
2293         }
2294
2295          /* AES encryption is required for Level 4:
2296           *
2297           * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2298           * page 1319:
2299           *
2300           * 128-bit equivalent strength for link and encryption keys
2301           * required using FIPS approved algorithms (E0 not allowed,
2302           * SAFER+ not allowed, and P-192 not allowed; encryption key
2303           * not shortened)
2304           */
2305         if (conn->sec_level == BT_SECURITY_FIPS &&
2306             !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2307                 bt_dev_err(conn->hdev,
2308                            "Invalid security: Missing AES-CCM usage");
2309                 return 0;
2310         }
2311
2312         if (hci_conn_ssp_enabled(conn) &&
2313             !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2314                 return 0;
2315
2316         return 1;
2317 }
2318
2319 /* Authenticate remote device */
2320 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
2321 {
2322         BT_DBG("hcon %p", conn);
2323
2324         if (conn->pending_sec_level > sec_level)
2325                 sec_level = conn->pending_sec_level;
2326
2327         if (sec_level > conn->sec_level)
2328                 conn->pending_sec_level = sec_level;
2329         else if (test_bit(HCI_CONN_AUTH, &conn->flags))
2330                 return 1;
2331
2332         /* Make sure we preserve an existing MITM requirement */
2333         auth_type |= (conn->auth_type & 0x01);
2334
2335         conn->auth_type = auth_type;
2336
2337         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2338                 struct hci_cp_auth_requested cp;
2339
2340                 cp.handle = cpu_to_le16(conn->handle);
2341                 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
2342                              sizeof(cp), &cp);
2343
2344                 /* If we're already encrypted set the REAUTH_PEND flag,
2345                  * otherwise set the ENCRYPT_PEND.
2346                  */
2347                 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2348                         set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2349                 else
2350                         set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2351         }
2352
2353         return 0;
2354 }
2355
2356 /* Encrypt the link */
2357 static void hci_conn_encrypt(struct hci_conn *conn)
2358 {
2359         BT_DBG("hcon %p", conn);
2360
2361         if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2362                 struct hci_cp_set_conn_encrypt cp;
2363                 cp.handle  = cpu_to_le16(conn->handle);
2364                 cp.encrypt = 0x01;
2365                 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2366                              &cp);
2367         }
2368 }
2369
2370 /* Enable security */
2371 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
2372                       bool initiator)
2373 {
2374         BT_DBG("hcon %p", conn);
2375
2376         if (conn->type == LE_LINK)
2377                 return smp_conn_security(conn, sec_level);
2378
2379         /* For sdp we don't need the link key. */
2380         if (sec_level == BT_SECURITY_SDP)
2381                 return 1;
2382
2383         /* For non-2.1 devices and a low security level we don't need the link
2384            key. */
2385         if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
2386                 return 1;
2387
2388         /* For other security levels we need the link key. */
2389         if (!test_bit(HCI_CONN_AUTH, &conn->flags))
2390                 goto auth;
2391
2392         /* An authenticated FIPS approved combination key has sufficient
2393          * security for security level 4. */
2394         if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
2395             sec_level == BT_SECURITY_FIPS)
2396                 goto encrypt;
2397
2398         /* An authenticated combination key has sufficient security for
2399            security level 3. */
2400         if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
2401              conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
2402             sec_level == BT_SECURITY_HIGH)
2403                 goto encrypt;
2404
2405         /* An unauthenticated combination key has sufficient security for
2406            security levels 1 and 2. */
2407         if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
2408              conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
2409             (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
2410                 goto encrypt;
2411
2412         /* A combination key always has sufficient security for security
2413            levels 1 and 2. A high security level requires that the
2414            combination key be generated using the maximum PIN code
2415            length (16). This applies to pre-2.1 units. */
2416         if (conn->key_type == HCI_LK_COMBINATION &&
2417             (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
2418              conn->pin_length == 16))
2419                 goto encrypt;
2420
2421 auth:
2422         if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
2423                 return 0;
2424
2425         if (initiator)
2426                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2427
2428         if (!hci_conn_auth(conn, sec_level, auth_type))
2429                 return 0;
2430
2431 encrypt:
2432         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
2433                 /* Ensure that the encryption key size has been read,
2434                  * otherwise stall the upper layer responses.
2435                  */
2436                 if (!conn->enc_key_size)
2437                         return 0;
2438
2439                 /* Nothing else needed, all requirements are met */
2440                 return 1;
2441         }
2442
2443         hci_conn_encrypt(conn);
2444         return 0;
2445 }
2446 EXPORT_SYMBOL(hci_conn_security);
2447
2448 /* Check secure link requirement */
2449 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2450 {
2451         BT_DBG("hcon %p", conn);
2452
2453         /* Accept if non-secure or higher security level is required */
2454         if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2455                 return 1;
2456
2457         /* Accept if secure or higher security level is already present */
2458         if (conn->sec_level == BT_SECURITY_HIGH ||
2459             conn->sec_level == BT_SECURITY_FIPS)
2460                 return 1;
2461
2462         /* Reject not secure link */
2463         return 0;
2464 }
2465 EXPORT_SYMBOL(hci_conn_check_secure);
2466
2467 /* Switch role */
2468 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2469 {
2470         BT_DBG("hcon %p", conn);
2471
2472         if (role == conn->role)
2473                 return 1;
2474
2475         if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2476                 struct hci_cp_switch_role cp;
2477                 bacpy(&cp.bdaddr, &conn->dst);
2478                 cp.role = role;
2479                 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2480         }
2481
2482         return 0;
2483 }
2484 EXPORT_SYMBOL(hci_conn_switch_role);
2485
2486 /* Enter active mode */
2487 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2488 {
2489         struct hci_dev *hdev = conn->hdev;
2490
2491         BT_DBG("hcon %p mode %d", conn, conn->mode);
2492
2493         if (conn->mode != HCI_CM_SNIFF)
2494                 goto timer;
2495
2496         if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2497                 goto timer;
2498
2499         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2500                 struct hci_cp_exit_sniff_mode cp;
2501                 cp.handle = cpu_to_le16(conn->handle);
2502                 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2503         }
2504
2505 timer:
2506         if (hdev->idle_timeout > 0)
2507                 queue_delayed_work(hdev->workqueue, &conn->idle_work,
2508                                    msecs_to_jiffies(hdev->idle_timeout));
2509 }
2510
2511 /* Drop all connections on the device */
2512 void hci_conn_hash_flush(struct hci_dev *hdev)
2513 {
2514         struct list_head *head = &hdev->conn_hash.list;
2515         struct hci_conn *conn;
2516
2517         BT_DBG("hdev %s", hdev->name);
2518
2519         /* We should not traverse the list here, because hci_conn_del
2520          * can remove extra links, which may cause the list traversal
2521          * to hit items that have already been released.
2522          */
2523         while ((conn = list_first_entry_or_null(head,
2524                                                 struct hci_conn,
2525                                                 list)) != NULL) {
2526                 conn->state = BT_CLOSED;
2527                 hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
2528                 hci_conn_del(conn);
2529         }
2530 }
2531
2532 /* Check pending connect attempts */
2533 void hci_conn_check_pending(struct hci_dev *hdev)
2534 {
2535         struct hci_conn *conn;
2536
2537         BT_DBG("hdev %s", hdev->name);
2538
2539         hci_dev_lock(hdev);
2540
2541         conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
2542         if (conn)
2543                 hci_acl_create_connection(conn);
2544
2545         hci_dev_unlock(hdev);
2546 }
2547
2548 static u32 get_link_mode(struct hci_conn *conn)
2549 {
2550         u32 link_mode = 0;
2551
2552         if (conn->role == HCI_ROLE_MASTER)
2553                 link_mode |= HCI_LM_MASTER;
2554
2555         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2556                 link_mode |= HCI_LM_ENCRYPT;
2557
2558         if (test_bit(HCI_CONN_AUTH, &conn->flags))
2559                 link_mode |= HCI_LM_AUTH;
2560
2561         if (test_bit(HCI_CONN_SECURE, &conn->flags))
2562                 link_mode |= HCI_LM_SECURE;
2563
2564         if (test_bit(HCI_CONN_FIPS, &conn->flags))
2565                 link_mode |= HCI_LM_FIPS;
2566
2567         return link_mode;
2568 }
2569
2570 int hci_get_conn_list(void __user *arg)
2571 {
2572         struct hci_conn *c;
2573         struct hci_conn_list_req req, *cl;
2574         struct hci_conn_info *ci;
2575         struct hci_dev *hdev;
2576         int n = 0, size, err;
2577
2578         if (copy_from_user(&req, arg, sizeof(req)))
2579                 return -EFAULT;
2580
2581         if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
2582                 return -EINVAL;
2583
2584         size = sizeof(req) + req.conn_num * sizeof(*ci);
2585
2586         cl = kmalloc(size, GFP_KERNEL);
2587         if (!cl)
2588                 return -ENOMEM;
2589
2590         hdev = hci_dev_get(req.dev_id);
2591         if (!hdev) {
2592                 kfree(cl);
2593                 return -ENODEV;
2594         }
2595
2596         ci = cl->conn_info;
2597
2598         hci_dev_lock(hdev);
2599         list_for_each_entry(c, &hdev->conn_hash.list, list) {
2600                 bacpy(&(ci + n)->bdaddr, &c->dst);
2601                 (ci + n)->handle = c->handle;
2602                 (ci + n)->type  = c->type;
2603                 (ci + n)->out   = c->out;
2604                 (ci + n)->state = c->state;
2605                 (ci + n)->link_mode = get_link_mode(c);
2606                 if (++n >= req.conn_num)
2607                         break;
2608         }
2609         hci_dev_unlock(hdev);
2610
2611         cl->dev_id = hdev->id;
2612         cl->conn_num = n;
2613         size = sizeof(req) + n * sizeof(*ci);
2614
2615         hci_dev_put(hdev);
2616
2617         err = copy_to_user(arg, cl, size);
2618         kfree(cl);
2619
2620         return err ? -EFAULT : 0;
2621 }
2622
2623 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2624 {
2625         struct hci_conn_info_req req;
2626         struct hci_conn_info ci;
2627         struct hci_conn *conn;
2628         char __user *ptr = arg + sizeof(req);
2629
2630         if (copy_from_user(&req, arg, sizeof(req)))
2631                 return -EFAULT;
2632
2633         hci_dev_lock(hdev);
2634         conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2635         if (conn) {
2636                 bacpy(&ci.bdaddr, &conn->dst);
2637                 ci.handle = conn->handle;
2638                 ci.type  = conn->type;
2639                 ci.out   = conn->out;
2640                 ci.state = conn->state;
2641                 ci.link_mode = get_link_mode(conn);
2642         }
2643         hci_dev_unlock(hdev);
2644
2645         if (!conn)
2646                 return -ENOENT;
2647
2648         return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
2649 }
2650
2651 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2652 {
2653         struct hci_auth_info_req req;
2654         struct hci_conn *conn;
2655
2656         if (copy_from_user(&req, arg, sizeof(req)))
2657                 return -EFAULT;
2658
2659         hci_dev_lock(hdev);
2660         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2661         if (conn)
2662                 req.type = conn->auth_type;
2663         hci_dev_unlock(hdev);
2664
2665         if (!conn)
2666                 return -ENOENT;
2667
2668         return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
2669 }
2670
2671 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2672 {
2673         struct hci_dev *hdev = conn->hdev;
2674         struct hci_chan *chan;
2675
2676         BT_DBG("%s hcon %p", hdev->name, conn);
2677
2678         if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2679                 BT_DBG("Refusing to create new hci_chan");
2680                 return NULL;
2681         }
2682
2683         chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2684         if (!chan)
2685                 return NULL;
2686
2687         chan->conn = hci_conn_get(conn);
2688         skb_queue_head_init(&chan->data_q);
2689         chan->state = BT_CONNECTED;
2690
2691         list_add_rcu(&chan->list, &conn->chan_list);
2692
2693         return chan;
2694 }
2695
2696 void hci_chan_del(struct hci_chan *chan)
2697 {
2698         struct hci_conn *conn = chan->conn;
2699         struct hci_dev *hdev = conn->hdev;
2700
2701         BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
2702
2703         list_del_rcu(&chan->list);
2704
2705         synchronize_rcu();
2706
2707         /* Prevent new hci_chan's from being created for this hci_conn */
2708         set_bit(HCI_CONN_DROP, &conn->flags);
2709
2710         hci_conn_put(conn);
2711
2712         skb_queue_purge(&chan->data_q);
2713         kfree(chan);
2714 }
2715
2716 void hci_chan_list_flush(struct hci_conn *conn)
2717 {
2718         struct hci_chan *chan, *n;
2719
2720         BT_DBG("hcon %p", conn);
2721
2722         list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2723                 hci_chan_del(chan);
2724 }
2725
2726 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2727                                                  __u16 handle)
2728 {
2729         struct hci_chan *hchan;
2730
2731         list_for_each_entry(hchan, &hcon->chan_list, list) {
2732                 if (hchan->handle == handle)
2733                         return hchan;
2734         }
2735
2736         return NULL;
2737 }
2738
2739 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2740 {
2741         struct hci_conn_hash *h = &hdev->conn_hash;
2742         struct hci_conn *hcon;
2743         struct hci_chan *hchan = NULL;
2744
2745         rcu_read_lock();
2746
2747         list_for_each_entry_rcu(hcon, &h->list, list) {
2748                 hchan = __hci_chan_lookup_handle(hcon, handle);
2749                 if (hchan)
2750                         break;
2751         }
2752
2753         rcu_read_unlock();
2754
2755         return hchan;
2756 }
2757
2758 u32 hci_conn_get_phy(struct hci_conn *conn)
2759 {
2760         u32 phys = 0;
2761
2762         /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
2763          * Table 6.2: Packets defined for synchronous, asynchronous, and
2764          * CPB logical transport types.
2765          */
2766         switch (conn->type) {
2767         case SCO_LINK:
2768                 /* SCO logical transport (1 Mb/s):
2769                  * HV1, HV2, HV3 and DV.
2770                  */
2771                 phys |= BT_PHY_BR_1M_1SLOT;
2772
2773                 break;
2774
2775         case ACL_LINK:
2776                 /* ACL logical transport (1 Mb/s) ptt=0:
2777                  * DH1, DM3, DH3, DM5 and DH5.
2778                  */
2779                 phys |= BT_PHY_BR_1M_1SLOT;
2780
2781                 if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
2782                         phys |= BT_PHY_BR_1M_3SLOT;
2783
2784                 if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
2785                         phys |= BT_PHY_BR_1M_5SLOT;
2786
2787                 /* ACL logical transport (2 Mb/s) ptt=1:
2788                  * 2-DH1, 2-DH3 and 2-DH5.
2789                  */
2790                 if (!(conn->pkt_type & HCI_2DH1))
2791                         phys |= BT_PHY_EDR_2M_1SLOT;
2792
2793                 if (!(conn->pkt_type & HCI_2DH3))
2794                         phys |= BT_PHY_EDR_2M_3SLOT;
2795
2796                 if (!(conn->pkt_type & HCI_2DH5))
2797                         phys |= BT_PHY_EDR_2M_5SLOT;
2798
2799                 /* ACL logical transport (3 Mb/s) ptt=1:
2800                  * 3-DH1, 3-DH3 and 3-DH5.
2801                  */
2802                 if (!(conn->pkt_type & HCI_3DH1))
2803                         phys |= BT_PHY_EDR_3M_1SLOT;
2804
2805                 if (!(conn->pkt_type & HCI_3DH3))
2806                         phys |= BT_PHY_EDR_3M_3SLOT;
2807
2808                 if (!(conn->pkt_type & HCI_3DH5))
2809                         phys |= BT_PHY_EDR_3M_5SLOT;
2810
2811                 break;
2812
2813         case ESCO_LINK:
2814                 /* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
2815                 phys |= BT_PHY_BR_1M_1SLOT;
2816
2817                 if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
2818                         phys |= BT_PHY_BR_1M_3SLOT;
2819
2820                 /* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
2821                 if (!(conn->pkt_type & ESCO_2EV3))
2822                         phys |= BT_PHY_EDR_2M_1SLOT;
2823
2824                 if (!(conn->pkt_type & ESCO_2EV5))
2825                         phys |= BT_PHY_EDR_2M_3SLOT;
2826
2827                 /* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
2828                 if (!(conn->pkt_type & ESCO_3EV3))
2829                         phys |= BT_PHY_EDR_3M_1SLOT;
2830
2831                 if (!(conn->pkt_type & ESCO_3EV5))
2832                         phys |= BT_PHY_EDR_3M_3SLOT;
2833
2834                 break;
2835
2836         case LE_LINK:
2837                 if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
2838                         phys |= BT_PHY_LE_1M_TX;
2839
2840                 if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
2841                         phys |= BT_PHY_LE_1M_RX;
2842
2843                 if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
2844                         phys |= BT_PHY_LE_2M_TX;
2845
2846                 if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
2847                         phys |= BT_PHY_LE_2M_RX;
2848
2849                 if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
2850                         phys |= BT_PHY_LE_CODED_TX;
2851
2852                 if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
2853                         phys |= BT_PHY_LE_CODED_RX;
2854
2855                 break;
2856         }
2857
2858         return phys;
2859 }
2860
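/* hci_cmd_sync callback: look the connection up by handle again, since it
 * may have been freed while the command was queued, and abort it with the
 * stored abort_reason.
 */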
2861 static int abort_conn_sync(struct hci_dev *hdev, void *data)
2862 {
2863         struct hci_conn *conn;
2864         u16 handle = PTR_UINT(data);
2865
2866         conn = hci_conn_hash_lookup_handle(hdev, handle);
2867         if (!conn)
2868                 return 0;
2869
2870         return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
2871 }
2872
2873 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2874 {
2875         struct hci_dev *hdev = conn->hdev;
2876
2877         /* If abort_reason has already been set, it means the connection is
2878          * already being aborted, so don't attempt to overwrite it.
2879          */
2880         if (conn->abort_reason)
2881                 return 0;
2882
2883         bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);
2884
2885         conn->abort_reason = reason;
2886
2887         /* If the connection is pending, check the command opcode since it
2888          * might be blocking on hci_cmd_sync_work while waiting for its
2889          * respective event, so we need to call hci_cmd_sync_cancel().
2890          *
2891          * hci_connect_le serializes the connection attempts so only one
2892          * connection can be in BT_CONNECT at a time.
2893          */
2894         if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
2895                 switch (hci_skb_event(hdev->sent_cmd)) {
2896                 case HCI_EV_LE_CONN_COMPLETE:
2897                 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
2898                 case HCI_EVT_LE_CIS_ESTABLISHED:
2899                         hci_cmd_sync_cancel(hdev, -ECANCELED);
2900                         break;
2901                 }
2902         }
2903
2904         return hci_cmd_sync_queue(hdev, abort_conn_sync, UINT_PTR(conn->handle),
2905                                   NULL);
2906 }