2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
67 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
69 if (link_type == LE_LINK) {
70 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
73 return BDADDR_LE_RANDOM;
79 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
81 return bdaddr_type(hcon->type, hcon->src_type);
84 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
86 return bdaddr_type(hcon->type, hcon->dst_type);
89 /* ---- L2CAP channels ---- */
/* NOTE(review): the numeric prefixes below are line-number residue from an
 * extraction pass, and several structural lines (braces, match tests,
 * returns, l2cap_chan_lock calls) were dropped — verify this whole
 * lookup-helper section against upstream net/bluetooth/l2cap_core.c.
 */
/* Unlocked lookup by destination CID; caller presumably holds
 * conn->chan_lock — TODO confirm.
 */
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
96 	list_for_each_entry(c, &conn->chan_l, list) {
/* Unlocked lookup by source CID. */
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
106 	struct l2cap_chan *c;
108 	list_for_each_entry(c, &conn->chan_l, list) {
115 /* Find channel with given SCID.
116  * Returns locked channel. */
117 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
120 	struct l2cap_chan *c;
122 	mutex_lock(&conn->chan_lock);
123 	c = __l2cap_get_chan_by_scid(conn, cid);
/* NOTE(review): the lock-on-found step between lookup and unlock is not
 * visible here — confirm it was not lost in extraction.
 */
126 	mutex_unlock(&conn->chan_lock);
131 /* Find channel with given DCID.
132  * Returns locked channel.
134 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
137 	struct l2cap_chan *c;
139 	mutex_lock(&conn->chan_lock);
140 	c = __l2cap_get_chan_by_dcid(conn, cid);
143 	mutex_unlock(&conn->chan_lock);
/* Unlocked lookup by command identifier. */
148 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
151 	struct l2cap_chan *c;
153 	list_for_each_entry(c, &conn->chan_l, list) {
154 		if (c->ident == ident)
/* Locked wrapper around the ident lookup. */
160 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
163 	struct l2cap_chan *c;
165 	mutex_lock(&conn->chan_lock);
166 	c = __l2cap_get_chan_by_ident(conn, ident);
169 	mutex_unlock(&conn->chan_lock);
/* Global lookup: channel bound to @psm on source address @src; caller
 * holds chan_list_lock.
 */
174 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
176 	struct l2cap_chan *c;
178 	list_for_each_entry(c, &chan_list, global_l) {
179 		if (c->sport == psm && !bacmp(&c->src, src))
/* Bind @chan to @psm on @src, or auto-allocate a dynamic PSM when @psm is
 * zero. NOTE(review): extraction dropped the error-handling lines, the
 * `incr` initialization, the success break and the return statements —
 * verify against upstream before relying on this text.
 */
185 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
189 	write_lock(&chan_list_lock);
/* Explicit PSM request: fail if already taken on this source address */
191 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
201 		u16 p, start, end, incr;
/* BR/EDR and LE use different dynamic PSM ranges */
203 		if (chan->src_type == BDADDR_BREDR) {
204 			start = L2CAP_PSM_DYN_START;
205 			end = L2CAP_PSM_AUTO_END;
208 			start = L2CAP_PSM_LE_DYN_START;
209 			end = L2CAP_PSM_LE_DYN_END;
/* Scan the range for a free PSM */
214 		for (p = start; p <= end; p += incr)
215 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
216 				chan->psm   = cpu_to_le16(p);
217 				chan->sport = cpu_to_le16(p);
224 	write_unlock(&chan_list_lock);
227 EXPORT_SYMBOL_GPL(l2cap_add_psm);
/* Register a fixed channel with source CID @scid. NOTE(review): the
 * chan->scid assignment and return are not visible here.
 */
229 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
231 	write_lock(&chan_list_lock);
233 	/* Override the defaults (which are for conn-oriented) */
234 	chan->omtu = L2CAP_DEFAULT_MTU;
235 	chan->chan_type = L2CAP_CHAN_FIXED;
239 	write_unlock(&chan_list_lock);
/* Pick the first unused dynamic CID on @conn; range depends on link
 * type. NOTE(review): declarations and returns elided by extraction.
 */
244 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
248 	if (conn->hcon->type == LE_LINK)
249 		dyn_end = L2CAP_CID_LE_DYN_END;
251 		dyn_end = L2CAP_CID_DYN_END;
253 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
254 		if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move @chan to @state and notify the owner via ops->state_change.
 * NOTE(review): the `chan->state = state;` assignments appear to have
 * been dropped by extraction in both helpers — verify upstream.
 */
261 static void l2cap_state_change(struct l2cap_chan *chan, int state)
263 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
264 	       state_to_string(state));
267 	chan->ops->state_change(chan, state, 0);
/* As above, but also reports @err to the owner. */
270 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
274 	chan->ops->state_change(chan, chan->state, err);
277 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
279 chan->ops->state_change(chan, chan->state, err);
282 static void __set_retrans_timer(struct l2cap_chan *chan)
284 if (!delayed_work_pending(&chan->monitor_timer) &&
285 chan->retrans_timeout) {
286 l2cap_set_timer(chan, &chan->retrans_timer,
287 msecs_to_jiffies(chan->retrans_timeout));
291 static void __set_monitor_timer(struct l2cap_chan *chan)
293 __clear_retrans_timer(chan);
294 if (chan->monitor_timeout) {
295 l2cap_set_timer(chan, &chan->monitor_timer,
296 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of @head for the skb whose ERTM txseq matches @seq.
 * NOTE(review): the skb declaration and the return statements were
 * dropped by extraction — verify against upstream.
 */
300 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
305 	skb_queue_walk(head, skb) {
306 		if (bt_cb(skb)->l2cap.txseq == seq)
313 /* ---- L2CAP sequence number lists ---- */
315 /* For ERTM, ordered lists of sequence numbers must be tracked for
316 * SREJ requests that are received and for frames that are to be
317 * retransmitted. These seq_list functions implement a singly-linked
318 * list in an array, where membership in the list can also be checked
319 * in constant time. Items can also be added to the tail of the list
320 and removed from the head in constant time, without further memory allocation.
/* Allocate and reset a sequence list sized for an ERTM window of @size.
 * NOTE(review): the kmalloc_array failure check (likely returning
 * -ENOMEM) and the final `return 0;` were dropped by extraction.
 */
324 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
326 	size_t alloc_size, i;
328 	/* Allocated size is a power of 2 to map sequence numbers
329 	 * (which may be up to 14 bits) in to a smaller array that is
330 	 * sized for the negotiated ERTM transmit windows.
332 	alloc_size = roundup_pow_of_two(size);
334 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
/* Power-of-two size makes `& mask` a cheap modulo */
338 	seq_list->mask = alloc_size - 1;
339 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 	for (i = 0; i < alloc_size; i++)
342 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
347 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
349 kfree(seq_list->list);
352 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
355 /* Constant-time check for list membership */
356 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
359 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
361 u16 seq = seq_list->head;
362 u16 mask = seq_list->mask;
364 seq_list->head = seq_list->list[seq & mask];
365 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
367 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
368 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
369 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
375 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
379 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
382 for (i = 0; i <= seq_list->mask; i++)
383 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
385 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
386 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
389 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
391 u16 mask = seq_list->mask;
393 /* All appends happen in constant time */
395 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
398 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
399 seq_list->head = seq;
401 seq_list->list[seq_list->tail & mask] = seq;
403 seq_list->tail = seq;
404 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with a
 * reason derived from its state. NOTE(review): the declaration of
 * `reason` and its fallback (else) assignment were dropped by
 * extraction — verify against upstream.
 */
407 static void l2cap_chan_timeout(struct work_struct *work)
409 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
411 	struct l2cap_conn *conn = chan->conn;
414 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
416 	mutex_lock(&conn->chan_lock);
417 	l2cap_chan_lock(chan);
419 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
420 		reason = ECONNREFUSED;
421 	else if (chan->state == BT_CONNECT &&
422 		 chan->sec_level != BT_SECURITY_SDP)
423 		reason = ECONNREFUSED;
427 	l2cap_chan_close(chan, reason);
429 	l2cap_chan_unlock(chan);
/* ops->close runs after dropping the channel lock */
431 	chan->ops->close(chan);
432 	mutex_unlock(&conn->chan_lock);
/* Drop the reference taken when the timer was armed */
434 	l2cap_chan_put(chan);
/* Allocate and initialize a new channel, adding it to the global list.
 * NOTE(review): the kzalloc NULL check and the final `return chan;`
 * were dropped by extraction — verify against upstream.
 */
437 struct l2cap_chan *l2cap_chan_create(void)
439 	struct l2cap_chan *chan;
441 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
445 	mutex_init(&chan->lock);
447 	/* Set default lock nesting level */
448 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
450 	write_lock(&chan_list_lock);
451 	list_add(&chan->global_l, &chan_list);
452 	write_unlock(&chan_list_lock);
454 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
456 	chan->state = BT_OPEN;
/* Initial reference; released via l2cap_chan_put */
458 	kref_init(&chan->kref);
460 	/* This flag is cleared in l2cap_chan_ready() */
461 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
463 	BT_DBG("chan %p", chan);
467 EXPORT_SYMBOL_GPL(l2cap_chan_create);
469 static void l2cap_chan_destroy(struct kref *kref)
471 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
473 BT_DBG("chan %p", chan);
475 write_lock(&chan_list_lock);
476 list_del(&chan->global_l);
477 write_unlock(&chan_list_lock);
482 void l2cap_chan_hold(struct l2cap_chan *c)
484 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
489 void l2cap_chan_put(struct l2cap_chan *c)
491 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
493 kref_put(&c->kref, l2cap_chan_destroy);
495 EXPORT_SYMBOL_GPL(l2cap_chan_put);
497 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
499 chan->fcs = L2CAP_FCS_CRC16;
500 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
501 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
502 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
503 chan->remote_max_tx = chan->max_tx;
504 chan->remote_tx_win = chan->tx_win;
505 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
506 chan->sec_level = BT_SECURITY_LOW;
507 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
508 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
509 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
510 chan->conf_state = 0;
512 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
514 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
/* Reset LE credit-based flow-control state before channel use.
 * NOTE(review): the resets of chan->sdu and chan->sdu_len appear to
 * have been dropped by extraction — verify against upstream.
 */
516 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
519 	chan->sdu_last_frag = NULL;
/* Peer grants tx credits; we start with none to send */
521 	chan->tx_credits = 0;
522 	chan->rx_credits = le_max_credits;
523 	chan->mps = min_t(u16, chan->imtu, le_default_mps);
525 	skb_queue_head_init(&chan->tx_q);
/* Attach @chan to @conn, assigning CIDs according to channel type.
 * NOTE(review): the case labels/breaks for the RAW/default arm and the
 * per-case `break;` lines were dropped by extraction — verify against
 * upstream before relying on this text.
 */
528 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
530 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
531 	       __le16_to_cpu(chan->psm), chan->dcid);
533 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
537 	switch (chan->chan_type) {
538 	case L2CAP_CHAN_CONN_ORIENTED:
539 		/* Alloc CID for connection-oriented socket */
540 		chan->scid = l2cap_alloc_cid(conn);
541 		if (conn->hcon->type == ACL_LINK)
542 			chan->omtu = L2CAP_DEFAULT_MTU;
545 	case L2CAP_CHAN_CONN_LESS:
546 		/* Connectionless socket */
547 		chan->scid = L2CAP_CID_CONN_LESS;
548 		chan->dcid = L2CAP_CID_CONN_LESS;
549 		chan->omtu = L2CAP_DEFAULT_MTU;
552 	case L2CAP_CHAN_FIXED:
553 		/* Caller will set CID and CID specific MTU values */
557 		/* Raw socket can send/recv signalling messages only */
558 		chan->scid = L2CAP_CID_SIGNALING;
559 		chan->dcid = L2CAP_CID_SIGNALING;
560 		chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort flow spec defaults */
563 	chan->local_id = L2CAP_BESTEFFORT_ID;
564 	chan->local_stype = L2CAP_SERV_BESTEFFORT;
565 	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
566 	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
567 	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
568 	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
570 	l2cap_chan_hold(chan);
572 	/* Only keep a reference for fixed channels if they requested it */
573 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
574 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
575 		hci_conn_hold(conn->hcon);
577 	list_add(&chan->list, &conn->chan_l);
580 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
582 mutex_lock(&conn->chan_lock);
583 __l2cap_chan_add(conn, chan);
584 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection and tear it down, purging any
 * mode-specific queues. NOTE(review): extraction dropped structural
 * lines here (the `if (conn)` guard, `return` after the CONF guard,
 * switch `break;`s and default arm) — verify against upstream.
 */
587 void l2cap_chan_del(struct l2cap_chan *chan, int err)
589 	struct l2cap_conn *conn = chan->conn;
591 	__clear_chan_timer(chan);
593 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
594 	       state_to_string(chan->state));
596 	chan->ops->teardown(chan, err);
599 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
600 		/* Delete from channel list */
601 		list_del(&chan->list);
/* Drop the reference taken in __l2cap_chan_add */
603 		l2cap_chan_put(chan);
607 		/* Reference was only held for non-fixed channels or
608 		 * fixed channels that explicitly requested it using the
609 		 * FLAG_HOLD_HCI_CONN flag.
611 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
612 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
613 			hci_conn_drop(conn->hcon);
615 		if (mgr && mgr->bredr_chan == chan)
616 			mgr->bredr_chan = NULL;
/* Tear down any AMP logical link still attached */
619 	if (chan->hs_hchan) {
620 		struct hci_chan *hs_hchan = chan->hs_hchan;
622 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
623 		amp_disconnect_logical_link(hs_hchan);
626 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
/* Mode-specific cleanup of pending transmit/receive state */
630 	case L2CAP_MODE_BASIC:
633 	case L2CAP_MODE_LE_FLOWCTL:
634 		skb_queue_purge(&chan->tx_q);
637 	case L2CAP_MODE_ERTM:
638 		__clear_retrans_timer(chan);
639 		__clear_monitor_timer(chan);
640 		__clear_ack_timer(chan);
642 		skb_queue_purge(&chan->srej_q);
644 		l2cap_seq_list_free(&chan->srej_list);
645 		l2cap_seq_list_free(&chan->retrans_list);
649 	case L2CAP_MODE_STREAMING:
650 		skb_queue_purge(&chan->tx_q);
656 EXPORT_SYMBOL_GPL(l2cap_chan_del);
658 static void l2cap_conn_update_id_addr(struct work_struct *work)
660 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
661 id_addr_update_work);
662 struct hci_conn *hcon = conn->hcon;
663 struct l2cap_chan *chan;
665 mutex_lock(&conn->chan_lock);
667 list_for_each_entry(chan, &conn->chan_l, list) {
668 l2cap_chan_lock(chan);
669 bacpy(&chan->dst, &hcon->dst);
670 chan->dst_type = bdaddr_dst_type(hcon);
671 l2cap_chan_unlock(chan);
674 mutex_unlock(&conn->chan_lock);
677 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
679 struct l2cap_conn *conn = chan->conn;
680 struct l2cap_le_conn_rsp rsp;
683 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
684 result = L2CAP_CR_AUTHORIZATION;
686 result = L2CAP_CR_BAD_PSM;
688 l2cap_state_change(chan, BT_DISCONN);
690 rsp.dcid = cpu_to_le16(chan->scid);
691 rsp.mtu = cpu_to_le16(chan->imtu);
692 rsp.mps = cpu_to_le16(chan->mps);
693 rsp.credits = cpu_to_le16(chan->rx_credits);
694 rsp.result = cpu_to_le16(result);
696 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
700 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
702 struct l2cap_conn *conn = chan->conn;
703 struct l2cap_conn_rsp rsp;
706 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
707 result = L2CAP_CR_SEC_BLOCK;
709 result = L2CAP_CR_BAD_PSM;
711 l2cap_state_change(chan, BT_DISCONN);
713 rsp.scid = cpu_to_le16(chan->dcid);
714 rsp.dcid = cpu_to_le16(chan->scid);
715 rsp.result = cpu_to_le16(result);
716 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
718 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close @chan according to its current state: tear down listeners,
 * send a disconnect or reject for active/pending connections, or just
 * delete the channel. NOTE(review): the switch case labels (BT_LISTEN,
 * BT_CONNECTED/BT_CONFIG, BT_CONNECT2, ...), `break;`s and the default
 * arm were dropped by extraction — verify against upstream.
 */
721 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
723 	struct l2cap_conn *conn = chan->conn;
725 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
727 	switch (chan->state) {
729 		chan->ops->teardown(chan, 0);
/* Connected/config: request disconnection from the peer */
734 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
735 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
736 			l2cap_send_disconn_req(chan, reason);
738 			l2cap_chan_del(chan, reason);
/* Incoming connection pending: reject it */
742 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
743 			if (conn->hcon->type == ACL_LINK)
744 				l2cap_chan_connect_reject(chan);
745 			else if (conn->hcon->type == LE_LINK)
746 				l2cap_chan_le_connect_reject(chan);
749 		l2cap_chan_del(chan, reason);
754 		l2cap_chan_del(chan, reason);
758 		chan->ops->teardown(chan, 0);
762 EXPORT_SYMBOL(l2cap_chan_close);
/* Derive the HCI authentication requirement for @chan from its type,
 * PSM and requested security level. NOTE(review): the case labels for
 * the RAW/default arms were dropped by extraction.
 */
764 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
766 	switch (chan->chan_type) {
768 		switch (chan->sec_level) {
769 		case BT_SECURITY_HIGH:
770 		case BT_SECURITY_FIPS:
771 			return HCI_AT_DEDICATED_BONDING_MITM;
772 		case BT_SECURITY_MEDIUM:
773 			return HCI_AT_DEDICATED_BONDING;
775 			return HCI_AT_NO_BONDING;
778 	case L2CAP_CHAN_CONN_LESS:
779 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
780 			if (chan->sec_level == BT_SECURITY_LOW)
781 				chan->sec_level = BT_SECURITY_SDP;
783 		if (chan->sec_level == BT_SECURITY_HIGH ||
784 		    chan->sec_level == BT_SECURITY_FIPS)
785 			return HCI_AT_NO_BONDING_MITM;
787 		return HCI_AT_NO_BONDING;
789 	case L2CAP_CHAN_CONN_ORIENTED:
/* SDP never needs more than SDP-level security */
790 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
791 			if (chan->sec_level == BT_SECURITY_LOW)
792 				chan->sec_level = BT_SECURITY_SDP;
794 		if (chan->sec_level == BT_SECURITY_HIGH ||
795 		    chan->sec_level == BT_SECURITY_FIPS)
796 			return HCI_AT_NO_BONDING_MITM;
798 		return HCI_AT_NO_BONDING;
802 		switch (chan->sec_level) {
803 		case BT_SECURITY_HIGH:
804 		case BT_SECURITY_FIPS:
805 			return HCI_AT_GENERAL_BONDING_MITM;
806 		case BT_SECURITY_MEDIUM:
807 			return HCI_AT_GENERAL_BONDING;
809 			return HCI_AT_NO_BONDING;
815 /* Service level security */
816 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
818 struct l2cap_conn *conn = chan->conn;
821 if (conn->hcon->type == LE_LINK)
822 return smp_conn_security(conn->hcon, chan->sec_level);
824 auth_type = l2cap_get_auth_type(chan);
826 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
/* Allocate the next signalling-command identifier for @conn.
 * NOTE(review): the local `id` declaration, the wrap-around assignment
 * after the > 128 test, and the return were dropped by extraction.
 */
830 static u8 l2cap_get_ident(struct l2cap_conn *conn)
834 	/* Get next available identificator.
835 	 *    1 - 128 are used by kernel.
836 	 *  129 - 199 are reserved.
837 	 *  200 - 254 are used by utilities like l2ping, etc.
840 	mutex_lock(&conn->ident_lock);
842 	if (++conn->tx_ident > 128)
847 	mutex_unlock(&conn->ident_lock);
/* Build and transmit one signalling command on @conn.
 * NOTE(review): the NULL-skb bail-out and the `flags` declaration with
 * its ACL_START fallback were dropped by extraction.
 */
852 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
855 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
858 	BT_DBG("code 0x%2.2x", code);
863 	/* Use NO_FLUSH if supported or we have an LE link (which does
864 	 * not support auto-flushing packets) */
865 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
866 	    conn->hcon->type == LE_LINK)
867 		flags = ACL_START_NO_FLUSH;
/* Signalling traffic is always high priority */
871 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
872 	skb->priority = HCI_PRIO_MAX;
874 	hci_send_acl(conn->hchan, skb, flags);
877 static bool __chan_is_moving(struct l2cap_chan *chan)
879 return chan->move_state != L2CAP_MOVE_STABLE &&
880 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit one data skb on @chan, routing it either over the AMP
 * logical link or the primary ACL link. NOTE(review): the `flags`
 * declaration, the return after the AMP send, and the ACL_START
 * fallback branch were dropped by extraction.
 */
883 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
885 	struct hci_conn *hcon = chan->conn->hcon;
888 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
/* Route over the high-speed (AMP) channel when present and stable */
891 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
893 		hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
900 	/* Use NO_FLUSH for LE links (where this is the only option) or
901 	 * if the BR/EDR link supports it and flushing has not been
902 	 * explicitly requested (through FLAG_FLUSHABLE).
904 	if (hcon->type == LE_LINK ||
905 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
906 	     lmp_no_flush_capable(hcon->hdev)))
907 		flags = ACL_START_NO_FLUSH;
911 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
912 	hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into @control.
 * NOTE(review): extraction dropped the sframe assignments and the
 * else-branch structure separating S-frame from I-frame decoding —
 * verify against upstream.
 */
915 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
917 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
918 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
/* S-frame: poll and supervise fields are valid */
920 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
923 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
924 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
/* I-frame: SAR and txseq fields are valid */
931 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
932 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field into @control; same layout
 * logic as the enhanced variant with wider fields.
 */
939 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
941 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
942 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
944 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
947 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
948 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
955 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
956 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field from the front of @skb, choosing
 * the extended or enhanced layout by FLAG_EXT_CTRL. NOTE(review): the
 * second argument of the __unpack_* calls (the destination control
 * struct) and the else line were dropped by extraction.
 */
963 static inline void __unpack_control(struct l2cap_chan *chan,
966 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
967 		__unpack_extended_control(get_unaligned_le32(skb->data),
969 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
971 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
973 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode @control into a 32-bit extended control field.
 * NOTE(review): the `packed` declaration, the else branch for I-frames
 * and the final return were dropped by extraction.
 */
977 static u32 __pack_extended_control(struct l2cap_ctrl *control)
981 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
982 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
984 	if (control->sframe) {
985 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
986 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
987 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
989 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
990 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* 16-bit enhanced variant of the encoder above. */
996 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1000 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1001 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1003 	if (control->sframe) {
1004 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1005 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1006 		packed |= L2CAP_CTRL_FRAME_TYPE;
1008 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1009 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1015 static inline void __pack_control(struct l2cap_chan *chan,
1016 struct l2cap_ctrl *control,
1017 struct sk_buff *skb)
1019 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1020 put_unaligned_le32(__pack_extended_control(control),
1021 skb->data + L2CAP_HDR_SIZE);
1023 put_unaligned_le16(__pack_enhanced_control(control),
1024 skb->data + L2CAP_HDR_SIZE);
1028 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1030 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1031 return L2CAP_EXT_HDR_SIZE;
1033 return L2CAP_ENH_HDR_SIZE;
/* Build an S-frame PDU carrying the pre-packed @control field.
 * NOTE(review): the control parameter continuation in the signature,
 * the bt_skb_alloc NULL check, the else line and the final `return
 * skb;` were dropped by extraction — verify against upstream.
 */
1036 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1039 	struct sk_buff *skb;
1040 	struct l2cap_hdr *lh;
1041 	int hlen = __ertm_hdr_size(chan);
1043 	if (chan->fcs == L2CAP_FCS_CRC16)
1044 		hlen += L2CAP_FCS_SIZE;
1046 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1049 		return ERR_PTR(-ENOMEM);
/* Basic header: payload length and destination CID */
1051 	lh = skb_put(skb, L2CAP_HDR_SIZE);
1052 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1053 	lh->cid = cpu_to_le16(chan->dcid);
1055 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1056 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1058 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* Trailing FCS covers everything written so far */
1060 	if (chan->fcs == L2CAP_FCS_CRC16) {
1061 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1062 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1065 	skb->priority = HCI_PRIO_MAX;
/* Build and send one supervisory frame described by @control, updating
 * RNR/ack bookkeeping. NOTE(review): the control_field declaration,
 * several early-return guards and the IS_ERR check on the built skb
 * were dropped by extraction.
 */
1069 static void l2cap_send_sframe(struct l2cap_chan *chan,
1070 			      struct l2cap_ctrl *control)
1072 	struct sk_buff *skb;
1075 	BT_DBG("chan %p, control %p", chan, control);
1077 	if (!control->sframe)
1080 	if (__chan_is_moving(chan))
/* Piggy-back the F-bit if one is owed */
1083 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1087 	if (control->super == L2CAP_SUPER_RR)
1088 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1089 	else if (control->super == L2CAP_SUPER_RNR)
1090 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1092 	if (control->super != L2CAP_SUPER_SREJ) {
1093 		chan->last_acked_seq = control->reqseq;
1094 		__clear_ack_timer(chan);
1097 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1098 	       control->final, control->poll, control->super);
1100 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1101 		control_field = __pack_extended_control(control);
1103 		control_field = __pack_enhanced_control(control);
1105 	skb = l2cap_create_sframe_pdu(chan, control_field);
1107 		l2cap_do_send(chan, skb);
/* Send an RR (ready) or RNR (busy) supervisory frame acknowledging up
 * to buffer_seq. NOTE(review): the `control.sframe = 1;` assignment and
 * the else line were dropped by extraction — verify upstream.
 */
1110 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1112 	struct l2cap_ctrl control;
1114 	BT_DBG("chan %p, poll %d", chan, poll);
1116 	memset(&control, 0, sizeof(control));
1118 	control.poll = poll;
/* Report busy when local receive is stalled */
1120 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1121 		control.super = L2CAP_SUPER_RNR;
1123 		control.super = L2CAP_SUPER_RR;
1125 	control.reqseq = chan->buffer_seq;
1126 	l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding for this channel.
 * NOTE(review): the early return for non-connection-oriented channels
 * was dropped by extraction.
 */
1129 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1131 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1134 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may be created on an AMP controller:
 * both sides must advertise A2MP, a non-BR/EDR AMP controller must be
 * up, and the channel policy must prefer AMP. NOTE(review): the early
 * `return false;` guards, the loop break and the final return were
 * dropped by extraction — verify upstream.
 */
1137 static bool __amp_capable(struct l2cap_chan *chan)
1139 	struct l2cap_conn *conn = chan->conn;
1140 	struct hci_dev *hdev;
1141 	bool amp_available = false;
1143 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1146 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
/* Look for any powered-up non-BR/EDR (i.e. AMP) controller */
1149 	read_lock(&hci_dev_list_lock);
1150 	list_for_each_entry(hdev, &hci_dev_list, list) {
1151 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1152 		    test_bit(HCI_UP, &hdev->flags)) {
1153 			amp_available = true;
1157 	read_unlock(&hci_dev_list_lock);
1159 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1160 		return amp_available;
/* EFS validation hook. NOTE(review): the body (presumably `return
 * true;`) was dropped by extraction.
 */
1165 static bool l2cap_check_efs(struct l2cap_chan *chan)
1167 	/* Check EFS parameters */
1171 void l2cap_send_conn_req(struct l2cap_chan *chan)
1173 struct l2cap_conn *conn = chan->conn;
1174 struct l2cap_conn_req req;
1176 req.scid = cpu_to_le16(chan->scid);
1177 req.psm = chan->psm;
1179 chan->ident = l2cap_get_ident(conn);
1181 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1183 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1186 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1188 struct l2cap_create_chan_req req;
1189 req.scid = cpu_to_le16(chan->scid);
1190 req.psm = chan->psm;
1191 req.amp_id = amp_id;
1193 chan->ident = l2cap_get_ident(chan->conn);
1195 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop timers, reset retry
 * bookkeeping and receive state. NOTE(review): the non-ERTM early
 * return and the else/break inside the queue walk were dropped by
 * extraction — verify upstream.
 */
1199 static void l2cap_move_setup(struct l2cap_chan *chan)
1201 	struct sk_buff *skb;
1203 	BT_DBG("chan %p", chan);
1205 	if (chan->mode != L2CAP_MODE_ERTM)
1208 	__clear_retrans_timer(chan);
1209 	__clear_monitor_timer(chan);
1210 	__clear_ack_timer(chan);
1212 	chan->retry_count = 0;
/* Reset per-skb retry counts of frames already attempted */
1213 	skb_queue_walk(&chan->tx_q, skb) {
1214 		if (bt_cb(skb)->l2cap.retries)
1215 			bt_cb(skb)->l2cap.retries = 1;
1220 	chan->expected_tx_seq = chan->buffer_seq;
1222 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1223 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1224 	l2cap_seq_list_clear(&chan->retrans_list);
1225 	l2cap_seq_list_clear(&chan->srej_list);
1226 	skb_queue_purge(&chan->srej_q);
1228 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1229 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1231 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return to stable state and, for ERTM, restart
 * the poll/final handshake according to the role we played.
 * NOTE(review): returns, breaks and the default arm were dropped.
 */
1234 static void l2cap_move_done(struct l2cap_chan *chan)
1236 	u8 move_role = chan->move_role;
1237 	BT_DBG("chan %p", chan);
1239 	chan->move_state = L2CAP_MOVE_STABLE;
1240 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1242 	if (chan->mode != L2CAP_MODE_ERTM)
1245 	switch (move_role) {
1246 	case L2CAP_MOVE_ROLE_INITIATOR:
1247 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1248 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1250 	case L2CAP_MOVE_ROLE_RESPONDER:
1251 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1256 static void l2cap_chan_ready(struct l2cap_chan *chan)
1258 /* The channel may have already been flagged as connected in
1259 * case of receiving data before the L2CAP info req/rsp
1260 * procedure is complete.
1262 if (chan->state == BT_CONNECTED)
1265 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1266 chan->conf_state = 0;
1267 __clear_chan_timer(chan);
1269 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1270 chan->ops->suspend(chan);
1272 chan->state = BT_CONNECTED;
1274 chan->ops->ready(chan);
/* Send an LE credit-based Connection Request for @chan (at most once
 * per channel). NOTE(review): the early return after the
 * test_and_set_bit guard and the sizeof/&req continuation of the
 * l2cap_send_cmd call were dropped by extraction.
 */
1277 static void l2cap_le_connect(struct l2cap_chan *chan)
1279 	struct l2cap_conn *conn = chan->conn;
1280 	struct l2cap_le_conn_req req;
1282 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1285 	req.psm     = chan->psm;
1286 	req.scid    = cpu_to_le16(chan->scid);
1287 	req.mtu     = cpu_to_le16(chan->imtu);
1288 	req.mps     = cpu_to_le16(chan->mps);
1289 	req.credits = cpu_to_le16(chan->rx_credits);
1291 	chan->ident = l2cap_get_ident(conn);
1293 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Start an LE channel once SMP security is satisfied.
 * NOTE(review): the `if (!chan->psm)` branch (fixed channels become
 * ready immediately) was dropped by extraction — the bare
 * l2cap_chan_ready call below belongs to it.
 */
1297 static void l2cap_le_start(struct l2cap_chan *chan)
1299 	struct l2cap_conn *conn = chan->conn;
1301 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1305 		l2cap_chan_ready(chan);
1309 	if (chan->state == BT_CONNECT)
1310 		l2cap_le_connect(chan);
1313 static void l2cap_start_connection(struct l2cap_chan *chan)
1315 if (__amp_capable(chan)) {
1316 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1317 a2mp_discover_amp(chan);
1318 } else if (chan->conn->hcon->type == LE_LINK) {
1319 l2cap_le_start(chan);
1321 l2cap_send_conn_req(chan);
/* Send an Information Request (feature mask) once per connection and
 * arm the info timer. NOTE(review): the early return after the
 * REQ_SENT test and the sizeof/&req continuation of l2cap_send_cmd
 * were dropped by extraction.
 */
1325 static void l2cap_request_info(struct l2cap_conn *conn)
1327 	struct l2cap_info_req req;
1329 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1332 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1334 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1335 	conn->info_ident = l2cap_get_ident(conn);
1337 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1339 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1343 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1345 /* The minimum encryption key size needs to be enforced by the
1346 * host stack before establishing any L2CAP connections. The
1347 * specification in theory allows a minimum of 1, but to align
1348 * BR/EDR and LE transports, a minimum of 7 is chosen.
1350 * This check might also be called for unencrypted connections
1351 * that have no key size requirements. Ensure that the link is
1352 * actually encrypted before enforcing a key size.
1354 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1355 hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
/* Drive connection establishment for @chan: LE goes straight to the LE
 * path; BR/EDR first completes the info req/rsp exchange, then checks
 * security and key size. NOTE(review): the `return;` lines after the
 * LE branch and the info-request branch were dropped by extraction.
 */
1358 static void l2cap_do_start(struct l2cap_chan *chan)
1360 	struct l2cap_conn *conn = chan->conn;
1362 	if (conn->hcon->type == LE_LINK) {
1363 		l2cap_le_start(chan);
1367 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1368 		l2cap_request_info(conn);
1372 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1375 	if (!l2cap_chan_check_security(chan, true) ||
1376 	    !__l2cap_no_conn_pending(chan))
/* Weak encryption keys stall the connect until disconnect timeout */
1379 	if (l2cap_check_enc_key_size(conn->hcon))
1380 		l2cap_start_connection(chan);
1382 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Report whether @mode is usable given local and remote feature masks.
 * NOTE(review): the condition guarding the ERTM/streaming feature-mask
 * extension (likely `if (!disable_ertm)`) and the default return were
 * dropped by extraction.
 */
1385 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1387 	u32 local_feat_mask = l2cap_feat_mask;
1389 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1392 	case L2CAP_MODE_ERTM:
1393 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1394 	case L2CAP_MODE_STREAMING:
1395 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Initiate disconnection of a channel: stop ERTM timers, send an
 * L2CAP Disconnection Request (skipped for the A2MP fixed channel),
 * and move the channel to BT_DISCONN recording err for the owner.
 */
1401 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1403 struct l2cap_conn *conn = chan->conn;
1404 struct l2cap_disconn_req req;
/* A connected ERTM channel may still have timers pending; kill them. */
1409 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1410 __clear_retrans_timer(chan);
1411 __clear_monitor_timer(chan);
1412 __clear_ack_timer(chan);
/* A2MP uses a fixed channel — no Disconnection Request on the wire. */
1415 if (chan->scid == L2CAP_CID_A2MP) {
1416 l2cap_state_change(chan, BT_DISCONN);
1420 req.dcid = cpu_to_le16(chan->dcid);
1421 req.scid = cpu_to_le16(chan->scid);
1422 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1425 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1428 /* ---- L2CAP connections ---- */
/* Walk all channels on the connection (under chan_lock) and advance
 * each one: non-connection-oriented channels become ready immediately;
 * channels in BT_CONNECT are (re)started once security and the
 * encryption key size check pass; channels in BT_CONNECT2 (incoming,
 * pending) get their Connection Response sent, followed by the first
 * Configure Request on success.
 */
1429 static void l2cap_conn_start(struct l2cap_conn *conn)
1431 struct l2cap_chan *chan, *tmp;
1433 BT_DBG("conn %p", conn);
1435 mutex_lock(&conn->chan_lock);
1437 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1438 l2cap_chan_lock(chan);
1440 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1441 l2cap_chan_ready(chan);
1442 l2cap_chan_unlock(chan);
1446 if (chan->state == BT_CONNECT) {
1447 if (!l2cap_chan_check_security(chan, true) ||
1448 !__l2cap_no_conn_pending(chan)) {
1449 l2cap_chan_unlock(chan);
/* Close a channel whose configured mode neither side supports. */
1453 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1454 && test_bit(CONF_STATE2_DEVICE,
1455 &chan->conf_state)) {
1456 l2cap_chan_close(chan, ECONNRESET);
1457 l2cap_chan_unlock(chan);
/* Enforce minimum encryption key size before starting the channel. */
1461 if (l2cap_check_enc_key_size(conn->hcon))
1462 l2cap_start_connection(chan);
1464 l2cap_chan_close(chan, ECONNREFUSED);
/* Incoming connection waiting for a response (BT_CONNECT2). */
1466 } else if (chan->state == BT_CONNECT2) {
1467 struct l2cap_conn_rsp rsp;
1469 rsp.scid = cpu_to_le16(chan->dcid);
1470 rsp.dcid = cpu_to_le16(chan->scid);
1472 if (l2cap_chan_check_security(chan, false)) {
1473 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1474 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1475 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1476 chan->ops->defer(chan);
1479 l2cap_state_change(chan, BT_CONFIG);
1480 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1481 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1484 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1485 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1488 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only send the first Configure Request after a successful response. */
1491 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1492 rsp.result != L2CAP_CR_SUCCESS) {
1493 l2cap_chan_unlock(chan);
1497 set_bit(CONF_REQ_SENT, &chan->conf_state);
1498 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1499 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1500 chan->num_conf_req++;
1503 l2cap_chan_unlock(chan);
1506 mutex_unlock(&conn->chan_lock);
/* LE link became ready: kick pending SMP security (covers outgoing
 * pairing without a socket) and, for slave-role links whose current
 * connection interval is outside the configured min/max range, send a
 * Connection Parameter Update Request.
 */
1509 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1511 struct hci_conn *hcon = conn->hcon;
1512 struct hci_dev *hdev = hcon->hdev;
1514 BT_DBG("%s conn %p", hdev->name, conn);
1516 /* For outgoing pairing which doesn't necessarily have an
1517 * associated socket (e.g. mgmt_pair_device).
1520 smp_conn_security(hcon, hcon->pending_sec_level);
1522 /* For LE slave connections, make sure the connection interval
1523 * is in the range of the minimum and maximum interval that has
1524 * been configured for this connection. If not, then trigger
1525 * the connection update procedure.
1527 if (hcon->role == HCI_ROLE_SLAVE &&
1528 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1529 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1530 struct l2cap_conn_param_update_req req;
1532 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1533 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1534 req.latency = cpu_to_le16(hcon->le_conn_latency);
1535 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1537 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1538 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* Underlying link is up: request the BR/EDR feature mask if needed,
 * then advance every channel on the connection (skipping A2MP). LE
 * links additionally run l2cap_le_conn_ready() and finally queued RX
 * processing is scheduled on the hdev workqueue.
 */
1542 static void l2cap_conn_ready(struct l2cap_conn *conn)
1544 struct l2cap_chan *chan;
1545 struct hci_conn *hcon = conn->hcon;
1547 BT_DBG("conn %p", conn);
1549 if (hcon->type == ACL_LINK)
1550 l2cap_request_info(conn);
1552 mutex_lock(&conn->chan_lock);
1554 list_for_each_entry(chan, &conn->chan_l, list) {
1556 l2cap_chan_lock(chan);
/* A2MP fixed channel needs no per-channel start here. */
1558 if (chan->scid == L2CAP_CID_A2MP) {
1559 l2cap_chan_unlock(chan);
1563 if (hcon->type == LE_LINK) {
1564 l2cap_le_start(chan);
1565 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1566 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1567 l2cap_chan_ready(chan);
1568 } else if (chan->state == BT_CONNECT) {
1569 l2cap_do_start(chan);
1572 l2cap_chan_unlock(chan);
1575 mutex_unlock(&conn->chan_lock);
1577 if (hcon->type == LE_LINK)
1578 l2cap_le_conn_ready(conn);
1580 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1583 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate err to every channel on the connection that requested
 * reliable delivery (FLAG_FORCE_RELIABLE), since reliability can no
 * longer be guaranteed on this link.
 */
1584 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1586 struct l2cap_chan *chan;
1588 BT_DBG("conn %p", conn);
1590 mutex_lock(&conn->chan_lock);
1592 list_for_each_entry(chan, &conn->chan_l, list) {
1593 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1594 l2cap_chan_set_err(chan, err);
1597 mutex_unlock(&conn->chan_lock);
/* info_timer expired without an Information Response: mark the feature
 * mask exchange as done anyway so channel setup can proceed with
 * defaults, then restart pending channels.
 */
1600 static void l2cap_info_timeout(struct work_struct *work)
1602 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1605 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1606 conn->info_ident = 0;
1608 l2cap_conn_start(conn);
1613 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1614 * callback is called during registration. The ->remove callback is called
1615 * during unregistration.
1616 * An l2cap_user object can either be explicitly unregistered or when the
1617 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1618 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1619 * External modules must own a reference to the l2cap_conn object if they intend
1620 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1621 * any time if they don't.
/* Register an external l2cap_user on the connection under the hci_dev
 * lock. Fails if the user is already registered (non-empty list node),
 * if the conn was already unregistered (hchan == NULL), or if the
 * user's ->probe() callback rejects it.
 */
1624 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1626 struct hci_dev *hdev = conn->hcon->hdev;
1629 /* We need to check whether l2cap_conn is registered. If it is not, we
1630 * must not register the l2cap_user. l2cap_conn_del() unregisters
1631 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1632 * relies on the parent hci_conn object to be locked. This itself relies
1633 * on the hci_dev object to be locked. So we must lock the hci device
1638 if (!list_empty(&user->list)) {
1643 /* conn->hchan is NULL after l2cap_conn_del() was called */
1649 ret = user->probe(conn, user);
1653 list_add(&user->list, &conn->users);
1657 hci_dev_unlock(hdev);
1660 EXPORT_SYMBOL(l2cap_register_user);
/* Unregister a previously registered l2cap_user under the hci_dev lock;
 * a user whose list node is already empty is silently ignored. The
 * ->remove() callback runs before the lock is dropped.
 */
1662 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1664 struct hci_dev *hdev = conn->hcon->hdev;
1668 if (list_empty(&user->list))
1671 list_del_init(&user->list);
1672 user->remove(conn, user);
1675 hci_dev_unlock(hdev);
1677 EXPORT_SYMBOL(l2cap_unregister_user);
/* Detach and notify (->remove) every registered l2cap_user; used when
 * the connection itself is being torn down. list_del_init keeps each
 * user's node reusable for re-registration.
 */
1679 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1681 struct l2cap_user *user;
1683 while (!list_empty(&conn->users)) {
1684 user = list_first_entry(&conn->users, struct l2cap_user, list);
1685 list_del_init(&user->list);
1686 user->remove(conn, user);
/* Tear down the l2cap_conn attached to hcon: flush pending RX work,
 * unregister users, close every channel with err, drop the HCI channel
 * and finally release the conn reference. Caller context is assumed to
 * hold the locks described in l2cap_register_user().
 */
1690 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1692 struct l2cap_conn *conn = hcon->l2cap_data;
1693 struct l2cap_chan *chan, *l;
1698 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1700 kfree_skb(conn->rx_skb);
1702 skb_queue_purge(&conn->pending_rx);
1704 /* We can not call flush_work(&conn->pending_rx_work) here since we
1705 * might block if we are running on a worker from the same workqueue
1706 * pending_rx_work is waiting on.
1708 if (work_pending(&conn->pending_rx_work))
1709 cancel_work_sync(&conn->pending_rx_work);
1711 if (work_pending(&conn->id_addr_update_work))
1712 cancel_work_sync(&conn->id_addr_update_work);
1714 l2cap_unregister_all_users(conn);
1716 /* Force the connection to be immediately dropped */
1717 hcon->disc_timeout = 0;
1719 mutex_lock(&conn->chan_lock);
/* Hold each channel across del/close so the callbacks can't free it
 * out from under us.
 */
1722 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1723 l2cap_chan_hold(chan);
1724 l2cap_chan_lock(chan);
1726 l2cap_chan_del(chan, err);
1728 l2cap_chan_unlock(chan);
1730 chan->ops->close(chan);
1731 l2cap_chan_put(chan);
1734 mutex_unlock(&conn->chan_lock);
1736 hci_chan_del(conn->hchan);
1738 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1739 cancel_delayed_work_sync(&conn->info_timer);
1741 hcon->l2cap_data = NULL;
1743 l2cap_conn_put(conn);
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn. NOTE(review): the kfree of conn itself appears elided
 * from this view.
 */
1746 static void l2cap_conn_free(struct kref *ref)
1748 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1750 hci_conn_put(conn->hcon);
/* Take a reference on the connection; paired with l2cap_conn_put(). */
1754 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1756 kref_get(&conn->ref);
1759 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference; the last put frees the conn via l2cap_conn_free. */
1761 void l2cap_conn_put(struct l2cap_conn *conn)
1763 kref_put(&conn->ref, l2cap_conn_free);
1765 EXPORT_SYMBOL(l2cap_conn_put);
1767 /* ---- Socket interface ---- */
1769 /* Find socket with psm and source / destination bdaddr.
1770 * Returns closest match.
/* Look up a global channel by PSM and source/destination address,
 * filtered by state and link type. An exact src+dst match wins
 * immediately; otherwise the closest wildcard (BDADDR_ANY) match is
 * remembered and returned with a reference held via l2cap_chan_hold().
 */
1772 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1777 struct l2cap_chan *c, *c1 = NULL;
1779 read_lock(&chan_list_lock);
1781 list_for_each_entry(c, &chan_list, global_l) {
1782 if (state && c->state != state)
/* Channels must match the transport of the incoming link. */
1785 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1788 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1791 if (c->psm == psm) {
1792 int src_match, dst_match;
1793 int src_any, dst_any;
/* Exact match — return right away under the read lock. */
1796 src_match = !bacmp(&c->src, src);
1797 dst_match = !bacmp(&c->dst, dst);
1798 if (src_match && dst_match) {
1800 read_unlock(&chan_list_lock);
/* Otherwise keep the best wildcard candidate. */
1805 src_any = !bacmp(&c->src, BDADDR_ANY);
1806 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1807 if ((src_match && dst_any) || (src_any && dst_match) ||
1808 (src_any && dst_any))
1814 l2cap_chan_hold(c1);
1816 read_unlock(&chan_list_lock);
/* ERTM monitor timer expiry (delayed work): feed L2CAP_EV_MONITOR_TO
 * into the TX state machine and drop the reference taken when the
 * timer was armed. NOTE(review): the early-exit path between the two
 * unlock/put pairs appears elided from this view.
 */
1821 static void l2cap_monitor_timeout(struct work_struct *work)
1823 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1824 monitor_timer.work);
1826 BT_DBG("chan %p", chan);
1828 l2cap_chan_lock(chan);
1831 l2cap_chan_unlock(chan);
1832 l2cap_chan_put(chan);
1836 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1838 l2cap_chan_unlock(chan);
1839 l2cap_chan_put(chan);
/* ERTM retransmission timer expiry (delayed work): feed
 * L2CAP_EV_RETRANS_TO into the TX state machine, then release the
 * timer's channel reference. NOTE(review): the early-exit path between
 * the two unlock/put pairs appears elided from this view.
 */
1842 static void l2cap_retrans_timeout(struct work_struct *work)
1844 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1845 retrans_timer.work);
1847 BT_DBG("chan %p", chan);
1849 l2cap_chan_lock(chan);
1852 l2cap_chan_unlock(chan);
1853 l2cap_chan_put(chan);
1857 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1858 l2cap_chan_unlock(chan);
1859 l2cap_chan_put(chan);
/* Streaming-mode transmit: splice the segmented SDU queue onto tx_q
 * and send every frame immediately (no retransmission), stamping each
 * with the next TX sequence number and an optional CRC16 FCS.
 */
1862 static void l2cap_streaming_send(struct l2cap_chan *chan,
1863 struct sk_buff_head *skbs)
1865 struct sk_buff *skb;
1866 struct l2cap_ctrl *control;
1868 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while an AMP channel move is in progress. */
1870 if (__chan_is_moving(chan))
1873 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1875 while (!skb_queue_empty(&chan->tx_q)) {
1877 skb = skb_dequeue(&chan->tx_q);
1879 bt_cb(skb)->l2cap.retries = 1;
1880 control = &bt_cb(skb)->l2cap;
1882 control->reqseq = 0;
1883 control->txseq = chan->next_tx_seq;
1885 __pack_control(chan, control, skb);
/* Append the frame check sequence when CRC16 FCS is negotiated. */
1887 if (chan->fcs == L2CAP_FCS_CRC16) {
1888 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1889 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1892 l2cap_do_send(chan, skb);
1894 BT_DBG("Sent txseq %u", control->txseq);
1896 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1897 chan->frames_sent++;
/* ERTM transmit: send queued I-frames starting at tx_send_head while
 * the remote TX window has room and the channel is in the XMIT state.
 * Each frame is cloned before sending so the original stays queued for
 * possible retransmission; the retransmission timer is (re)armed per
 * frame. Returns the number of frames sent (via the elided return).
 */
1901 static int l2cap_ertm_send(struct l2cap_chan *chan)
1903 struct sk_buff *skb, *tx_skb;
1904 struct l2cap_ctrl *control;
1907 BT_DBG("chan %p", chan);
1909 if (chan->state != BT_CONNECTED)
1912 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1915 if (__chan_is_moving(chan))
1918 while (chan->tx_send_head &&
1919 chan->unacked_frames < chan->remote_tx_win &&
1920 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1922 skb = chan->tx_send_head;
1924 bt_cb(skb)->l2cap.retries = 1;
1925 control = &bt_cb(skb)->l2cap;
1927 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Piggyback the current ack state on every outgoing I-frame. */
1930 control->reqseq = chan->buffer_seq;
1931 chan->last_acked_seq = chan->buffer_seq;
1932 control->txseq = chan->next_tx_seq;
1934 __pack_control(chan, control, skb);
1936 if (chan->fcs == L2CAP_FCS_CRC16) {
1937 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1938 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1941 /* Clone after data has been modified. Data is assumed to be
1942 read-only (for locking purposes) on cloned sk_buffs.
1944 tx_skb = skb_clone(skb, GFP_KERNEL);
1949 __set_retrans_timer(chan);
1951 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1952 chan->unacked_frames++;
1953 chan->frames_sent++;
/* Advance tx_send_head; NULL once the queue tail has been sent. */
1956 if (skb_queue_is_last(&chan->tx_q, skb))
1957 chan->tx_send_head = NULL;
1959 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1961 l2cap_do_send(chan, tx_skb);
1962 BT_DBG("Sent txseq %u", control->txseq);
1965 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1966 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on retrans_list. Frames that
 * exceed max_tx retries trigger a disconnect. Cloned skbs must be
 * copied (not re-cloned) before their control field is rewritten, since
 * clone data is treated as read-only.
 */
1971 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1973 struct l2cap_ctrl control;
1974 struct sk_buff *skb;
1975 struct sk_buff *tx_skb;
1978 BT_DBG("chan %p", chan);
1980 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1983 if (__chan_is_moving(chan))
1986 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1987 seq = l2cap_seq_list_pop(&chan->retrans_list);
1989 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1991 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1996 bt_cb(skb)->l2cap.retries++;
1997 control = bt_cb(skb)->l2cap;
/* Give up and tear the channel down once the retry limit is hit. */
1999 if (chan->max_tx != 0 &&
2000 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2001 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2002 l2cap_send_disconn_req(chan, ECONNRESET);
2003 l2cap_seq_list_clear(&chan->retrans_list);
2007 control.reqseq = chan->buffer_seq;
2008 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2013 if (skb_cloned(skb)) {
2014 /* Cloned sk_buffs are read-only, so we need a
2017 tx_skb = skb_copy(skb, GFP_KERNEL);
2019 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: drop the retransmit list and bail out. */
2023 l2cap_seq_list_clear(&chan->retrans_list);
2027 /* Update skb contents */
2028 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2029 put_unaligned_le32(__pack_extended_control(&control),
2030 tx_skb->data + L2CAP_HDR_SIZE);
2032 put_unaligned_le16(__pack_enhanced_control(&control),
2033 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS over the rewritten frame (excluding old FCS). */
2037 if (chan->fcs == L2CAP_FCS_CRC16) {
2038 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2039 tx_skb->len - L2CAP_FCS_SIZE);
2040 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2044 l2cap_do_send(chan, tx_skb);
2046 BT_DBG("Resent txseq %d", control.txseq);
2048 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame named by control->reqseq. */
2052 static void l2cap_retransmit(struct l2cap_chan *chan,
2053 struct l2cap_ctrl *control)
2055 BT_DBG("chan %p, control %p", chan, control);
2057 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2058 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame from control->reqseq up to (but not
 * including) tx_send_head: walk tx_q to the starting frame, then queue
 * each following txseq on retrans_list and resend.
 */
2061 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2062 struct l2cap_ctrl *control)
2064 struct sk_buff *skb;
2066 BT_DBG("chan %p, control %p", chan, control);
2069 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2071 l2cap_seq_list_clear(&chan->retrans_list);
2073 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2076 if (chan->unacked_frames) {
/* Find the first frame to retransmit (reqseq or queue head). */
2077 skb_queue_walk(&chan->tx_q, skb) {
2078 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2079 skb == chan->tx_send_head)
2083 skb_queue_walk_from(&chan->tx_q, skb) {
2084 if (skb == chan->tx_send_head)
2087 l2cap_seq_list_append(&chan->retrans_list,
2088 bt_cb(skb)->l2cap.txseq);
2091 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames. Sends RNR when locally busy,
 * otherwise tries to piggyback the ack on pending I-frames; failing
 * that, sends an explicit RR once ~3/4 of the ack window is pending
 * (threshold computed without multiply/divide), else re-arms the ack
 * timer.
 */
2095 static void l2cap_send_ack(struct l2cap_chan *chan)
2097 struct l2cap_ctrl control;
2098 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2099 chan->last_acked_seq);
2102 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2103 chan, chan->last_acked_seq, chan->buffer_seq);
2105 memset(&control, 0, sizeof(control));
/* Local busy: tell the peer to hold off with an RNR S-frame. */
2108 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2109 chan->rx_state == L2CAP_RX_STATE_RECV) {
2110 __clear_ack_timer(chan);
2111 control.super = L2CAP_SUPER_RNR;
2112 control.reqseq = chan->buffer_seq;
2113 l2cap_send_sframe(chan, &control);
2115 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2116 l2cap_ertm_send(chan);
2117 /* If any i-frames were sent, they included an ack */
2118 if (chan->buffer_seq == chan->last_acked_seq)
2122 /* Ack now if the window is 3/4ths full.
2123 * Calculate without mul or div
2125 threshold = chan->ack_win;
2126 threshold += threshold << 1;
2129 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2132 if (frames_to_ack >= threshold) {
2133 __clear_ack_timer(chan);
2134 control.super = L2CAP_SUPER_RR;
2135 control.reqseq = chan->buffer_seq;
2136 l2cap_send_sframe(chan, &control);
2141 __set_ack_timer(chan);
/* Copy user data from msg into skb, allocating continuation fragments
 * (chained on frag_list, each at most conn->mtu bytes) for data beyond
 * the first buffer. Fragments carry no L2CAP header. Returns 0 or a
 * negative errno (via the elided return).
 */
2145 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2146 struct msghdr *msg, int len,
2147 int count, struct sk_buff *skb)
2149 struct l2cap_conn *conn = chan->conn;
2150 struct sk_buff **frag;
2153 if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2159 /* Continuation fragments (no L2CAP header) */
2160 frag = &skb_shinfo(skb)->frag_list;
2162 struct sk_buff *tmp;
2164 count = min_t(unsigned int, conn->mtu, len);
2166 tmp = chan->ops->alloc_skb(chan, 0, count,
2167 msg->msg_flags & MSG_DONTWAIT);
2169 return PTR_ERR(tmp);
2173 if (!copy_from_iter_full(skb_put(*frag, count), count,
/* Keep the parent skb's accounting in sync with its fragments. */
2180 skb->len += (*frag)->len;
2181 skb->data_len += (*frag)->len;
2183 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, followed by the user payload gathered from msg. Returns
 * the skb or an ERR_PTR on copy failure.
 */
2189 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2190 struct msghdr *msg, size_t len)
2192 struct l2cap_conn *conn = chan->conn;
2193 struct sk_buff *skb;
2194 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2195 struct l2cap_hdr *lh;
2197 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2198 __le16_to_cpu(chan->psm), len);
2200 count = min_t(unsigned int, (conn->mtu - hlen), len);
2202 skb = chan->ops->alloc_skb(chan, hlen, count,
2203 msg->msg_flags & MSG_DONTWAIT);
2207 /* Create L2CAP header */
2208 lh = skb_put(skb, L2CAP_HDR_SIZE);
2209 lh->cid = cpu_to_le16(chan->dcid);
2210 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2211 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2213 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2214 if (unlikely(err < 0)) {
2216 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload from msg. Returns the skb or an ERR_PTR on copy failure.
 */
2221 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2222 struct msghdr *msg, size_t len)
2224 struct l2cap_conn *conn = chan->conn;
2225 struct sk_buff *skb;
2227 struct l2cap_hdr *lh;
2229 BT_DBG("chan %p len %zu", chan, len);
2231 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2233 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2234 msg->msg_flags & MSG_DONTWAIT);
2238 /* Create L2CAP header */
2239 lh = skb_put(skb, L2CAP_HDR_SIZE);
2240 lh->cid = cpu_to_le16(chan->dcid);
2241 lh->len = cpu_to_le16(len);
2243 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2244 if (unlikely(err < 0)) {
2246 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU length field for
 * the first segment of a segmented SDU, then the payload. FCS space is
 * reserved in the header length when CRC16 is in use. Returns the skb
 * or an ERR_PTR.
 */
2251 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2252 struct msghdr *msg, size_t len,
2255 struct l2cap_conn *conn = chan->conn;
2256 struct sk_buff *skb;
2257 int err, count, hlen;
2258 struct l2cap_hdr *lh;
2260 BT_DBG("chan %p len %zu", chan, len);
2263 return ERR_PTR(-ENOTCONN);
2265 hlen = __ertm_hdr_size(chan);
/* sdulen != 0 marks a SAR "start" segment carrying the SDU length. */
2268 hlen += L2CAP_SDULEN_SIZE;
2270 if (chan->fcs == L2CAP_FCS_CRC16)
2271 hlen += L2CAP_FCS_SIZE;
2273 count = min_t(unsigned int, (conn->mtu - hlen), len);
2275 skb = chan->ops->alloc_skb(chan, hlen, count,
2276 msg->msg_flags & MSG_DONTWAIT);
2280 /* Create L2CAP header */
2281 lh = skb_put(skb, L2CAP_HDR_SIZE);
2282 lh->cid = cpu_to_le16(chan->dcid);
2283 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2285 /* Control header is populated later */
2286 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2287 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))
2289 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2292 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2294 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2295 if (unlikely(err < 0)) {
2297 return ERR_PTR(err);
2300 bt_cb(skb)->l2cap.fcs = chan->fcs;
2301 bt_cb(skb)->l2cap.retries = 0;
/* Segment an outgoing SDU into ERTM/streaming PDUs queued on seg_queue.
 * PDU size is derived from the HCI MTU (each PDU must fit one HCI
 * fragment), capped for BR/EDR, reduced by header/FCS overhead and the
 * remote MPS. SAR markers run UNSEGMENTED, or START/CONTINUE/END.
 */
2305 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2306 struct sk_buff_head *seg_queue,
2307 struct msghdr *msg, size_t len)
2309 struct sk_buff *skb;
2314 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2316 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2317 * so fragmented skbs are not used. The HCI layer's handling
2318 * of fragmented skbs is not compatible with ERTM's queueing.
2321 /* PDU size is derived from the HCI MTU */
2322 pdu_len = chan->conn->mtu;
2324 /* Constrain PDU size for BR/EDR connections */
2326 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2328 /* Adjust for largest possible L2CAP overhead. */
2330 pdu_len -= L2CAP_FCS_SIZE;
2332 pdu_len -= __ertm_hdr_size(chan);
2334 /* Remote device may have requested smaller PDUs */
2335 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2337 if (len <= pdu_len) {
2338 sar = L2CAP_SAR_UNSEGMENTED;
2342 sar = L2CAP_SAR_START;
2347 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2350 __skb_queue_purge(seg_queue);
2351 return PTR_ERR(skb);
2354 bt_cb(skb)->l2cap.sar = sar;
2355 __skb_queue_tail(seg_queue, skb);
/* Remaining data decides whether the next segment ends the SDU. */
2361 if (len <= pdu_len) {
2362 sar = L2CAP_SAR_END;
2365 sar = L2CAP_SAR_CONTINUE;
/* Build an LE credit-based flow control PDU: L2CAP header, optional
 * 2-byte SDU length on the first segment (sdulen != 0), then payload.
 * Returns the skb or an ERR_PTR.
 */
2372 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2374 size_t len, u16 sdulen)
2376 struct l2cap_conn *conn = chan->conn;
2377 struct sk_buff *skb;
2378 int err, count, hlen;
2379 struct l2cap_hdr *lh;
2381 BT_DBG("chan %p len %zu", chan, len);
2384 return ERR_PTR(-ENOTCONN);
2386 hlen = L2CAP_HDR_SIZE;
2389 hlen += L2CAP_SDULEN_SIZE;
2391 count = min_t(unsigned int, (conn->mtu - hlen), len);
2393 skb = chan->ops->alloc_skb(chan, hlen, count,
2394 msg->msg_flags & MSG_DONTWAIT);
2398 /* Create L2CAP header */
2399 lh = skb_put(skb, L2CAP_HDR_SIZE);
2400 lh->cid = cpu_to_le16(chan->dcid);
2401 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2404 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2406 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2407 if (unlikely(err < 0)) {
2409 return ERR_PTR(err);
/* Segment an SDU for LE credit-based flow control: the first PDU
 * carries the SDU length (so its payload budget is remote_mps minus
 * L2CAP_SDULEN_SIZE); later PDUs use the full remote MPS.
 */
2415 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2416 struct sk_buff_head *seg_queue,
2417 struct msghdr *msg, size_t len)
2419 struct sk_buff *skb;
2423 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2426 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2432 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2434 __skb_queue_purge(seg_queue);
2435 return PTR_ERR(skb);
2438 __skb_queue_tail(seg_queue, skb);
/* Only the first segment carries the SDU length field. */
2444 pdu_len += L2CAP_SDULEN_SIZE;
/* Drain tx_q while the peer has granted credits; one credit is
 * consumed per PDU (the decrement appears elided from this view).
 */
2451 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2455 BT_DBG("chan %p", chan);
2457 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2458 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2463 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2464 skb_queue_len(&chan->tx_q));
/* Transmit a user SDU on the channel, dispatching on channel type and
 * mode: connectionless G-frames, LE credit-based flow control, basic
 * B-frames, or ERTM/streaming via segmentation plus the TX state
 * machine. The channel lock is dropped during skb allocation, so the
 * BT_CONNECTED state is rechecked afterwards in each path.
 */
2467 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2469 struct sk_buff *skb;
2471 struct sk_buff_head seg_queue;
2476 /* Connectionless channel */
2477 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2478 skb = l2cap_create_connless_pdu(chan, msg, len);
2480 return PTR_ERR(skb);
2482 /* Channel lock is released before requesting new skb and then
2483 * reacquired thus we need to recheck channel state.
2485 if (chan->state != BT_CONNECTED) {
2490 l2cap_do_send(chan, skb);
2494 switch (chan->mode) {
2495 case L2CAP_MODE_LE_FLOWCTL:
2496 /* Check outgoing MTU */
2497 if (len > chan->omtu)
2500 __skb_queue_head_init(&seg_queue);
2502 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2504 if (chan->state != BT_CONNECTED) {
2505 __skb_queue_purge(&seg_queue);
2512 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2514 l2cap_le_flowctl_send(chan);
/* Out of credits: ask the socket layer to stop feeding us data. */
2516 if (!chan->tx_credits)
2517 chan->ops->suspend(chan);
2523 case L2CAP_MODE_BASIC:
2524 /* Check outgoing MTU */
2525 if (len > chan->omtu)
2528 /* Create a basic PDU */
2529 skb = l2cap_create_basic_pdu(chan, msg, len);
2531 return PTR_ERR(skb);
2533 /* Channel lock is released before requesting new skb and then
2534 * reacquired thus we need to recheck channel state.
2536 if (chan->state != BT_CONNECTED) {
2541 l2cap_do_send(chan, skb);
2545 case L2CAP_MODE_ERTM:
2546 case L2CAP_MODE_STREAMING:
2547 /* Check outgoing MTU */
2548 if (len > chan->omtu) {
2553 __skb_queue_head_init(&seg_queue);
2555 /* Do segmentation before calling in to the state machine,
2556 * since it's possible to block while waiting for memory
2559 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2561 /* The channel could have been closed while segmenting,
2562 * check that it is still connected.
2564 if (chan->state != BT_CONNECTED) {
2565 __skb_queue_purge(&seg_queue);
2572 if (chan->mode == L2CAP_MODE_ERTM)
2573 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2575 l2cap_streaming_send(chan, &seg_queue);
2579 /* If the skbs were not queued for sending, they'll still be in
2580 * seg_queue and need to be purged.
2582 __skb_queue_purge(&seg_queue);
/* NOTE(review): message says "bad state" but prints chan->mode —
 * likely should read "bad mode"; confirm against upstream intent.
 */
2586 BT_DBG("bad state %1.1x", chan->mode);
2592 EXPORT_SYMBOL_GPL(l2cap_chan_send);
/* Send an SREJ S-frame for every missing sequence number between the
 * expected TX seq and the newly received txseq, recording each on
 * srej_list, then advance expected_tx_seq past txseq.
 */
2594 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2596 struct l2cap_ctrl control;
2599 BT_DBG("chan %p, txseq %u", chan, txseq);
2601 memset(&control, 0, sizeof(control));
2603 control.super = L2CAP_SUPER_SREJ;
2605 for (seq = chan->expected_tx_seq; seq != txseq;
2606 seq = __next_seq(chan, seq)) {
/* Skip frames we already buffered out of order in srej_q. */
2607 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2608 control.reqseq = seq;
2609 l2cap_send_sframe(chan, &control);
2610 l2cap_seq_list_append(&chan->srej_list, seq);
2614 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the last outstanding missing frame (srej_list
 * tail); no-op when the list is empty/clear.
 */
2617 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2619 struct l2cap_ctrl control;
2621 BT_DBG("chan %p", chan);
2623 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2626 memset(&control, 0, sizeof(control));
2628 control.super = L2CAP_SUPER_SREJ;
2629 control.reqseq = chan->srej_list.tail;
2630 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every still-missing frame up to txseq. Entries are
 * popped and re-appended, so the initial head is captured to guarantee
 * exactly one pass over the list.
 */
2633 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2635 struct l2cap_ctrl control;
2639 BT_DBG("chan %p, txseq %u", chan, txseq);
2641 memset(&control, 0, sizeof(control));
2643 control.super = L2CAP_SUPER_SREJ;
2645 /* Capture initial list head to allow only one pass through the list. */
2646 initial_head = chan->srej_list.head;
2649 seq = l2cap_seq_list_pop(&chan->srej_list);
2650 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2653 control.reqseq = seq;
2654 l2cap_send_sframe(chan, &control);
2655 l2cap_seq_list_append(&chan->srej_list, seq);
2656 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgment: free every frame from
 * expected_ack_seq up to (but not including) reqseq from tx_q, update
 * the unacked count, and stop the retransmission timer once nothing
 * remains unacked.
 */
2659 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2661 struct sk_buff *acked_skb;
2664 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2666 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2669 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2670 chan->expected_ack_seq, chan->unacked_frames);
2672 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2673 ackseq = __next_seq(chan, ackseq)) {
2675 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2677 skb_unlink(acked_skb, &chan->tx_q);
2678 kfree_skb(acked_skb);
2679 chan->unacked_frames--;
2683 chan->expected_ack_seq = reqseq;
2685 if (chan->unacked_frames == 0)
2686 __clear_retrans_timer(chan);
2688 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: discard out-of-order buffered
 * frames (srej_q), clear the pending SREJ list, rewind expected_tx_seq
 * to buffer_seq, and return to the plain RECV state.
 */
2691 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2693 BT_DBG("chan %p", chan);
2695 chan->expected_tx_seq = chan->buffer_seq;
2696 l2cap_seq_list_clear(&chan->srej_list);
2697 skb_queue_purge(&chan->srej_q);
2698 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state: handle data requests, local-busy
 * transitions, incoming acks, and poll events. Poll-type events
 * (EXPLICIT_POLL, RETRANS_TO) send RR/RNR with P=1, arm the monitor
 * timer, and move the machine to WAIT_F.
 */
2701 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2702 struct l2cap_ctrl *control,
2703 struct sk_buff_head *skbs, u8 event)
2705 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2709 case L2CAP_EV_DATA_REQUEST:
2710 if (chan->tx_send_head == NULL)
2711 chan->tx_send_head = skb_peek(skbs);
2713 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2714 l2cap_ertm_send(chan);
2716 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2717 BT_DBG("Enter LOCAL_BUSY");
2718 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2720 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2721 /* The SREJ_SENT state must be aborted if we are to
2722 * enter the LOCAL_BUSY state.
2724 l2cap_abort_rx_srej_sent(chan);
2727 l2cap_send_ack(chan);
2730 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2731 BT_DBG("Exit LOCAL_BUSY");
2732 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we sent RNR earlier, poll the peer with RR P=1 to resync. */
2734 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2735 struct l2cap_ctrl local_control;
2737 memset(&local_control, 0, sizeof(local_control));
2738 local_control.sframe = 1;
2739 local_control.super = L2CAP_SUPER_RR;
2740 local_control.poll = 1;
2741 local_control.reqseq = chan->buffer_seq;
2742 l2cap_send_sframe(chan, &local_control);
2744 chan->retry_count = 1;
2745 __set_monitor_timer(chan);
2746 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2749 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2750 l2cap_process_reqseq(chan, control->reqseq);
2752 case L2CAP_EV_EXPLICIT_POLL:
2753 l2cap_send_rr_or_rnr(chan, 1);
2754 chan->retry_count = 1;
2755 __set_monitor_timer(chan);
2756 __clear_ack_timer(chan);
2757 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2759 case L2CAP_EV_RETRANS_TO:
2760 l2cap_send_rr_or_rnr(chan, 1);
2761 chan->retry_count = 1;
2762 __set_monitor_timer(chan);
2763 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2765 case L2CAP_EV_RECV_FBIT:
2766 /* Nothing to process */
/* ERTM TX state machine, WAIT_F state: data is queued but not sent
 * until the peer's F-bit response arrives. Receiving F=1 clears the
 * monitor timer and returns to XMIT; each monitor timeout re-polls up
 * to max_tx times before disconnecting.
 */
2773 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2774 struct l2cap_ctrl *control,
2775 struct sk_buff_head *skbs, u8 event)
2777 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2781 case L2CAP_EV_DATA_REQUEST:
2782 if (chan->tx_send_head == NULL)
2783 chan->tx_send_head = skb_peek(skbs);
2784 /* Queue data, but don't send. */
2785 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2787 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2788 BT_DBG("Enter LOCAL_BUSY");
2789 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2791 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2792 /* The SREJ_SENT state must be aborted if we are to
2793 * enter the LOCAL_BUSY state.
2795 l2cap_abort_rx_srej_sent(chan);
2798 l2cap_send_ack(chan);
2801 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2802 BT_DBG("Exit LOCAL_BUSY");
2803 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2805 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2806 struct l2cap_ctrl local_control;
2807 memset(&local_control, 0, sizeof(local_control));
2808 local_control.sframe = 1;
2809 local_control.super = L2CAP_SUPER_RR;
2810 local_control.poll = 1;
2811 local_control.reqseq = chan->buffer_seq;
2812 l2cap_send_sframe(chan, &local_control);
2814 chan->retry_count = 1;
2815 __set_monitor_timer(chan);
2816 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2819 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2820 l2cap_process_reqseq(chan, control->reqseq);
2824 case L2CAP_EV_RECV_FBIT:
2825 if (control && control->final) {
2826 __clear_monitor_timer(chan);
2827 if (chan->unacked_frames > 0)
2828 __set_retrans_timer(chan);
2829 chan->retry_count = 0;
2830 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* NOTE(review): format string "0x2.2%x" looks like a typo for
 * "0x%2.2x" — the conversion specifier is misplaced; fix in code.
 */
2831 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2834 case L2CAP_EV_EXPLICIT_POLL:
2837 case L2CAP_EV_MONITOR_TO:
2838 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2839 l2cap_send_rr_or_rnr(chan, 1);
2840 __set_monitor_timer(chan);
2841 chan->retry_count++;
2843 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch a TX event to the handler for the channel's current ERTM
 * TX state (XMIT or WAIT_F).
 */
2851 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2852 struct sk_buff_head *skbs, u8 event)
2854 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2855 chan, control, skbs, event, chan->tx_state);
2857 switch (chan->tx_state) {
2858 case L2CAP_TX_STATE_XMIT:
2859 l2cap_tx_state_xmit(chan, control, skbs, event);
2861 case L2CAP_TX_STATE_WAIT_F:
2862 l2cap_tx_state_wait_f(chan, control, skbs, event);
2870 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2871 struct l2cap_ctrl *control)
2873 BT_DBG("chan %p, control %p", chan, control);
2874 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2877 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2878 struct l2cap_ctrl *control)
2880 BT_DBG("chan %p, control %p", chan, control);
2881 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2884 /* Copy frame to all raw sockets on that connection */
2885 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2887 struct sk_buff *nskb;
2888 struct l2cap_chan *chan;
2890 BT_DBG("conn %p", conn);
2892 mutex_lock(&conn->chan_lock);
2894 list_for_each_entry(chan, &conn->chan_l, list) {
2895 if (chan->chan_type != L2CAP_CHAN_RAW)
2898 /* Don't send frame to the channel it came from */
2899 if (bt_cb(skb)->l2cap.chan == chan)
2902 nskb = skb_clone(skb, GFP_KERNEL);
2905 if (chan->ops->recv(chan, nskb))
2909 mutex_unlock(&conn->chan_lock);
2912 /* ---- L2CAP signalling commands ---- */
/* Build an L2CAP signalling command skb: basic L2CAP header plus
 * command header plus payload, with payload that does not fit into
 * conn->mtu carried in frag_list continuation skbs.
 * NOTE(review): this listing is missing several lines (declarations,
 * error paths, loop bookkeeping); comments describe surviving lines only.
 */
2913 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2914 u8 ident, u16 dlen, void *data)
2916 struct sk_buff *skb, **frag;
2917 struct l2cap_cmd_hdr *cmd;
2918 struct l2cap_hdr *lh;
2921 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2922 conn, code, ident, dlen);
/* Refuse if the MTU cannot even hold the two headers. */
2924 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2927 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
/* First skb carries at most one MTU's worth of the PDU. */
2928 count = min_t(unsigned int, conn->mtu, len);
2930 skb = bt_skb_alloc(count, GFP_KERNEL);
2934 lh = skb_put(skb, L2CAP_HDR_SIZE);
2935 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links. */
2937 if (conn->hcon->type == LE_LINK)
2938 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2940 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2942 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
2945 cmd->len = cpu_to_le16(dlen);
/* Copy as much payload as fits after the headers. */
2948 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2949 skb_put_data(skb, data, count);
2955 /* Continuation fragments (no L2CAP header) */
2956 frag = &skb_shinfo(skb)->frag_list;
/* Each continuation skb holds at most one MTU of remaining payload. */
2958 count = min_t(unsigned int, conn->mtu, len);
2960 *frag = bt_skb_alloc(count, GFP_KERNEL);
2964 skb_put_data(*frag, data, count);
2969 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: report its type and length,
 * decode its value (1/2/4-byte values copied inline, larger options
 * returned as a pointer into the buffer for the caller to memcpy), and
 * advance *ptr past the option.  Returns the number of bytes consumed.
 * NOTE(review): the opening brace, switch statement and return are
 * elided from this listing.
 */
2979 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2982 struct l2cap_conf_opt *opt = *ptr;
2985 len = L2CAP_CONF_OPT_SIZE + opt->len;
2993 *val = *((u8 *) opt->val);
2997 *val = get_unaligned_le16(opt->val);
3001 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer; the caller copies it out. */
3005 *val = (unsigned long) opt->val;
3009 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr, bounded by the 'size' bytes
 * remaining in the buffer, and advance *ptr past it.  1/2/4-byte values
 * are stored inline; for larger options 'val' is treated as a pointer
 * to the option payload and memcpy()ed.
 */
3013 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3015 struct l2cap_conf_opt *opt = *ptr;
3017 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
/* Bounds check: an option that does not fit is dropped rather than
 * written past the end of the response buffer.
 */
3019 if (size < L2CAP_CONF_OPT_SIZE + len)
3027 *((u8 *) opt->val) = val;
3031 put_unaligned_le16(val, opt->val);
3035 put_unaligned_le32(val, opt->val);
3039 memcpy(opt->val, (void *) val, len);
3043 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option built from the channel's
 * local QoS parameters.  ERTM channels advertise their configured
 * service type with default latency/flush values; streaming channels
 * advertise best-effort.
 * NOTE(review): the streaming-case id assignment and the default case
 * are elided from this listing.
 */
3046 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3048 struct l2cap_conf_efs efs;
3050 switch (chan->mode) {
3051 case L2CAP_MODE_ERTM:
3052 efs.id = chan->local_id;
3053 efs.stype = chan->local_stype;
3054 efs.msdu = cpu_to_le16(chan->local_msdu);
3055 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3056 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3057 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3060 case L2CAP_MODE_STREAMING:
3062 efs.stype = L2CAP_SERV_BESTEFFORT;
3063 efs.msdu = cpu_to_le16(chan->local_msdu);
3064 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3073 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3074 (unsigned long) &efs, size);
3077 static void l2cap_ack_timeout(struct work_struct *work)
3079 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3083 BT_DBG("chan %p", chan);
3085 l2cap_chan_lock(chan);
3087 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3088 chan->last_acked_seq);
3091 l2cap_send_rr_or_rnr(chan, 0);
3093 l2cap_chan_unlock(chan);
3094 l2cap_chan_put(chan);
3097 int l2cap_ertm_init(struct l2cap_chan *chan)
3101 chan->next_tx_seq = 0;
3102 chan->expected_tx_seq = 0;
3103 chan->expected_ack_seq = 0;
3104 chan->unacked_frames = 0;
3105 chan->buffer_seq = 0;
3106 chan->frames_sent = 0;
3107 chan->last_acked_seq = 0;
3109 chan->sdu_last_frag = NULL;
3112 skb_queue_head_init(&chan->tx_q);
3114 chan->local_amp_id = AMP_ID_BREDR;
3115 chan->move_id = AMP_ID_BREDR;
3116 chan->move_state = L2CAP_MOVE_STABLE;
3117 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3119 if (chan->mode != L2CAP_MODE_ERTM)
3122 chan->rx_state = L2CAP_RX_STATE_RECV;
3123 chan->tx_state = L2CAP_TX_STATE_XMIT;
3125 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3126 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3127 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3129 skb_queue_head_init(&chan->srej_q);
3131 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3135 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3137 l2cap_seq_list_free(&chan->srej_list);
3142 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3145 case L2CAP_MODE_STREAMING:
3146 case L2CAP_MODE_ERTM:
3147 if (l2cap_mode_supported(mode, remote_feat_mask))
3151 return L2CAP_MODE_BASIC;
3155 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3157 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3158 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3161 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3163 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3164 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
/* Choose the ERTM retransmission and monitor timeouts for @rfc.  On an
 * AMP (high-speed) link they are derived from the controller's
 * best-effort flush timeout, clamped to 16 bits; on BR/EDR the spec
 * default values are used.
 * NOTE(review): the else branch header and closing braces are elided
 * from this listing.
 */
3167 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3168 struct l2cap_conf_rfc *rfc)
3170 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3171 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3173 /* Class 1 devices must have ERTM timeouts
3174 * exceeding the Link Supervision Timeout. The
3175 * default Link Supervision Timeout for AMP
3176 * controllers is 10 seconds.
3178 * Class 1 devices use 0xffffffff for their
3179 * best-effort flush timeout, so the clamping logic
3180 * will result in a timeout that meets the above
3181 * requirement. ERTM timeouts are 16-bit values, so
3182 * the maximum timeout is 65.535 seconds.
3185 /* Convert timeout to milliseconds and round */
3186 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3188 /* This is the recommended formula for class 2 devices
3189 * that start ERTM timers when packets are sent to the
3192 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field; (the assignment on overflow is elided). */
3194 if (ertm_to > 0xffff)
3197 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3198 rfc->monitor_timeout = rfc->retrans_timeout;
/* BR/EDR: use the default spec timeouts. */
3200 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3201 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3205 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3207 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3208 __l2cap_ews_supported(chan->conn)) {
3209 /* use extended control field */
3210 set_bit(FLAG_EXT_CTRL, &chan->flags);
3211 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3213 chan->tx_win = min_t(u16, chan->tx_win,
3214 L2CAP_DEFAULT_TX_WINDOW);
3215 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3217 chan->ack_win = chan->tx_win;
/* Build our Configuration Request into @data (at most @data_size
 * bytes): select the channel mode on the first request, then emit MTU,
 * RFC, and (when enabled) EFS/EWS/FCS options.  Returns the number of
 * bytes written.
 * NOTE(review): several lines are elided from this listing (declarations,
 * breaks, the "done" label and return); comments cover surviving lines.
 */
3220 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3222 struct l2cap_conf_req *req = data;
3223 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3224 void *ptr = req->data;
3225 void *endptr = data + data_size;
3228 BT_DBG("chan %p", chan);
/* Mode negotiation happens only on the very first config exchange. */
3230 if (chan->num_conf_req || chan->num_conf_rsp)
3233 switch (chan->mode) {
3234 case L2CAP_MODE_STREAMING:
3235 case L2CAP_MODE_ERTM:
3236 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3239 if (__l2cap_efs_supported(chan->conn))
3240 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* Fall back if the peer does not support the requested mode. */
3244 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise MTU when it deviates from the default. */
3249 if (chan->imtu != L2CAP_DEFAULT_MTU)
3250 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3252 switch (chan->mode) {
3253 case L2CAP_MODE_BASIC:
/* A BASIC-only peer needs no RFC option at all. */
3257 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3258 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3261 rfc.mode = L2CAP_MODE_BASIC;
3263 rfc.max_transmit = 0;
3264 rfc.retrans_timeout = 0;
3265 rfc.monitor_timeout = 0;
3266 rfc.max_pdu_size = 0;
3268 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3269 (unsigned long) &rfc, endptr - ptr);
3272 case L2CAP_MODE_ERTM:
3273 rfc.mode = L2CAP_MODE_ERTM;
3274 rfc.max_transmit = chan->max_tx;
3276 __l2cap_set_ertm_timeouts(chan, &rfc);
/* MPS must leave room for the extended header, SDU length and FCS. */
3278 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3279 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3281 rfc.max_pdu_size = cpu_to_le16(size);
3283 l2cap_txwin_setup(chan);
3285 rfc.txwin_size = min_t(u16, chan->tx_win,
3286 L2CAP_DEFAULT_TX_WINDOW);
3288 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3289 (unsigned long) &rfc, endptr - ptr);
3291 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3292 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
/* Extended windows are carried in a separate EWS option. */
3294 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3295 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3296 chan->tx_win, endptr - ptr);
3298 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3299 if (chan->fcs == L2CAP_FCS_NONE ||
3300 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3301 chan->fcs = L2CAP_FCS_NONE;
3302 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3303 chan->fcs, endptr - ptr);
3307 case L2CAP_MODE_STREAMING:
3308 l2cap_txwin_setup(chan);
3309 rfc.mode = L2CAP_MODE_STREAMING;
3311 rfc.max_transmit = 0;
3312 rfc.retrans_timeout = 0;
3313 rfc.monitor_timeout = 0;
3315 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3316 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3318 rfc.max_pdu_size = cpu_to_le16(size);
3320 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3321 (unsigned long) &rfc, endptr - ptr);
3323 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3324 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3326 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3327 if (chan->fcs == L2CAP_FCS_NONE ||
3328 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3329 chan->fcs = L2CAP_FCS_NONE;
3330 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3331 chan->fcs, endptr - ptr);
3336 req->dcid = cpu_to_le16(chan->dcid);
3337 req->flags = cpu_to_le16(0);
/* Parse the peer's buffered Configuration Request (chan->conf_req /
 * chan->conf_len) and build our Configuration Response into @data (at
 * most @data_size bytes).  Returns the response length, or
 * -ECONNREFUSED when the requested configuration cannot be negotiated.
 * NOTE(review): many lines are elided from this listing (declarations,
 * breaks, closing braces, olen validation branches); comments cover
 * surviving lines only.
 */
3342 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3344 struct l2cap_conf_rsp *rsp = data;
3345 void *ptr = rsp->data;
3346 void *endptr = data + data_size;
3347 void *req = chan->conf_req;
3348 int len = chan->conf_len;
3349 int type, hint, olen;
3351 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3352 struct l2cap_conf_efs efs;
3354 u16 mtu = L2CAP_DEFAULT_MTU;
3355 u16 result = L2CAP_CONF_SUCCESS;
3358 BT_DBG("chan %p", chan);
/* First pass: walk every option the peer sent. */
3360 while (len >= L2CAP_CONF_OPT_SIZE) {
3361 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; non-hints must be understood. */
3365 hint = type & L2CAP_CONF_HINT;
3366 type &= L2CAP_CONF_MASK;
3369 case L2CAP_CONF_MTU:
3375 case L2CAP_CONF_FLUSH_TO:
3378 chan->flush_to = val;
3381 case L2CAP_CONF_QOS:
3384 case L2CAP_CONF_RFC:
3385 if (olen != sizeof(rfc))
3387 memcpy(&rfc, (void *) val, olen);
3390 case L2CAP_CONF_FCS:
3393 if (val == L2CAP_FCS_NONE)
3394 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3397 case L2CAP_CONF_EFS:
3398 if (olen != sizeof(efs))
3401 memcpy(&efs, (void *) val, olen);
3404 case L2CAP_CONF_EWS:
/* Extended windows require the A2MP fixed channel locally. */
3407 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3408 return -ECONNREFUSED;
3409 set_bit(FLAG_EXT_CTRL, &chan->flags);
3410 set_bit(CONF_EWS_RECV, &chan->conf_state);
3411 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3412 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as UNKNOWN. */
3418 result = L2CAP_CONF_UNKNOWN;
3419 *((u8 *) ptr++) = type;
/* Mode may only be (re)selected early in the negotiation. */
3424 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3427 switch (chan->mode) {
3428 case L2CAP_MODE_STREAMING:
3429 case L2CAP_MODE_ERTM:
3430 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3431 chan->mode = l2cap_select_mode(rfc.mode,
3432 chan->conn->feat_mask);
3437 if (__l2cap_efs_supported(chan->conn))
3438 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3440 return -ECONNREFUSED;
3443 if (chan->mode != rfc.mode)
3444 return -ECONNREFUSED;
/* Reject the peer's mode once; refuse on repeated mismatch. */
3450 if (chan->mode != rfc.mode) {
3451 result = L2CAP_CONF_UNACCEPT;
3452 rfc.mode = chan->mode;
3454 if (chan->num_conf_rsp == 1)
3455 return -ECONNREFUSED;
3457 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3458 (unsigned long) &rfc, endptr - ptr);
3461 if (result == L2CAP_CONF_SUCCESS) {
3462 /* Configure output options and let the other side know
3463 * which ones we don't like. */
3465 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3466 result = L2CAP_CONF_UNACCEPT;
3469 set_bit(CONF_MTU_DONE, &chan->conf_state);
3471 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
/* An EFS whose service type conflicts with ours is unacceptable. */
3474 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3475 efs.stype != L2CAP_SERV_NOTRAFIC &&
3476 efs.stype != chan->local_stype) {
3478 result = L2CAP_CONF_UNACCEPT;
3480 if (chan->num_conf_req >= 1)
3481 return -ECONNREFUSED;
3483 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3485 (unsigned long) &efs, endptr - ptr);
3487 /* Send PENDING Conf Rsp */
3488 result = L2CAP_CONF_PENDING;
3489 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3494 case L2CAP_MODE_BASIC:
3495 chan->fcs = L2CAP_FCS_NONE;
3496 set_bit(CONF_MODE_DONE, &chan->conf_state);
3499 case L2CAP_MODE_ERTM:
3500 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3501 chan->remote_tx_win = rfc.txwin_size;
3503 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3505 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's MPS to what fits in our ACL MTU. */
3507 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3508 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3509 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3510 rfc.max_pdu_size = cpu_to_le16(size);
3511 chan->remote_mps = size;
3513 __l2cap_set_ertm_timeouts(chan, &rfc);
3515 set_bit(CONF_MODE_DONE, &chan->conf_state);
3517 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3518 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3520 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3521 chan->remote_id = efs.id;
3522 chan->remote_stype = efs.stype;
3523 chan->remote_msdu = le16_to_cpu(efs.msdu);
3524 chan->remote_flush_to =
3525 le32_to_cpu(efs.flush_to);
3526 chan->remote_acc_lat =
3527 le32_to_cpu(efs.acc_lat);
3528 chan->remote_sdu_itime =
3529 le32_to_cpu(efs.sdu_itime);
3530 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3532 (unsigned long) &efs, endptr - ptr);
3536 case L2CAP_MODE_STREAMING:
3537 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3538 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3539 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3540 rfc.max_pdu_size = cpu_to_le16(size);
3541 chan->remote_mps = size;
3543 set_bit(CONF_MODE_DONE, &chan->conf_state);
3545 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3546 (unsigned long) &rfc, endptr - ptr);
/* Any other mode: reject with an RFC naming the mode we will use. */
3551 result = L2CAP_CONF_UNACCEPT;
3553 memset(&rfc, 0, sizeof(rfc));
3554 rfc.mode = chan->mode;
3557 if (result == L2CAP_CONF_SUCCESS)
3558 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3560 rsp->scid = cpu_to_le16(chan->dcid);
3561 rsp->result = cpu_to_le16(result);
3562 rsp->flags = cpu_to_le16(0);
/* Parse the peer's Configuration Response (@rsp/@len) and build the
 * Configuration Request we will send next into @data (at most @size
 * bytes).  *result carries the peer's result in and may be updated
 * (e.g. to UNACCEPT on a too-small MTU).  Returns the request length or
 * -ECONNREFUSED on an unresolvable conflict.
 * NOTE(review): several lines (declarations, breaks, closing braces)
 * are elided from this listing.
 */
3567 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3568 void *data, size_t size, u16 *result)
3570 struct l2cap_conf_req *req = data;
3571 void *ptr = req->data;
3572 void *endptr = data + size;
3575 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3576 struct l2cap_conf_efs efs;
3578 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3580 while (len >= L2CAP_CONF_OPT_SIZE) {
3581 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3586 case L2CAP_CONF_MTU:
/* Too-small MTU: flag UNACCEPT and counter with our minimum. */
3589 if (val < L2CAP_DEFAULT_MIN_MTU) {
3590 *result = L2CAP_CONF_UNACCEPT;
3591 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3594 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3598 case L2CAP_CONF_FLUSH_TO:
3601 chan->flush_to = val;
3602 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3603 chan->flush_to, endptr - ptr);
3606 case L2CAP_CONF_RFC:
3607 if (olen != sizeof(rfc))
3609 memcpy(&rfc, (void *)val, olen);
/* A state-2 device may not be talked into a different mode. */
3610 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3611 rfc.mode != chan->mode)
3612 return -ECONNREFUSED;
3614 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3615 (unsigned long) &rfc, endptr - ptr);
3618 case L2CAP_CONF_EWS:
3621 chan->ack_win = min_t(u16, val, chan->ack_win);
3622 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3623 chan->tx_win, endptr - ptr);
3626 case L2CAP_CONF_EFS:
3627 if (olen != sizeof(efs))
3629 memcpy(&efs, (void *)val, olen);
/* Conflicting EFS service type cannot be negotiated away. */
3630 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3631 efs.stype != L2CAP_SERV_NOTRAFIC &&
3632 efs.stype != chan->local_stype)
3633 return -ECONNREFUSED;
3634 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3635 (unsigned long) &efs, endptr - ptr);
3638 case L2CAP_CONF_FCS:
3641 if (*result == L2CAP_CONF_PENDING)
3642 if (val == L2CAP_FCS_NONE)
3643 set_bit(CONF_RECV_NO_FCS,
/* A BASIC-mode channel cannot accept any other mode from the peer. */
3649 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3650 return -ECONNREFUSED;
3652 chan->mode = rfc.mode;
/* On success/pending, latch the negotiated ERTM/streaming params. */
3654 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3656 case L2CAP_MODE_ERTM:
3657 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3658 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3659 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3660 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3661 chan->ack_win = min_t(u16, chan->ack_win,
3664 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3665 chan->local_msdu = le16_to_cpu(efs.msdu);
3666 chan->local_sdu_itime =
3667 le32_to_cpu(efs.sdu_itime);
3668 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3669 chan->local_flush_to =
3670 le32_to_cpu(efs.flush_to);
3674 case L2CAP_MODE_STREAMING:
3675 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3679 req->dcid = cpu_to_le16(chan->dcid);
3680 req->flags = cpu_to_le16(0);
3685 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3686 u16 result, u16 flags)
3688 struct l2cap_conf_rsp *rsp = data;
3689 void *ptr = rsp->data;
3691 BT_DBG("chan %p", chan);
3693 rsp->scid = cpu_to_le16(chan->dcid);
3694 rsp->result = cpu_to_le16(result);
3695 rsp->flags = cpu_to_le16(flags);
3700 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3702 struct l2cap_le_conn_rsp rsp;
3703 struct l2cap_conn *conn = chan->conn;
3705 BT_DBG("chan %p", chan);
3707 rsp.dcid = cpu_to_le16(chan->scid);
3708 rsp.mtu = cpu_to_le16(chan->imtu);
3709 rsp.mps = cpu_to_le16(chan->mps);
3710 rsp.credits = cpu_to_le16(chan->rx_credits);
3711 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3713 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3717 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3719 struct l2cap_conn_rsp rsp;
3720 struct l2cap_conn *conn = chan->conn;
3724 rsp.scid = cpu_to_le16(chan->dcid);
3725 rsp.dcid = cpu_to_le16(chan->scid);
3726 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3727 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3730 rsp_code = L2CAP_CREATE_CHAN_RSP;
3732 rsp_code = L2CAP_CONN_RSP;
3734 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3736 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3738 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3741 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3742 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3743 chan->num_conf_req++;
/* Extract the RFC (and extended-window) parameters from a successful
 * Configuration Response and apply them to the channel.  Defaults are
 * preloaded so a response lacking these options still yields sane
 * settings.
 * NOTE(review): declarations, the EWS case body and some braces/breaks
 * are elided from this listing.
 */
3746 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3750 /* Use sane default values in case a misbehaving remote device
3751 * did not send an RFC or extended window size option.
3753 u16 txwin_ext = chan->ack_win;
3754 struct l2cap_conf_rfc rfc = {
3756 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3757 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3758 .max_pdu_size = cpu_to_le16(chan->imtu),
3759 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3762 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming channels carry RFC parameters. */
3764 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3767 while (len >= L2CAP_CONF_OPT_SIZE) {
3768 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3773 case L2CAP_CONF_RFC:
3774 if (olen != sizeof(rfc))
3776 memcpy(&rfc, (void *)val, olen);
3778 case L2CAP_CONF_EWS:
3787 case L2CAP_MODE_ERTM:
3788 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3789 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3790 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Ack window comes from EWS with extended control, else from RFC. */
3791 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3792 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3794 chan->ack_win = min_t(u16, chan->ack_win,
3797 case L2CAP_MODE_STREAMING:
3798 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3802 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3803 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3806 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3808 if (cmd_len < sizeof(*rej))
3811 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3814 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3815 cmd->ident == conn->info_ident) {
3816 cancel_delayed_work(&conn->info_timer);
3818 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3819 conn->info_ident = 0;
3821 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for
 * the PSM, enforce link security, create the new channel, send the
 * response (success, pending or error), and start feature discovery or
 * configuration as appropriate.  Returns the new channel or NULL.
 * NOTE(review): many lines are elided from this listing (labels, error
 * gotos, dcid assignment, buffer declarations); comments cover the
 * surviving lines only.
 */
3827 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3828 struct l2cap_cmd_hdr *cmd,
3829 u8 *data, u8 rsp_code, u8 amp_id)
3831 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3832 struct l2cap_conn_rsp rsp;
3833 struct l2cap_chan *chan = NULL, *pchan;
3834 int result, status = L2CAP_CS_NO_INFO;
3836 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3837 __le16 psm = req->psm;
3839 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3841 /* Check if we have socket listening on psm */
3842 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3843 &conn->hcon->dst, ACL_LINK);
3845 result = L2CAP_CR_BAD_PSM;
3849 mutex_lock(&conn->chan_lock);
3850 l2cap_chan_lock(pchan);
3852 /* Check if the ACL is secure enough (if not SDP) */
3853 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3854 !hci_conn_check_link_mode(conn->hcon)) {
3855 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3856 result = L2CAP_CR_SEC_BLOCK;
3860 result = L2CAP_CR_NO_MEM;
3862 /* Check if we already have channel with that dcid */
3863 if (__l2cap_get_chan_by_dcid(conn, scid))
3866 chan = pchan->ops->new_connection(pchan);
3870 /* For certain devices (ex: HID mouse), support for authentication,
3871 * pairing and bonding is optional. For such devices, inorder to avoid
3872 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3873 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3875 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3877 bacpy(&chan->src, &conn->hcon->src);
3878 bacpy(&chan->dst, &conn->hcon->dst);
3879 chan->src_type = bdaddr_src_type(conn->hcon);
3880 chan->dst_type = bdaddr_dst_type(conn->hcon);
3883 chan->local_amp_id = amp_id;
3885 __l2cap_chan_add(conn, chan);
3889 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3891 chan->ident = cmd->ident;
/* Security and defer-setup decide whether we answer now or PEND. */
3893 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3894 if (l2cap_chan_check_security(chan, false)) {
3895 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3896 l2cap_state_change(chan, BT_CONNECT2);
3897 result = L2CAP_CR_PEND;
3898 status = L2CAP_CS_AUTHOR_PEND;
3899 chan->ops->defer(chan);
3901 /* Force pending result for AMP controllers.
3902 * The connection will succeed after the
3903 * physical link is up.
3905 if (amp_id == AMP_ID_BREDR) {
3906 l2cap_state_change(chan, BT_CONFIG);
3907 result = L2CAP_CR_SUCCESS;
3909 l2cap_state_change(chan, BT_CONNECT2);
3910 result = L2CAP_CR_PEND;
3912 status = L2CAP_CS_NO_INFO;
3915 l2cap_state_change(chan, BT_CONNECT2);
3916 result = L2CAP_CR_PEND;
3917 status = L2CAP_CS_AUTHEN_PEND;
/* Feature discovery not finished yet: answer PEND, no info. */
3920 l2cap_state_change(chan, BT_CONNECT2);
3921 result = L2CAP_CR_PEND;
3922 status = L2CAP_CS_NO_INFO;
3926 l2cap_chan_unlock(pchan);
3927 mutex_unlock(&conn->chan_lock);
3928 l2cap_chan_put(pchan);
3931 rsp.scid = cpu_to_le16(scid);
3932 rsp.dcid = cpu_to_le16(dcid);
3933 rsp.result = cpu_to_le16(result);
3934 rsp.status = cpu_to_le16(status);
3935 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* First PEND/NO_INFO answer also kicks off feature-mask discovery. */
3937 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3938 struct l2cap_info_req info;
3939 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3941 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3942 conn->info_ident = l2cap_get_ident(conn);
3944 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3946 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3947 sizeof(info), &info);
/* Immediate success: start configuration right away. */
3950 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3951 result == L2CAP_CR_SUCCESS) {
3953 set_bit(CONF_REQ_SENT, &chan->conf_state);
3954 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3955 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3956 chan->num_conf_req++;
3962 static int l2cap_connect_req(struct l2cap_conn *conn,
3963 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3965 struct hci_dev *hdev = conn->hcon->hdev;
3966 struct hci_conn *hcon = conn->hcon;
3968 if (cmd_len < sizeof(struct l2cap_conn_req))
3972 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3973 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3974 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3975 hci_dev_unlock(hdev);
3977 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response / Create Channel Response: look up the
 * channel by scid (or by ident if no scid was assigned yet) and move it
 * to BT_CONFIG on success, keep it pending, or tear it down on error.
 * NOTE(review): the error gotos, dcid assignment, case PEND label, the
 * req buffer declaration and the returns are elided from this listing.
 */
3981 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3982 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3985 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3986 u16 scid, dcid, result, status;
3987 struct l2cap_chan *chan;
3991 if (cmd_len < sizeof(*rsp))
3994 scid = __le16_to_cpu(rsp->scid);
3995 dcid = __le16_to_cpu(rsp->dcid);
3996 result = __le16_to_cpu(rsp->result);
3997 status = __le16_to_cpu(rsp->status);
3999 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4000 dcid, scid, result, status);
4002 mutex_lock(&conn->chan_lock);
/* Look up by scid; fall back to ident when scid is not known yet. */
4005 chan = __l2cap_get_chan_by_scid(conn, scid);
4011 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4020 l2cap_chan_lock(chan);
4023 case L2CAP_CR_SUCCESS:
4024 l2cap_state_change(chan, BT_CONFIG);
4027 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Only one Configuration Request is sent from here. */
4029 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4032 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4033 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4034 chan->num_conf_req++;
/* Pending: remember that the connect is still in flight. */
4038 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result: the peer refused; delete the channel. */
4042 l2cap_chan_del(chan, ECONNREFUSED);
4046 l2cap_chan_unlock(chan);
4049 mutex_unlock(&conn->chan_lock);
4054 static inline void set_default_fcs(struct l2cap_chan *chan)
4056 /* FCS is enabled only in ERTM or streaming mode, if one or both
4059 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4060 chan->fcs = L2CAP_FCS_NONE;
4061 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4062 chan->fcs = L2CAP_FCS_CRC16;
4065 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4066 u8 ident, u16 flags)
4068 struct l2cap_conn *conn = chan->conn;
4070 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4073 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4074 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4076 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4077 l2cap_build_conf_rsp(chan, data,
4078 L2CAP_CONF_SUCCESS, flags), data);
4081 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4084 struct l2cap_cmd_rej_cid rej;
4086 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4087 rej.scid = __cpu_to_le16(scid);
4088 rej.dcid = __cpu_to_le16(dcid);
4090 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request: accumulate (possibly
 * continued) option data in chan->conf_req, and once complete, parse
 * it, send our response, and bring the channel up when both directions
 * are configured.
 * NOTE(review): declarations, several gotos/returns, the conf-buffer
 * reset and closing braces are elided from this listing.
 */
4093 static inline int l2cap_config_req(struct l2cap_conn *conn,
4094 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4097 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4100 struct l2cap_chan *chan;
4103 if (cmd_len < sizeof(*req))
4106 dcid = __le16_to_cpu(req->dcid);
4107 flags = __le16_to_cpu(req->flags);
4109 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4111 chan = l2cap_get_chan_by_scid(conn, dcid);
4113 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config is only legal in BT_CONFIG / BT_CONNECT2. */
4117 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4118 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4123 /* Reject if config buffer is too small. */
4124 len = cmd_len - sizeof(*req);
4125 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4126 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4127 l2cap_build_conf_rsp(chan, rsp,
4128 L2CAP_CONF_REJECT, flags), rsp);
/* Store this fragment of the (possibly continued) request. */
4133 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4134 chan->conf_len += len;
4136 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4137 /* Incomplete config. Send empty response. */
4138 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4139 l2cap_build_conf_rsp(chan, rsp,
4140 L2CAP_CONF_SUCCESS, flags), rsp);
4144 /* Complete config. */
4145 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4147 l2cap_send_disconn_req(chan, ECONNRESET);
4151 chan->ident = cmd->ident;
4152 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4153 chan->num_conf_rsp++;
4155 /* Reset config buffer. */
4158 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: init ERTM and mark the channel ready. */
4161 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4162 set_default_fcs(chan);
4164 if (chan->mode == L2CAP_MODE_ERTM ||
4165 chan->mode == L2CAP_MODE_STREAMING)
4166 err = l2cap_ertm_init(chan);
4169 l2cap_send_disconn_req(chan, -err);
4171 l2cap_chan_ready(chan);
/* We have not sent our own Config Request yet: do so now. */
4176 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4178 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4179 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4180 chan->num_conf_req++;
4183 /* Got Conf Rsp PENDING from remote side and assume we sent
4184 Conf Rsp PENDING in the code above */
4185 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4186 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4188 /* check compatibility */
4190 /* Send rsp for BR/EDR channel */
4192 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4194 chan->ident = cmd->ident;
4198 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response: on success latch the
 * negotiated RFC parameters; on PENDING re-parse and answer when our
 * side is also pending; on UNACCEPT retry with an adjusted request a
 * limited number of times; otherwise disconnect.
 * NOTE(review): declarations, gotos/returns, the switch header and
 * closing braces are elided from this listing.
 */
4202 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4203 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4206 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4207 u16 scid, flags, result;
4208 struct l2cap_chan *chan;
4209 int len = cmd_len - sizeof(*rsp);
4212 if (cmd_len < sizeof(*rsp))
4215 scid = __le16_to_cpu(rsp->scid);
4216 flags = __le16_to_cpu(rsp->flags);
4217 result = __le16_to_cpu(rsp->result);
4219 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4222 chan = l2cap_get_chan_by_scid(conn, scid);
4227 case L2CAP_CONF_SUCCESS:
4228 l2cap_conf_rfc_get(chan, rsp->data, len);
4229 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4232 case L2CAP_CONF_PENDING:
4233 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4235 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4238 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4239 buf, sizeof(buf), &result);
4241 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR answers directly; AMP first brings up a logical link. */
4245 if (!chan->hs_hcon) {
4246 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4249 if (l2cap_check_efs(chan)) {
4250 amp_create_logical_link(chan);
4251 chan->ident = cmd->ident;
4257 case L2CAP_CONF_UNACCEPT:
/* Retry with adjusted options, up to the per-channel limit. */
4258 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4261 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4262 l2cap_send_disconn_req(chan, ECONNRESET);
4266 /* throw out any old stored conf requests */
4267 result = L2CAP_CONF_SUCCESS;
4268 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4269 req, sizeof(req), &result);
4271 l2cap_send_disconn_req(chan, ECONNRESET);
4275 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4276 L2CAP_CONF_REQ, len, req);
4277 chan->num_conf_req++;
4278 if (result != L2CAP_CONF_SUCCESS)
/* Rejection or retry limit exceeded: tear the channel down. */
4284 l2cap_chan_set_err(chan, ECONNRESET);
4286 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4287 l2cap_send_disconn_req(chan, ECONNRESET);
4291 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4294 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* Both directions configured: init ERTM and mark the channel ready. */
4296 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4297 set_default_fcs(chan);
4299 if (chan->mode == L2CAP_MODE_ERTM ||
4300 chan->mode == L2CAP_MODE_STREAMING)
4301 err = l2cap_ertm_init(chan);
4304 l2cap_send_disconn_req(chan, -err);
4306 l2cap_chan_ready(chan);
4310 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Disconnect Request: look up the target
 * channel by the request's DCID, echo a Disconnect Response with the
 * CIDs swapped to our view, then tear the channel down under
 * conn->chan_lock. A hold/put pair keeps the channel alive across
 * ops->close() after it has been deleted from the connection. */
4314 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4315 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4318 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4319 struct l2cap_disconn_rsp rsp;
4321 struct l2cap_chan *chan;
/* Disconnect Request has a fixed size; reject anything else. */
4323 if (cmd_len != sizeof(*req))
4326 scid = __le16_to_cpu(req->scid);
4327 dcid = __le16_to_cpu(req->dcid);
4329 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4331 mutex_lock(&conn->chan_lock);
/* The peer's DCID is our SCID, hence the lookup by dcid here. */
4333 chan = __l2cap_get_chan_by_scid(conn, dcid);
4335 mutex_unlock(&conn->chan_lock);
/* Unknown CID: reply with a Command Reject (invalid CID). */
4336 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4340 l2cap_chan_lock(chan);
4342 rsp.dcid = cpu_to_le16(chan->scid);
4343 rsp.scid = cpu_to_le16(chan->dcid);
4344 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4346 chan->ops->set_shutdown(chan);
/* Hold before deletion so close() below operates on a live object. */
4348 l2cap_chan_hold(chan);
4349 l2cap_chan_del(chan, ECONNRESET);
4351 l2cap_chan_unlock(chan);
4353 chan->ops->close(chan);
4354 l2cap_chan_put(chan);
4356 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Disconnect Response: locate the channel by
 * SCID and, only if it is in BT_DISCONN state (i.e. we initiated the
 * disconnect), delete and close it. Same hold/del/close/put pattern
 * as l2cap_disconnect_req(). */
4361 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4362 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4365 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4367 struct l2cap_chan *chan;
4369 if (cmd_len != sizeof(*rsp))
4372 scid = __le16_to_cpu(rsp->scid);
4373 dcid = __le16_to_cpu(rsp->dcid);
4375 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4377 mutex_lock(&conn->chan_lock);
4379 chan = __l2cap_get_chan_by_scid(conn, scid);
4381 mutex_unlock(&conn->chan_lock);
4385 l2cap_chan_lock(chan);
/* Only act if we are actually waiting for this response. */
4387 if (chan->state != BT_DISCONN) {
4388 l2cap_chan_unlock(chan);
4389 mutex_unlock(&conn->chan_lock);
4393 l2cap_chan_hold(chan);
/* err == 0: clean, locally requested disconnect. */
4394 l2cap_chan_del(chan, 0);
4396 l2cap_chan_unlock(chan);
4398 chan->ops->close(chan);
4399 l2cap_chan_put(chan);
4401 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Information Request. Three cases:
 * - FEAT_MASK: report our feature mask (ERTM/streaming always added;
 *   extended flow/window only when A2MP is a local fixed channel);
 * - FIXED_CHAN: report the 8-byte local fixed-channel bitmap;
 * - anything else: respond with result NOTSUPP. */
4406 static inline int l2cap_information_req(struct l2cap_conn *conn,
4407 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4410 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4413 if (cmd_len != sizeof(*req))
4416 type = __le16_to_cpu(req->type);
4418 BT_DBG("type 0x%4.4x", type);
4420 if (type == L2CAP_IT_FEAT_MASK) {
/* Start from the module-static l2cap_feat_mask and add features. */
4422 u32 feat_mask = l2cap_feat_mask;
4423 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4424 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4425 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4427 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4429 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4430 feat_mask |= L2CAP_FEAT_EXT_FLOW
4431 | L2CAP_FEAT_EXT_WINDOW;
/* Feature mask is serialized little-endian into the payload. */
4433 put_unaligned_le32(feat_mask, rsp->data);
4434 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4436 } else if (type == L2CAP_IT_FIXED_CHAN) {
4438 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4440 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4441 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Byte 0 = local fixed channel map; remaining 7 bytes are zero. */
4442 rsp->data[0] = conn->local_fixed_chan;
4443 memset(rsp->data + 1, 0, 7);
4444 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4447 struct l2cap_info_rsp rsp;
4448 rsp.type = cpu_to_le16(type);
4449 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4450 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming L2CAP Information Response to our own Info
 * Request. The ident must match conn->info_ident (info req/rsp are
 * not bound to a channel). On a FEAT_MASK answer with the fixed-
 * channel feature set, chain a FIXED_CHAN request; otherwise mark
 * the info exchange done and (re)start pending connections. */
4457 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4458 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4461 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4464 if (cmd_len < sizeof(*rsp))
4467 type = __le16_to_cpu(rsp->type);
4468 result = __le16_to_cpu(rsp->result);
4470 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4472 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4473 if (cmd->ident != conn->info_ident ||
4474 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
/* Response arrived in time: stop the info retransmission timer. */
4477 cancel_delayed_work(&conn->info_timer);
4479 if (result != L2CAP_IR_SUCCESS) {
4480 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4481 conn->info_ident = 0;
4483 l2cap_conn_start(conn);
4489 case L2CAP_IT_FEAT_MASK:
4490 conn->feat_mask = get_unaligned_le32(rsp->data);
4492 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Peer supports fixed channels: ask which ones next. */
4493 struct l2cap_info_req req;
4494 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4496 conn->info_ident = l2cap_get_ident(conn);
4498 l2cap_send_cmd(conn, conn->info_ident,
4499 L2CAP_INFO_REQ, sizeof(req), &req);
4501 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4502 conn->info_ident = 0;
4504 l2cap_conn_start(conn);
4508 case L2CAP_IT_FIXED_CHAN:
4509 conn->remote_fixed_chan = rsp->data[0];
4510 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4511 conn->info_ident = 0;
4513 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Create Channel Request (AMP extension).
 * amp_id == AMP_ID_BREDR means an ordinary BR/EDR connect; otherwise
 * the requested AMP controller id is validated (must exist, be of
 * type HCI_AMP and be up) and an AMP-backed channel is set up. On a
 * bad controller id a CREATE_CHAN_RSP with L2CAP_CR_BAD_AMP is sent. */
4520 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4521 struct l2cap_cmd_hdr *cmd,
4522 u16 cmd_len, void *data)
4524 struct l2cap_create_chan_req *req = data;
4525 struct l2cap_create_chan_rsp rsp;
4526 struct l2cap_chan *chan;
4527 struct hci_dev *hdev;
4530 if (cmd_len != sizeof(*req))
/* Only meaningful when A2MP is enabled locally. */
4533 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4536 psm = le16_to_cpu(req->psm);
4537 scid = le16_to_cpu(req->scid);
4539 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4541 /* For controller id 0 make BR/EDR connection */
4542 if (req->amp_id == AMP_ID_BREDR) {
4543 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4548 /* Validate AMP controller id */
4549 hdev = hci_dev_get(req->amp_id);
4553 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4558 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4561 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4562 struct hci_conn *hs_hcon;
/* Find the existing high-speed (AMP) link to the same peer. */
4564 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4568 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4573 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4575 mgr->bredr_chan = chan;
4576 chan->hs_hcon = hs_hcon;
/* AMP channels never use an L2CAP FCS. */
4577 chan->fcs = L2CAP_FCS_NONE;
4578 conn->mtu = hdev->block_mtu;
/* Error path: reject with BAD_AMP. */
4587 rsp.scid = cpu_to_le16(scid);
4588 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4589 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4591 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send an L2CAP Move Channel Request for @chan towards controller
 * @dest_amp_id, remembering the ident used so the response can be
 * matched, and arm the channel move timeout. */
4597 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4599 struct l2cap_move_chan_req req;
4602 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4604 ident = l2cap_get_ident(chan->conn);
4605 chan->ident = ident;
/* The Initiator CID (ICID) is our source CID. */
4607 req.icid = cpu_to_le16(chan->scid);
4608 req.dest_amp_id = dest_amp_id;
4610 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4613 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an L2CAP Move Channel Response with @result, reusing the
 * ident stored in chan->ident from the corresponding request. */
4616 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4618 struct l2cap_move_chan_rsp rsp;
4620 BT_DBG("chan %p, result 0x%4.4x", chan, result);
/* As responder, the ICID is the peer's CID (our dcid). */
4622 rsp.icid = cpu_to_le16(chan->dcid);
4623 rsp.result = cpu_to_le16(result);
4625 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send an L2CAP Move Channel Confirmation with @result under a fresh
 * ident, and arm the move timeout while waiting for the cfm rsp. */
4629 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4631 struct l2cap_move_chan_cfm cfm;
4633 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4635 chan->ident = l2cap_get_ident(chan->conn);
4637 cfm.icid = cpu_to_le16(chan->scid);
4638 cfm.result = cpu_to_le16(result);
4640 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4643 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirmation for a bare @icid —
 * used when no matching channel could be located but the spec still
 * requires a confirmation on the wire. */
4646 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4648 struct l2cap_move_chan_cfm cfm;
4650 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4652 cfm.icid = cpu_to_le16(icid);
4653 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4655 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirmation Response for @icid, echoing the
 * @ident of the confirmation being acknowledged. */
4659 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4662 struct l2cap_move_chan_cfm_rsp rsp;
4664 BT_DBG("icid 0x%4.4x", icid);
4666 rsp.icid = cpu_to_le16(icid);
4667 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its AMP logical link (hchan) and
 * high-speed connection (hcon). The actual link teardown is still a
 * placeholder, per the comment below. */
4670 static void __release_logical_link(struct l2cap_chan *chan)
4672 chan->hs_hchan = NULL;
4673 chan->hs_hcon = NULL;
4675 /* Placeholder - release the logical link */
/* React to a failed AMP logical-link setup. If the channel never got
 * connected this was a create failure: disconnect. Otherwise a move
 * was in progress; the cleanup depends on our move role and state. */
4678 static void l2cap_logical_fail(struct l2cap_chan *chan)
4680 /* Logical link setup failed */
4681 if (chan->state != BT_CONNECTED) {
4682 /* Create channel failure, disconnect */
4683 l2cap_send_disconn_req(chan, ECONNRESET);
4687 switch (chan->move_role) {
4688 case L2CAP_MOVE_ROLE_RESPONDER:
/* Tell the initiator the move is not supported and settle. */
4689 l2cap_move_done(chan);
4690 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4692 case L2CAP_MOVE_ROLE_INITIATOR:
4693 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4694 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4695 /* Remote has only sent pending or
4696 * success responses, clean up
4698 l2cap_move_done(chan);
4701 /* Other amp move states imply that the move
4702 * has already aborted
4704 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up: attach
 * the hci_chan, send the deferred EFS Configure Response, and — if
 * input configuration already finished — initialize ERTM and mark
 * the channel ready (or disconnect on init failure). */
4709 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4710 struct hci_chan *hchan)
4712 struct l2cap_conf_rsp rsp;
4714 chan->hs_hchan = hchan;
4715 chan->hs_hcon->l2cap_data = chan->conn;
/* Respond with the ident saved when the conf req was parked. */
4717 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4719 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4722 set_default_fcs(chan);
4724 err = l2cap_ertm_init(chan);
4726 l2cap_send_disconn_req(chan, -err);
4728 l2cap_chan_ready(chan);
/* Complete a channel move once the AMP logical link is up. The next
 * step depends on chan->move_state and our role: either wait for the
 * peer's success response, wait out local busy, send a confirmation
 * (initiator) or a success response (responder). Unexpected states
 * release the logical link and settle back to MOVE_STABLE. */
4732 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4733 struct hci_chan *hchan)
4735 chan->hs_hcon = hchan->conn;
4736 chan->hs_hcon->l2cap_data = chan->conn;
4738 BT_DBG("move_state %d", chan->move_state);
4740 switch (chan->move_state) {
4741 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4742 /* Move confirm will be sent after a success
4743 * response is received
4745 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4747 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4748 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4749 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4750 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4751 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4752 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4753 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4754 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4755 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4759 /* Move was not in expected state, free the channel */
4760 __release_logical_link(chan);
4762 chan->move_state = L2CAP_MOVE_STABLE;
/* Call with chan locked */
/* Logical-link setup completion callback. On failure (non-zero
 * status — NOTE(review): the status test itself is elided in this
 * excerpt) run the failure path and release the link. On success,
 * a not-yet-connected channel finishes creation (unless it is still
 * on BR/EDR), a connected one finishes its move. */
4767 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4770 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4773 l2cap_logical_fail(chan);
4774 __release_logical_link(chan);
4778 if (chan->state != BT_CONNECTED) {
4779 /* Ignore logical link if channel is on BR/EDR */
4780 if (chan->local_amp_id != AMP_ID_BREDR)
4781 l2cap_logical_finish_create(chan, hchan);
4783 l2cap_logical_finish_move(chan, hchan);
/* Begin moving @chan to the other controller type. From BR/EDR the
 * move only starts if channel policy prefers AMP, and waits for the
 * physical link (placeholder). From AMP, send a move request towards
 * BR/EDR (dest_amp_id 0) right away. */
4787 void l2cap_move_start(struct l2cap_chan *chan)
4789 BT_DBG("chan %p", chan);
4791 if (chan->local_amp_id == AMP_ID_BREDR) {
4792 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4794 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4795 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4796 /* Placeholder - start physical link setup */
4798 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4799 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4801 l2cap_move_setup(chan);
/* dest_amp_id 0 == AMP_ID_BREDR: move back to BR/EDR. */
4802 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after physical-link setup. Outgoing
 * channels (BT_CONNECT) either send a Create Channel Request on the
 * AMP or fall back to a plain BR/EDR Connect Request. Incoming
 * channels answer with a Create Channel Response and, on success,
 * move to BT_CONFIG and send the first Configure Request. */
4806 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4807 u8 local_amp_id, u8 remote_amp_id)
4809 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4810 local_amp_id, remote_amp_id);
/* AMP-created channels carry no L2CAP FCS. */
4812 chan->fcs = L2CAP_FCS_NONE;
4814 /* Outgoing channel on AMP */
4815 if (chan->state == BT_CONNECT) {
4816 if (result == L2CAP_CR_SUCCESS) {
4817 chan->local_amp_id = local_amp_id;
4818 l2cap_send_create_chan_req(chan, remote_amp_id);
4820 /* Revert to BR/EDR connect */
4821 l2cap_send_conn_req(chan);
4827 /* Incoming channel on AMP */
4828 if (__l2cap_no_conn_pending(chan)) {
4829 struct l2cap_conn_rsp rsp;
4831 rsp.scid = cpu_to_le16(chan->dcid);
4832 rsp.dcid = cpu_to_le16(chan->scid);
4834 if (result == L2CAP_CR_SUCCESS) {
4835 /* Send successful response */
4836 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4837 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4839 /* Send negative response */
4840 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4841 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4844 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4847 if (result == L2CAP_CR_SUCCESS) {
/* Kick off configuration on the new channel. */
4848 l2cap_state_change(chan, BT_CONFIG);
4849 set_bit(CONF_REQ_SENT, &chan->conf_state);
4850 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4852 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4853 chan->num_conf_req++;
/* Initiator side of a move: prepare the channel, record the target
 * controller id, and send the Move Channel Request. */
4858 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4861 l2cap_move_setup(chan);
4862 chan->move_id = local_amp_id;
4863 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4865 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side of a move after physical-link setup. If the logical
 * link (placeholder lookup — hchan is currently always NULL here) is
 * already connected, attach it and respond SUCCESS; otherwise wait
 * for logical-link confirmation, or reject with NOT_ALLOWED when no
 * link is available. */
4868 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4870 struct hci_chan *hchan = NULL;
4872 /* Placeholder - get hci_chan for logical link */
4875 if (hchan->state == BT_CONNECTED) {
4876 /* Logical link is ready to go */
4877 chan->hs_hcon = hchan->conn;
4878 chan->hs_hcon->l2cap_data = chan->conn;
4879 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4880 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4882 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4884 /* Wait for logical link to be ready */
4885 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4888 /* Logical link not available */
4889 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress move. A responder still sends a Move Channel
 * Response (BAD_ID for -EINVAL, otherwise NOT_ALLOWED); then the move
 * state is reset to stable and ERTM transmission resumes. */
4893 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4895 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4897 if (result == -EINVAL)
4898 rsp_result = L2CAP_MR_BAD_ID;
4900 rsp_result = L2CAP_MR_NOT_ALLOWED;
4902 l2cap_send_move_chan_rsp(chan, rsp_result);
4905 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4906 chan->move_state = L2CAP_MOVE_STABLE;
4908 /* Restart data transmission */
4909 l2cap_ertm_send(chan);
/* Invoke with locked chan */
/* Physical-link setup completion callback. Bail out for channels that
 * are already disconnecting/closed; route non-connected channels to
 * creation, failures to move-cancel, and otherwise dispatch on the
 * channel's move role (initiator/responder/none). */
4913 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4915 u8 local_amp_id = chan->local_amp_id;
4916 u8 remote_amp_id = chan->remote_amp_id;
4918 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4919 chan, result, local_amp_id, remote_amp_id);
4921 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4922 l2cap_chan_unlock(chan);
4926 if (chan->state != BT_CONNECTED) {
4927 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4928 } else if (result != L2CAP_MR_SUCCESS) {
4929 l2cap_do_move_cancel(chan, result);
4931 switch (chan->move_role) {
4932 case L2CAP_MOVE_ROLE_INITIATOR:
4933 l2cap_do_move_initiate(chan, local_amp_id,
4936 case L2CAP_MOVE_ROLE_RESPONDER:
4937 l2cap_do_move_respond(chan, result);
/* No recognized role: treat as a cancelled move. */
4940 l2cap_do_move_cancel(chan, result);
/* Handle an incoming L2CAP Move Channel Request. Validates that the
 * channel exists (by our DCID == peer's ICID), is a dynamic ERTM or
 * streaming channel not pinned to BR/EDR, that the destination AMP
 * id is sane, and resolves move collisions by bd_addr comparison.
 * Then adopts the responder role and answers SUCCESS/PEND/etc. */
4946 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4947 struct l2cap_cmd_hdr *cmd,
4948 u16 cmd_len, void *data)
4950 struct l2cap_move_chan_req *req = data;
4951 struct l2cap_move_chan_rsp rsp;
4952 struct l2cap_chan *chan;
4954 u16 result = L2CAP_MR_NOT_ALLOWED;
4956 if (cmd_len != sizeof(*req))
4959 icid = le16_to_cpu(req->icid);
4961 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
/* Moves require A2MP support locally. */
4963 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4966 chan = l2cap_get_chan_by_dcid(conn, icid);
/* No such channel: refuse directly with NOT_ALLOWED. */
4968 rsp.icid = cpu_to_le16(icid);
4969 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4970 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4975 chan->ident = cmd->ident;
/* Only dynamic, non-BR/EDR-only, ERTM/streaming channels may move. */
4977 if (chan->scid < L2CAP_CID_DYN_START ||
4978 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4979 (chan->mode != L2CAP_MODE_ERTM &&
4980 chan->mode != L2CAP_MODE_STREAMING)) {
4981 result = L2CAP_MR_NOT_ALLOWED;
4982 goto send_move_response;
4985 if (chan->local_amp_id == req->dest_amp_id) {
4986 result = L2CAP_MR_SAME_ID;
4987 goto send_move_response;
4990 if (req->dest_amp_id != AMP_ID_BREDR) {
4991 struct hci_dev *hdev;
4992 hdev = hci_dev_get(req->dest_amp_id);
4993 if (!hdev || hdev->dev_type != HCI_AMP ||
4994 !test_bit(HCI_UP, &hdev->flags)) {
4998 result = L2CAP_MR_BAD_ID;
4999 goto send_move_response;
5004 /* Detect a move collision. Only send a collision response
5005 * if this side has "lost", otherwise proceed with the move.
5006 * The winner has the larger bd_addr.
5008 if ((__chan_is_moving(chan) ||
5009 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5010 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5011 result = L2CAP_MR_COLLISION;
5012 goto send_move_response;
5015 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5016 l2cap_move_setup(chan);
5017 chan->move_id = req->dest_amp_id;
5020 if (req->dest_amp_id == AMP_ID_BREDR) {
5021 /* Moving to BR/EDR */
5022 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5023 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5024 result = L2CAP_MR_PEND;
5026 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5027 result = L2CAP_MR_SUCCESS;
5030 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5031 /* Placeholder - uncomment when amp functions are available */
5032 /*amp_accept_physical(chan, req->dest_amp_id);*/
5033 result = L2CAP_MR_PEND;
5037 l2cap_send_move_chan_rsp(chan, result);
5039 l2cap_chan_unlock(chan);
/* Continue an in-progress move after a SUCCESS or PEND Move Channel
 * Response. If the channel cannot be found an UNCONFIRMED cfm is sent
 * for the bare icid. Otherwise the next step is driven by
 * chan->move_state; any unexpected state aborts the move.
 * NOTE(review): the logical-link lookup is a placeholder, so hchan
 * stays NULL on the WAIT_RSP path in this version. */
5044 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5046 struct l2cap_chan *chan;
5047 struct hci_chan *hchan = NULL;
5049 chan = l2cap_get_chan_by_scid(conn, icid);
5051 l2cap_send_move_chan_cfm_icid(conn, icid);
/* A PEND result re-arms the timer with the extended ERTX timeout. */
5055 __clear_chan_timer(chan);
5056 if (result == L2CAP_MR_PEND)
5057 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5059 switch (chan->move_state) {
5060 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5061 /* Move confirm will be sent when logical link
5064 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5066 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5067 if (result == L2CAP_MR_PEND) {
5069 } else if (test_bit(CONN_LOCAL_BUSY,
5070 &chan->conn_state)) {
5071 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5073 /* Logical link is up or moving to BR/EDR,
5076 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5077 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5080 case L2CAP_MOVE_WAIT_RSP:
5082 if (result == L2CAP_MR_SUCCESS) {
5083 /* Remote is ready, send confirm immediately
5084 * after logical link is ready
5086 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5088 /* Both logical link and move success
5089 * are required to confirm
5091 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5094 /* Placeholder - get hci_chan for logical link */
5096 /* Logical link not available */
5097 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5101 /* If the logical link is not yet connected, do not
5102 * send confirmation.
5104 if (hchan->state != BT_CONNECTED)
5107 /* Logical link is already ready to go */
5109 chan->hs_hcon = hchan->conn;
5110 chan->hs_hcon->l2cap_data = chan->conn;
5112 if (result == L2CAP_MR_SUCCESS) {
5113 /* Can confirm now */
5114 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5116 /* Now only need move success
5119 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5122 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5125 /* Any other amp move state means the move failed. */
5126 chan->move_id = chan->local_amp_id;
5127 l2cap_move_done(chan);
5128 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5131 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response. The channel is located by
 * the response ident; if not found, an UNCONFIRMED cfm is sent for
 * the icid as a best guess. A COLLISION result flips the initiator
 * into the responder role; any other failure cancels the move. An
 * UNCONFIRMED confirmation is always sent for a located channel. */
5134 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5137 struct l2cap_chan *chan;
5139 chan = l2cap_get_chan_by_ident(conn, ident);
5141 /* Could not locate channel, icid is best guess */
5142 l2cap_send_move_chan_cfm_icid(conn, icid);
5146 __clear_chan_timer(chan);
5148 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5149 if (result == L2CAP_MR_COLLISION) {
5150 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5152 /* Cleanup - cancel move */
5153 chan->move_id = chan->local_amp_id;
5154 l2cap_move_done(chan);
5158 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5160 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Move Channel Response: SUCCESS and PEND
 * continue the move, everything else goes down the failure path. */
5163 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5164 struct l2cap_cmd_hdr *cmd,
5165 u16 cmd_len, void *data)
5167 struct l2cap_move_chan_rsp *rsp = data;
5170 if (cmd_len != sizeof(*rsp))
5173 icid = le16_to_cpu(rsp->icid);
5174 result = le16_to_cpu(rsp->result);
5176 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5178 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5179 l2cap_move_continue(conn, icid, result);
5181 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Confirmation. Per spec, a cfm rsp
 * is sent back even when the icid is unknown. If the channel was
 * waiting for this confirmation, commit the move (CONFIRMED) —
 * releasing the logical link when landing back on BR/EDR — or roll
 * the move id back (unconfirmed), then mark the move done. */
5186 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5187 struct l2cap_cmd_hdr *cmd,
5188 u16 cmd_len, void *data)
5190 struct l2cap_move_chan_cfm *cfm = data;
5191 struct l2cap_chan *chan;
5194 if (cmd_len != sizeof(*cfm))
5197 icid = le16_to_cpu(cfm->icid);
5198 result = le16_to_cpu(cfm->result);
5200 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5202 chan = l2cap_get_chan_by_dcid(conn, icid);
5204 /* Spec requires a response even if the icid was not found */
5205 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5209 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5210 if (result == L2CAP_MC_CONFIRMED) {
5211 chan->local_amp_id = chan->move_id;
5212 if (chan->local_amp_id == AMP_ID_BREDR)
5213 __release_logical_link(chan);
5215 chan->move_id = chan->local_amp_id;
5218 l2cap_move_done(chan);
5221 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5223 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Confirmation Response: if the
 * channel (looked up by SCID == icid) was waiting for it, commit the
 * new controller id, release any AMP logical link when back on
 * BR/EDR, and finish the move. */
5228 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5229 struct l2cap_cmd_hdr *cmd,
5230 u16 cmd_len, void *data)
5232 struct l2cap_move_chan_cfm_rsp *rsp = data;
5233 struct l2cap_chan *chan;
5236 if (cmd_len != sizeof(*rsp))
5239 icid = le16_to_cpu(rsp->icid);
5241 BT_DBG("icid 0x%4.4x", icid);
5243 chan = l2cap_get_chan_by_scid(conn, icid);
/* The move timeout armed when the cfm was sent is now satisfied. */
5247 __clear_chan_timer(chan);
5249 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5250 chan->local_amp_id = chan->move_id;
5252 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5253 __release_logical_link(chan);
5255 l2cap_move_done(chan);
5258 l2cap_chan_unlock(chan);
/* Handle an LE Connection Parameter Update Request (valid only when
 * we are the LE master). The requested interval must lie within the
 * current hcon bounds and pass hci_check_conn_params(); accepted
 * parameters are applied via hci_le_conn_update() and reported to
 * the management interface. */
5263 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5264 struct l2cap_cmd_hdr *cmd,
5265 u16 cmd_len, u8 *data)
5267 struct hci_conn *hcon = conn->hcon;
5268 struct l2cap_conn_param_update_req *req;
5269 struct l2cap_conn_param_update_rsp rsp;
5270 u16 min, max, latency, to_multiplier;
/* Slaves must not receive this request; reject wrong roles. */
5273 if (hcon->role != HCI_ROLE_MASTER)
5276 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5279 req = (struct l2cap_conn_param_update_req *) data;
5280 min = __le16_to_cpu(req->min);
5281 max = __le16_to_cpu(req->max);
5282 latency = __le16_to_cpu(req->latency);
5283 to_multiplier = __le16_to_cpu(req->to_multiplier);
5285 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5286 min, max, latency, to_multiplier);
5288 memset(&rsp, 0, sizeof(rsp));
5290 if (min < hcon->le_conn_min_interval ||
5291 max > hcon->le_conn_max_interval) {
5292 BT_DBG("requested connection interval exceeds current bounds.");
/* Parameter sanity check; non-zero err leads to REJECTED below. */
5295 err = hci_check_conn_params(min, max, latency, to_multiplier);
5299 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5301 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5303 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the update to the controller and notify mgmt. */
5309 store_hint = hci_le_conn_update(hcon, min, max, latency,
5311 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5312 store_hint, min, max, latency,
/* Handle an LE Credit Based Connection Response. Validates MTU/MPS
 * (>= 23) and the dynamic DCID range, matches the channel by the
 * request ident, and on success records the peer parameters and
 * readies the channel. Security failures may trigger an SMP upgrade
 * and a retried connect; anything else deletes the channel. */
5320 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5321 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5324 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5325 struct hci_conn *hcon = conn->hcon;
5326 u16 dcid, mtu, mps, credits, result;
5327 struct l2cap_chan *chan;
5330 if (cmd_len < sizeof(*rsp))
5333 dcid = __le16_to_cpu(rsp->dcid);
5334 mtu = __le16_to_cpu(rsp->mtu);
5335 mps = __le16_to_cpu(rsp->mps);
5336 credits = __le16_to_cpu(rsp->credits);
5337 result = __le16_to_cpu(rsp->result);
/* 23 is the minimum LE CoC MTU/MPS; DCID must be LE-dynamic. */
5339 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
5340 dcid < L2CAP_CID_DYN_START ||
5341 dcid > L2CAP_CID_LE_DYN_END))
5344 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5345 dcid, mtu, mps, credits, result);
5347 mutex_lock(&conn->chan_lock);
5349 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5357 l2cap_chan_lock(chan);
5360 case L2CAP_CR_SUCCESS:
/* Refuse a DCID that is already in use on this connection. */
5361 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5369 chan->remote_mps = mps;
5370 chan->tx_credits = credits;
5371 l2cap_chan_ready(chan);
5374 case L2CAP_CR_AUTHENTICATION:
5375 case L2CAP_CR_ENCRYPTION:
5376 /* If we already have MITM protection we can't do
5379 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5380 l2cap_chan_del(chan, ECONNREFUSED);
/* Otherwise raise the security level one notch and retry. */
5384 sec_level = hcon->sec_level + 1;
5385 if (chan->sec_level < sec_level)
5386 chan->sec_level = sec_level;
5388 /* We'll need to send a new Connect Request */
5389 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5391 smp_conn_security(hcon, chan->sec_level);
5395 l2cap_chan_del(chan, ECONNREFUSED);
5399 l2cap_chan_unlock(chan);
5402 mutex_unlock(&conn->chan_lock);
/* Dispatch a single BR/EDR signaling command to its handler based on
 * cmd->code. Echo Requests are answered inline by mirroring the
 * payload; unknown codes are logged with BT_ERR. */
5407 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5408 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5413 switch (cmd->code) {
5414 case L2CAP_COMMAND_REJ:
5415 l2cap_command_rej(conn, cmd, cmd_len, data);
5418 case L2CAP_CONN_REQ:
5419 err = l2cap_connect_req(conn, cmd, cmd_len, data);
/* Connect and Create Channel responses share one handler. */
5422 case L2CAP_CONN_RSP:
5423 case L2CAP_CREATE_CHAN_RSP:
5424 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5427 case L2CAP_CONF_REQ:
5428 err = l2cap_config_req(conn, cmd, cmd_len, data);
5431 case L2CAP_CONF_RSP:
5432 l2cap_config_rsp(conn, cmd, cmd_len, data);
5435 case L2CAP_DISCONN_REQ:
5436 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5439 case L2CAP_DISCONN_RSP:
5440 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5443 case L2CAP_ECHO_REQ:
/* Echo: send back the request data unchanged. */
5444 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5447 case L2CAP_ECHO_RSP:
5450 case L2CAP_INFO_REQ:
5451 err = l2cap_information_req(conn, cmd, cmd_len, data);
5454 case L2CAP_INFO_RSP:
5455 l2cap_information_rsp(conn, cmd, cmd_len, data);
5458 case L2CAP_CREATE_CHAN_REQ:
5459 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5462 case L2CAP_MOVE_CHAN_REQ:
5463 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5466 case L2CAP_MOVE_CHAN_RSP:
5467 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5470 case L2CAP_MOVE_CHAN_CFM:
5471 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5474 case L2CAP_MOVE_CHAN_CFM_RSP:
5475 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5479 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request. Finds a listening
 * channel for the PSM, verifies security, SCID range and uniqueness,
 * spawns a new channel with LE flow control, and either defers
 * (FLAG_DEFER_SETUP — result PEND means "no response now") or
 * readies the channel and answers with an LE Connect Response. */
5487 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5488 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5491 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5492 struct l2cap_le_conn_rsp rsp;
5493 struct l2cap_chan *chan, *pchan;
5494 u16 dcid, scid, credits, mtu, mps;
5498 if (cmd_len != sizeof(*req))
5501 scid = __le16_to_cpu(req->scid);
5502 mtu = __le16_to_cpu(req->mtu);
5503 mps = __le16_to_cpu(req->mps);
/* Spec minimum for LE CoC MTU/MPS is 23. */
5508 if (mtu < 23 || mps < 23)
5511 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5514 /* Check if we have socket listening on psm */
5515 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5516 &conn->hcon->dst, LE_LINK);
5518 result = L2CAP_CR_BAD_PSM;
5523 mutex_lock(&conn->chan_lock);
5524 l2cap_chan_lock(pchan);
5526 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5528 result = L2CAP_CR_AUTHENTICATION;
5530 goto response_unlock;
5533 /* Check for valid dynamic CID range */
5534 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5535 result = L2CAP_CR_INVALID_SCID;
5537 goto response_unlock;
5540 /* Check if we already have channel with that dcid */
5541 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5542 result = L2CAP_CR_SCID_IN_USE;
5544 goto response_unlock;
5547 chan = pchan->ops->new_connection(pchan);
5549 result = L2CAP_CR_NO_MEM;
5550 goto response_unlock;
5553 l2cap_le_flowctl_init(chan);
/* Bind addresses/types of the new channel to this connection. */
5555 bacpy(&chan->src, &conn->hcon->src);
5556 bacpy(&chan->dst, &conn->hcon->dst);
5557 chan->src_type = bdaddr_src_type(conn->hcon);
5558 chan->dst_type = bdaddr_dst_type(conn->hcon);
5562 chan->remote_mps = mps;
5563 chan->tx_credits = __le16_to_cpu(req->credits);
5565 __l2cap_chan_add(conn, chan);
5567 credits = chan->rx_credits;
5569 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5571 chan->ident = cmd->ident;
5573 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5574 l2cap_state_change(chan, BT_CONNECT2);
5575 /* The following result value is actually not defined
5576 * for LE CoC but we use it to let the function know
5577 * that it should bail out after doing its cleanup
5578 * instead of sending a response.
5580 result = L2CAP_CR_PEND;
5581 chan->ops->defer(chan);
5583 l2cap_chan_ready(chan);
5584 result = L2CAP_CR_SUCCESS;
5588 l2cap_chan_unlock(pchan);
5589 mutex_unlock(&conn->chan_lock);
5590 l2cap_chan_put(pchan);
/* Deferred setup: response will be sent later by the owner. */
5592 if (result == L2CAP_CR_PEND)
5597 rsp.mtu = cpu_to_le16(chan->imtu);
5598 rsp.mps = cpu_to_le16(chan->mps);
5604 rsp.dcid = cpu_to_le16(dcid);
5605 rsp.credits = cpu_to_le16(credits);
5606 rsp.result = cpu_to_le16(result);
5608 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the granted credits
 * to the channel's tx_credits, guarding against the spec-mandated
 * 65535 total-credit ceiling (overflow -> disconnect), then resume
 * any transmission that was stalled on credits. */
5613 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5614 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5617 struct l2cap_le_credits *pkt;
5618 struct l2cap_chan *chan;
5619 u16 cid, credits, max_credits;
5621 if (cmd_len != sizeof(*pkt))
5624 pkt = (struct l2cap_le_credits *) data;
5625 cid = __le16_to_cpu(pkt->cid);
5626 credits = __le16_to_cpu(pkt->credits);
5628 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5630 chan = l2cap_get_chan_by_dcid(conn, cid);
/* Room left before hitting LE_FLOWCTL_MAX_CREDITS (65535). */
5634 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5635 if (credits > max_credits) {
5636 BT_ERR("LE credits overflow");
5637 l2cap_send_disconn_req(chan, ECONNRESET);
5638 l2cap_chan_unlock(chan);
5640 /* Return 0 so that we don't trigger an unnecessary
5641 * command reject packet.
5646 chan->tx_credits += credits;
5648 /* Resume sending */
5649 l2cap_le_flowctl_send(chan);
5651 if (chan->tx_credits)
5652 chan->ops->resume(chan);
5654 l2cap_chan_unlock(chan);
/* Handle an LE Command Reject: if a pending channel matches the
 * rejected ident, tear it down with ECONNREFUSED. */
5659 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5660 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5663 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5664 struct l2cap_chan *chan;
5666 if (cmd_len < sizeof(*rej))
5669 mutex_lock(&conn->chan_lock);
/* The rejected command's ident identifies the waiting channel. */
5671 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5675 l2cap_chan_lock(chan);
5676 l2cap_chan_del(chan, ECONNREFUSED);
5677 l2cap_chan_unlock(chan);
5680 mutex_unlock(&conn->chan_lock);
/* Dispatch a single LE signaling command to its handler based on
 * cmd->code; unknown codes are logged with BT_ERR. */
5684 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5685 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5690 switch (cmd->code) {
5691 case L2CAP_COMMAND_REJ:
5692 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5695 case L2CAP_CONN_PARAM_UPDATE_REQ:
5696 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
/* Update responses need no action here. */
5699 case L2CAP_CONN_PARAM_UPDATE_RSP:
5702 case L2CAP_LE_CONN_RSP:
5703 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5706 case L2CAP_LE_CONN_REQ:
5707 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5710 case L2CAP_LE_CREDITS:
5711 err = l2cap_le_credits(conn, cmd, cmd_len, data);
/* Disconnect handling is shared with the BR/EDR path. */
5714 case L2CAP_DISCONN_REQ:
5715 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5718 case L2CAP_DISCONN_RSP:
5719 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5723 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an inbound PDU on the LE signaling channel (CID 0x0005).
 * LE signaling carries exactly one command per PDU; validates the link
 * type, header, length and ident, dispatches via l2cap_le_sig_cmd(),
 * and answers failures with a Command Reject.  (View is elided.)
 */
5731 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5732 struct sk_buff *skb)
5734 struct hci_conn *hcon = conn->hcon;
5735 struct l2cap_cmd_hdr *cmd;
/* LE signaling is only valid on an LE link. */
5739 if (hcon->type != LE_LINK)
5742 if (skb->len < L2CAP_CMD_HDR_SIZE)
5745 cmd = (void *) skb->data;
5746 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5748 len = le16_to_cpu(cmd->len);
5750 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* The declared length must cover the whole remaining payload, and
 * ident 0 is reserved. */
5752 if (len != skb->len || !cmd->ident) {
5753 BT_DBG("corrupted command");
5757 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5759 struct l2cap_cmd_rej_unk rej;
5761 BT_ERR("Wrong link type (%d)", err);
5763 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5764 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an inbound PDU on the BR/EDR signaling channel (CID 0x0001).
 * Unlike LE, a single BR/EDR signaling PDU may contain multiple
 * commands, hence the while-loop walking header+payload pairs.
 * Raw sockets get a copy via l2cap_raw_recv() first.  (View is elided;
 * some advance/cleanup lines are missing.)
 */
5772 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5773 struct sk_buff *skb)
5775 struct hci_conn *hcon = conn->hcon;
5776 u8 *data = skb->data;
5778 struct l2cap_cmd_hdr cmd;
5781 l2cap_raw_recv(conn, skb);
/* BR/EDR signaling is only valid on an ACL link. */
5783 if (hcon->type != ACL_LINK)
/* Iterate over every complete command header in the PDU. */
5786 while (len >= L2CAP_CMD_HDR_SIZE) {
5788 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5789 data += L2CAP_CMD_HDR_SIZE;
5790 len -= L2CAP_CMD_HDR_SIZE;
5792 cmd_len = le16_to_cpu(cmd.len);
5794 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* A command claiming more payload than remains, or ident 0,
 * means the PDU is corrupt -- stop parsing. */
5797 if (cmd_len > len || !cmd.ident) {
5798 BT_DBG("corrupted command");
5802 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5804 struct l2cap_cmd_rej_unk rej;
5806 BT_ERR("Wrong link type (%d)", err);
5808 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5809 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 Frame Check Sequence on an ERTM/streaming frame.
 * The CRC covers the L2CAP header (which lives just before skb->data,
 * hence the "skb->data - hdr_size") plus the payload; the trailing
 * 2-byte FCS is trimmed off before comparison.  (View is elided; the
 * error/success return lines are missing.)
 */
5821 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5823 u16 our_fcs, rcv_fcs;
/* Extended control fields make the header 4 bytes instead of 2. */
5826 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5827 hdr_size = L2CAP_EXT_HDR_SIZE;
5829 hdr_size = L2CAP_ENH_HDR_SIZE;
5831 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Drop the FCS from the skb, then read it from the (still
 * valid) bytes just past the new tail. */
5832 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5833 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5834 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5836 if (our_fcs != rcv_fcs)
/* Answer a poll (P=1) from the peer with a frame carrying F=1.
 * Sends RNR if we are locally busy; otherwise tries to piggyback the
 * F-bit on pending I-frames, and falls back to an RR S-frame if no
 * I-frame went out with it.  (View is elided.)
 */
5842 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5844 struct l2cap_ctrl control;
5846 BT_DBG("chan %p", chan);
5848 memset(&control, 0, sizeof(control));
5851 control.reqseq = chan->buffer_seq;
/* Mark that the final bit must be sent in the next frame. */
5852 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5854 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5855 control.super = L2CAP_SUPER_RNR;
5856 l2cap_send_sframe(chan, &control);
/* Peer just left its busy state: restart the retransmission
 * timer if frames are still unacknowledged. */
5859 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5860 chan->unacked_frames > 0)
5861 __set_retrans_timer(chan);
5863 /* Send pending iframes */
5864 l2cap_ertm_send(chan);
5866 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5867 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5868 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5871 control.super = L2CAP_SUPER_RR;
5872 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, using *last_frag as a cached
 * tail pointer to avoid walking the list, and fix up the parent skb's
 * accounting (len, data_len, truesize).  (View is elided; the
 * else-branch structure is partially missing.)
 */
5876 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5877 struct sk_buff **last_frag)
5879 /* skb->len reflects data in skb as well as all fragments
5880 * skb->data_len reflects only data in fragments
5882 if (!skb_has_frag_list(skb))
5883 skb_shinfo(skb)->frag_list = new_frag;
5885 new_frag->next = NULL;
5887 (*last_frag)->next = new_frag;
5888 *last_frag = new_frag;
5890 skb->len += new_frag->len;
5891 skb->data_len += new_frag->len;
5892 skb->truesize += new_frag->truesize;
/* Reassemble ERTM/streaming segments into a full SDU according to the
 * frame's SAR (Segmentation And Reassembly) bits, delivering complete
 * SDUs via chan->ops->recv().  Partial SDU state is kept in chan->sdu,
 * chan->sdu_last_frag and chan->sdu_len.  (View is heavily elided:
 * several validation branches, the SAR_END case label, and the error
 * cleanup path are only partially visible.)
 */
5895 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5896 struct l2cap_ctrl *control)
5900 switch (control->sar) {
5901 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame -- hand it straight up. */
5905 err = chan->ops->recv(chan, skb);
5908 case L2CAP_SAR_START:
/* First segment carries a 2-byte SDU length prefix. */
5912 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5915 chan->sdu_len = get_unaligned_le16(skb->data);
5916 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* SDU larger than our MTU is a protocol violation. */
5918 if (chan->sdu_len > chan->imtu) {
5923 if (skb->len >= chan->sdu_len)
5927 chan->sdu_last_frag = skb;
5933 case L2CAP_SAR_CONTINUE:
5937 append_skb_frag(chan->sdu, skb,
5938 &chan->sdu_last_frag);
/* A continuation must not already complete the SDU. */
5941 if (chan->sdu->len >= chan->sdu_len)
/* (SAR_END case -- label elided from this view) */
5951 append_skb_frag(chan->sdu, skb,
5952 &chan->sdu_last_frag);
5955 if (chan->sdu->len != chan->sdu_len)
5958 err = chan->ops->recv(chan, chan->sdu);
5961 /* Reassembly complete */
5963 chan->sdu_last_frag = NULL;
/* Error path: discard any partial SDU and reset state. */
5971 kfree_skb(chan->sdu);
5973 chan->sdu_last_frag = NULL;
/* Re-segment outgoing data after an MTU change (body elided from this
 * view -- presumably a no-op placeholder; confirm against upstream). */
5980 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition into the ERTM TX state machine.
 * Only meaningful in ERTM mode; other modes return early. */
5986 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5990 if (chan->mode != L2CAP_MODE_ERTM)
5993 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5994 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver consecutively-numbered I-frames that
 * were held while waiting for retransmissions, stopping at the first
 * gap or on local busy.  When the queue empties we leave SREJ_SENT
 * state and ack the peer.  (View is elided.)
 */
5997 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6000 /* Pass sequential frames to l2cap_reassemble_sdu()
6001 * until a gap is encountered.
6004 BT_DBG("chan %p", chan);
6006 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6007 struct sk_buff *skb;
6008 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6009 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6011 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6016 skb_unlink(skb, &chan->srej_q);
6017 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6018 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6023 if (skb_queue_empty(&chan->srej_q)) {
6024 chan->rx_state = L2CAP_RX_STATE_RECV;
6025 l2cap_send_ack(chan);
/* Handle a received SREJ (Selective Reject) S-frame: retransmit the
 * single frame named by reqseq, subject to validity and retry-limit
 * checks.  P/F bits control whether the retransmission also answers a
 * poll or final.  (View is elided; returns between branches missing.)
 */
6031 static void l2cap_handle_srej(struct l2cap_chan *chan,
6032 struct l2cap_ctrl *control)
6034 struct sk_buff *skb;
6036 BT_DBG("chan %p, control %p", chan, control);
/* SREJ for the next unsent sequence number can't be valid. */
6038 if (control->reqseq == chan->next_tx_seq) {
6039 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6040 l2cap_send_disconn_req(chan, ECONNRESET);
6044 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6047 BT_DBG("Seq %d not available for retransmission",
/* Enforce the max_tx retry limit (0 means unlimited). */
6052 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6053 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6054 l2cap_send_disconn_req(chan, ECONNRESET);
6058 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6060 if (control->poll) {
6061 l2cap_pass_to_tx(chan, control);
/* Poll set: answer with F=1 on the retransmitted frame and
 * remember the reqseq so a duplicate SREJ can be detected. */
6063 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6064 l2cap_retransmit(chan, control);
6065 l2cap_ertm_send(chan);
6067 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6068 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6069 chan->srej_save_reqseq = control->reqseq;
6072 l2cap_pass_to_tx_fbit(chan, control);
6074 if (control->final) {
/* Only retransmit if this F=1 SREJ isn't the answer to
 * our own earlier poll (CONN_SREJ_ACT bookkeeping). */
6075 if (chan->srej_save_reqseq != control->reqseq ||
6076 !test_and_clear_bit(CONN_SREJ_ACT,
6078 l2cap_retransmit(chan, control);
6080 l2cap_retransmit(chan, control);
6081 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6082 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6083 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit everything from reqseq
 * onward, after validating the sequence number and retry limit.
 * CONN_REJ_ACT prevents retransmitting twice for the poll/final
 * exchange.  (View is elided.)
 */
6089 static void l2cap_handle_rej(struct l2cap_chan *chan,
6090 struct l2cap_ctrl *control)
6092 struct sk_buff *skb;
6094 BT_DBG("chan %p, control %p", chan, control);
/* REJ of the next unsent sequence number is invalid. */
6096 if (control->reqseq == chan->next_tx_seq) {
6097 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6098 l2cap_send_disconn_req(chan, ECONNRESET);
6102 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6104 if (chan->max_tx && skb &&
6105 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6106 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6107 l2cap_send_disconn_req(chan, ECONNRESET);
6111 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6113 l2cap_pass_to_tx(chan, control);
6115 if (control->final) {
/* F=1: retransmit only if this REJ wasn't already acted on. */
6116 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6117 l2cap_retransmit_all(chan, control);
6119 l2cap_retransmit_all(chan, control);
6120 l2cap_ertm_send(chan);
6121 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6122 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify the TxSeq of a received I-frame relative to the RX window:
 * EXPECTED, UNEXPECTED (gap), DUPLICATE, their *_SREJ variants while
 * in SREJ_SENT state, or INVALID / INVALID_IGNORE for out-of-window
 * sequence numbers.  The return value drives the RX state machines.
 * (View is elided; some closing braces are missing.)
 */
6126 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6128 BT_DBG("chan %p, txseq %d", chan, txseq);
6130 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6131 chan->expected_tx_seq);
6133 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
/* Out-of-window while SREJ is outstanding. */
6134 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6136 /* See notes below regarding "double poll" and
6139 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6140 BT_DBG("Invalid/Ignore - after SREJ");
6141 return L2CAP_TXSEQ_INVALID_IGNORE;
6143 BT_DBG("Invalid - in window after SREJ sent");
6144 return L2CAP_TXSEQ_INVALID;
6148 if (chan->srej_list.head == txseq) {
6149 BT_DBG("Expected SREJ");
6150 return L2CAP_TXSEQ_EXPECTED_SREJ;
6153 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6154 BT_DBG("Duplicate SREJ - txseq already stored");
6155 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6158 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6159 BT_DBG("Unexpected SREJ - not requested");
6160 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6164 if (chan->expected_tx_seq == txseq) {
/* Even the expected seq is invalid if it falls outside
 * the negotiated TX window. */
6165 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6167 BT_DBG("Invalid - txseq outside tx window");
6168 return L2CAP_TXSEQ_INVALID;
6171 return L2CAP_TXSEQ_EXPECTED;
/* txseq before expected_tx_seq (mod window) => already seen. */
6175 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6176 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6177 BT_DBG("Duplicate - expected_tx_seq later than txseq")
6178 return L2CAP_TXSEQ_DUPLICATE;
6181 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6182 /* A source of invalid packets is a "double poll" condition,
6183 * where delays cause us to send multiple poll packets. If
6184 * the remote stack receives and processes both polls,
6185 * sequence numbers can wrap around in such a way that a
6186 * resent frame has a sequence number that looks like new data
6187 * with a sequence gap. This would trigger an erroneous SREJ
6190 * Fortunately, this is impossible with a tx window that's
6191 * less than half of the maximum sequence number, which allows
6192 * invalid frames to be safely ignored.
6194 * With tx window sizes greater than half of the tx window
6195 * maximum, the frame is invalid and cannot be ignored. This
6196 * causes a disconnect.
6199 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6200 BT_DBG("Invalid/Ignore - txseq outside tx window");
6201 return L2CAP_TXSEQ_INVALID_IGNORE;
6203 BT_DBG("Invalid - txseq outside tx window");
6204 return L2CAP_TXSEQ_INVALID;
6207 BT_DBG("Unexpected - txseq indicates missing frames");
6208 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine, RECV state: handle I-frames (in-order
 * delivery, SREJ recovery entry on gaps) and RR/RNR/REJ/SREJ S-frames.
 * skb_in_use tracks whether ownership of skb was transferred (queued
 * or delivered); otherwise it is freed at the end.  (View is elided;
 * break statements and some assignments are missing from this view.)
 */
6212 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6213 struct l2cap_ctrl *control,
6214 struct sk_buff *skb, u8 event)
6217 bool skb_in_use = false;
6219 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6223 case L2CAP_EV_RECV_IFRAME:
6224 switch (l2cap_classify_txseq(chan, control->txseq)) {
6225 case L2CAP_TXSEQ_EXPECTED:
6226 l2cap_pass_to_tx(chan, control);
/* Locally busy: drop the frame; it will be recovered
 * via retransmission once busy clears. */
6228 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6229 BT_DBG("Busy, discarding expected seq %d",
6234 chan->expected_tx_seq = __next_seq(chan,
6237 chan->buffer_seq = chan->expected_tx_seq;
6240 err = l2cap_reassemble_sdu(chan, skb, control);
6244 if (control->final) {
6245 if (!test_and_clear_bit(CONN_REJ_ACT,
6246 &chan->conn_state)) {
6248 l2cap_retransmit_all(chan, control);
6249 l2cap_ertm_send(chan);
6253 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6254 l2cap_send_ack(chan);
6256 case L2CAP_TXSEQ_UNEXPECTED:
6257 l2cap_pass_to_tx(chan, control);
6259 /* Can't issue SREJ frames in the local busy state.
6260 * Drop this frame, it will be seen as missing
6261 * when local busy is exited.
6263 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6264 BT_DBG("Busy, discarding unexpected seq %d",
6269 /* There was a gap in the sequence, so an SREJ
6270 * must be sent for each missing frame. The
6271 * current frame is stored for later use.
6273 skb_queue_tail(&chan->srej_q, skb);
6275 BT_DBG("Queued %p (queue len %d)", skb,
6276 skb_queue_len(&chan->srej_q));
6278 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6279 l2cap_seq_list_clear(&chan->srej_list);
6280 l2cap_send_srej(chan, control->txseq);
6282 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6284 case L2CAP_TXSEQ_DUPLICATE:
6285 l2cap_pass_to_tx(chan, control);
6287 case L2CAP_TXSEQ_INVALID_IGNORE:
6289 case L2CAP_TXSEQ_INVALID:
6291 l2cap_send_disconn_req(chan, ECONNRESET);
6295 case L2CAP_EV_RECV_RR:
6296 l2cap_pass_to_tx(chan, control);
6297 if (control->final) {
6298 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6300 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6301 !__chan_is_moving(chan)) {
6303 l2cap_retransmit_all(chan, control);
6306 l2cap_ertm_send(chan);
6307 } else if (control->poll) {
/* Peer polled us: respond with F=1. */
6308 l2cap_send_i_or_rr_or_rnr(chan);
6310 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6311 &chan->conn_state) &&
6312 chan->unacked_frames)
6313 __set_retrans_timer(chan);
6315 l2cap_ertm_send(chan);
6318 case L2CAP_EV_RECV_RNR:
6319 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6320 l2cap_pass_to_tx(chan, control);
6321 if (control && control->poll) {
6322 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6323 l2cap_send_rr_or_rnr(chan, 0);
/* Remote is busy: stop retransmitting until it clears. */
6325 __clear_retrans_timer(chan);
6326 l2cap_seq_list_clear(&chan->retrans_list);
6328 case L2CAP_EV_RECV_REJ:
6329 l2cap_handle_rej(chan, control);
6331 case L2CAP_EV_RECV_SREJ:
6332 l2cap_handle_srej(chan, control);
/* Ownership was not transferred -- release the frame. */
6338 if (skb && !skb_in_use) {
6339 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine, SREJ_SENT state: we have outstanding SREJs,
 * so in-window frames are parked in srej_q until the requested
 * retransmissions arrive, at which point l2cap_rx_queued_iframes()
 * drains them in order.  (View is elided; breaks, skb_in_use
 * assignments and the closing structure are partially missing.)
 */
6346 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6347 struct l2cap_ctrl *control,
6348 struct sk_buff *skb, u8 event)
6351 u16 txseq = control->txseq;
6352 bool skb_in_use = false;
6354 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6358 case L2CAP_EV_RECV_IFRAME:
6359 switch (l2cap_classify_txseq(chan, txseq)) {
6360 case L2CAP_TXSEQ_EXPECTED:
6361 /* Keep frame for reassembly later */
6362 l2cap_pass_to_tx(chan, control);
6363 skb_queue_tail(&chan->srej_q, skb);
6365 BT_DBG("Queued %p (queue len %d)", skb,
6366 skb_queue_len(&chan->srej_q));
6368 chan->expected_tx_seq = __next_seq(chan, txseq);
6370 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This is the retransmission we asked for first. */
6371 l2cap_seq_list_pop(&chan->srej_list);
6373 l2cap_pass_to_tx(chan, control);
6374 skb_queue_tail(&chan->srej_q, skb);
6376 BT_DBG("Queued %p (queue len %d)", skb,
6377 skb_queue_len(&chan->srej_q));
6379 err = l2cap_rx_queued_iframes(chan);
6384 case L2CAP_TXSEQ_UNEXPECTED:
6385 /* Got a frame that can't be reassembled yet.
6386 * Save it for later, and send SREJs to cover
6387 * the missing frames.
6389 skb_queue_tail(&chan->srej_q, skb);
6391 BT_DBG("Queued %p (queue len %d)", skb,
6392 skb_queue_len(&chan->srej_q));
6394 l2cap_pass_to_tx(chan, control);
6395 l2cap_send_srej(chan, control->txseq);
6397 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6398 /* This frame was requested with an SREJ, but
6399 * some expected retransmitted frames are
6400 * missing. Request retransmission of missing
6403 skb_queue_tail(&chan->srej_q, skb);
6405 BT_DBG("Queued %p (queue len %d)", skb,
6406 skb_queue_len(&chan->srej_q));
6408 l2cap_pass_to_tx(chan, control);
6409 l2cap_send_srej_list(chan, control->txseq);
6411 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6412 /* We've already queued this frame. Drop this copy. */
6413 l2cap_pass_to_tx(chan, control);
6415 case L2CAP_TXSEQ_DUPLICATE:
6416 /* Expecting a later sequence number, so this frame
6417 * was already received. Ignore it completely.
6420 case L2CAP_TXSEQ_INVALID_IGNORE:
6422 case L2CAP_TXSEQ_INVALID:
6424 l2cap_send_disconn_req(chan, ECONNRESET);
6428 case L2CAP_EV_RECV_RR:
6429 l2cap_pass_to_tx(chan, control);
6430 if (control->final) {
6431 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6433 if (!test_and_clear_bit(CONN_REJ_ACT,
6434 &chan->conn_state)) {
6436 l2cap_retransmit_all(chan, control);
6439 l2cap_ertm_send(chan);
6440 } else if (control->poll) {
6441 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6442 &chan->conn_state) &&
6443 chan->unacked_frames) {
6444 __set_retrans_timer(chan);
/* Answer the poll by re-requesting the tail SREJ
 * with F=1. */
6447 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6448 l2cap_send_srej_tail(chan);
6450 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6451 &chan->conn_state) &&
6452 chan->unacked_frames)
6453 __set_retrans_timer(chan);
6455 l2cap_send_ack(chan);
6458 case L2CAP_EV_RECV_RNR:
6459 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6460 l2cap_pass_to_tx(chan, control);
6461 if (control->poll) {
6462 l2cap_send_srej_tail(chan);
/* No poll: just acknowledge with a plain RR. */
6464 struct l2cap_ctrl rr_control;
6465 memset(&rr_control, 0, sizeof(rr_control));
6466 rr_control.sframe = 1;
6467 rr_control.super = L2CAP_SUPER_RR;
6468 rr_control.reqseq = chan->buffer_seq;
6469 l2cap_send_sframe(chan, &rr_control);
6473 case L2CAP_EV_RECV_REJ:
6474 l2cap_handle_rej(chan, control);
6476 case L2CAP_EV_RECV_SREJ:
6477 l2cap_handle_srej(chan, control);
/* Ownership was not transferred -- release the frame. */
6481 if (skb && !skb_in_use) {
6482 BT_DBG("Freeing %p", skb);
/* Finalize an AMP channel move: return the RX state machine to RECV,
 * adopt the MTU of the link now carrying the channel (AMP block MTU
 * vs. BR/EDR ACL MTU), and re-segment pending data.  (The hs_hcon
 * conditional is elided from this view.)
 */
6489 static int l2cap_finish_move(struct l2cap_chan *chan)
6491 BT_DBG("chan %p", chan);
6493 chan->rx_state = L2CAP_RX_STATE_RECV;
6496 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6498 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6500 return l2cap_resegment(chan);
/* ERTM RX state machine, WAIT_P state (channel move): wait for a
 * frame with P=1, then resynchronize the TX side to the peer's
 * reqseq, finish the move, and replay the triggering event through
 * the RECV-state handler.  (View is elided; the poll check and event
 * rewrite are partially missing.)
 */
6503 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6504 struct l2cap_ctrl *control,
6505 struct sk_buff *skb, u8 event)
6509 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6515 l2cap_process_reqseq(chan, control->reqseq);
6517 if (!skb_queue_empty(&chan->tx_q))
6518 chan->tx_send_head = skb_peek(&chan->tx_q);
6520 chan->tx_send_head = NULL;
6522 /* Rewind next_tx_seq to the point expected
6525 chan->next_tx_seq = control->reqseq;
6526 chan->unacked_frames = 0;
6528 err = l2cap_finish_move(chan);
6532 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6533 l2cap_send_i_or_rr_or_rnr(chan);
6535 if (event == L2CAP_EV_RECV_IFRAME)
/* S-frame events are re-dispatched without the skb. */
6538 return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM RX state machine, WAIT_F state (channel move): wait for a
 * frame with F=1, then resynchronize TX to the peer's reqseq, adopt
 * the new link's MTU, re-segment, and process the frame in RECV
 * state.  (View is elided; the hs_hcon conditional is missing.)
 */
6541 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6542 struct l2cap_ctrl *control,
6543 struct sk_buff *skb, u8 event)
/* Ignore everything until the final bit arrives. */
6547 if (!control->final)
6550 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6552 chan->rx_state = L2CAP_RX_STATE_RECV;
6553 l2cap_process_reqseq(chan, control->reqseq);
6555 if (!skb_queue_empty(&chan->tx_q))
6556 chan->tx_send_head = skb_peek(&chan->tx_q);
6558 chan->tx_send_head = NULL;
6560 /* Rewind next_tx_seq to the point expected
6563 chan->next_tx_seq = control->reqseq;
6564 chan->unacked_frames = 0;
6567 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6569 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6571 err = l2cap_resegment(chan);
6574 err = l2cap_rx_state_recv(chan, control, skb, event);
/* A valid ReqSeq must acknowledge a frame in [expected_ack_seq,
 * next_tx_seq] -- i.e. something sent but not yet acked (modular
 * arithmetic via __seq_offset). */
6579 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6581 /* Make sure reqseq is for a packet that has been sent but not acked */
6584 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6585 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM RX dispatcher: validate the frame's ReqSeq, then
 * route the event to the handler for the current rx_state.  An
 * invalid ReqSeq is a fatal protocol error (disconnect).  (View is
 * elided; breaks and the skb-free path are missing.)
 */
6588 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6589 struct sk_buff *skb, u8 event)
6593 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6594 control, skb, event, chan->rx_state);
6596 if (__valid_reqseq(chan, control->reqseq)) {
6597 switch (chan->rx_state) {
6598 case L2CAP_RX_STATE_RECV:
6599 err = l2cap_rx_state_recv(chan, control, skb, event);
6601 case L2CAP_RX_STATE_SREJ_SENT:
6602 err = l2cap_rx_state_srej_sent(chan, control, skb,
6605 case L2CAP_RX_STATE_WAIT_P:
6606 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6608 case L2CAP_RX_STATE_WAIT_F:
6609 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6616 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6617 control->reqseq, chan->next_tx_seq,
6618 chan->expected_ack_seq);
6619 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode RX: deliver only frames with the expected TxSeq;
 * anything else (a gap) discards any partial SDU in progress and the
 * frame itself -- streaming mode never retransmits.  Sequence state
 * is advanced unconditionally at the end.  (View is elided.)
 */
6625 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6626 struct sk_buff *skb)
6628 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6631 if (l2cap_classify_txseq(chan, control->txseq) ==
6632 L2CAP_TXSEQ_EXPECTED) {
6633 l2cap_pass_to_tx(chan, control);
6635 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6636 __next_seq(chan, chan->buffer_seq));
6638 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6640 l2cap_reassemble_sdu(chan, skb, control);
/* Gap detected: abandon the partial SDU and drop the frame. */
6643 kfree_skb(chan->sdu);
6646 chan->sdu_last_frag = NULL;
6650 BT_DBG("Freeing %p", skb);
/* Track the peer's sequence numbers even for dropped frames. */
6655 chan->last_acked_seq = control->txseq;
6656 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames: unpack the control
 * field, verify FCS, validate lengths and F/P bit combinations, then
 * route I-frames to l2cap_rx()/l2cap_stream_rx() and S-frames to
 * l2cap_rx() via the super-field-to-event table.  (View is elided;
 * the drop label and several returns are missing.)
 */
6661 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6663 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6667 __unpack_control(chan, skb);
6672 * We can just drop the corrupted I-frame here.
6673 * Receiver will miss it and start proper recovery
6674 * procedures and ask for retransmission.
6676 if (l2cap_check_fcs(chan, skb))
/* Payload length excludes the SDU-length prefix (SAR start)
 * and the trailing FCS, if present. */
6679 if (!control->sframe && control->sar == L2CAP_SAR_START)
6680 len -= L2CAP_SDULEN_SIZE;
6682 if (chan->fcs == L2CAP_FCS_CRC16)
6683 len -= L2CAP_FCS_SIZE;
/* Payload bigger than the negotiated MPS is a protocol error. */
6685 if (len > chan->mps) {
6686 l2cap_send_disconn_req(chan, ECONNRESET);
/* Apply any attached socket filter to data-bearing modes. */
6690 if ((chan->mode == L2CAP_MODE_ERTM ||
6691 chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
6694 if (!control->sframe) {
6697 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6698 control->sar, control->reqseq, control->final,
6701 /* Validate F-bit - F=0 always valid, F=1 only
6702 * valid in TX WAIT_F
6704 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6707 if (chan->mode != L2CAP_MODE_STREAMING) {
6708 event = L2CAP_EV_RECV_IFRAME;
6709 err = l2cap_rx(chan, control, skb, event);
6711 err = l2cap_stream_rx(chan, control, skb);
6715 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit S-frame "super" field to an RX event. */
6717 const u8 rx_func_to_event[4] = {
6718 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6719 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6722 /* Only I-frames are expected in streaming mode */
6723 if (chan->mode == L2CAP_MODE_STREAMING)
6726 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6727 control->reqseq, control->final, control->poll,
/* S-frames must carry no payload. */
6731 BT_ERR("Trailing bytes: %d in sframe", len);
6732 l2cap_send_disconn_req(chan, ECONNRESET);
6736 /* Validate F and P bits */
6737 if (control->final && (control->poll ||
6738 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6741 event = rx_func_to_event[control->super];
6742 if (l2cap_rx(chan, control, skb, event))
6743 l2cap_send_disconn_req(chan, ECONNRESET);
/* Top up the peer's credits for LE credit-based flow control: once our
 * rx_credits drops below half of le_max_credits, send an LE Flow
 * Control Credit packet restoring the count to le_max_credits. */
6753 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6755 struct l2cap_conn *conn = chan->conn;
6756 struct l2cap_le_credits pkt;
6759 /* We return more credits to the sender only after the amount of
6760 * credits falls below half of the initial amount.
6762 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6765 return_credits = le_max_credits - chan->rx_credits;
6767 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
/* Account for the granted credits locally before sending. */
6769 chan->rx_credits += return_credits;
6771 pkt.cid = cpu_to_le16(chan->scid);
6772 pkt.credits = cpu_to_le16(return_credits);
6774 chan->ident = l2cap_get_ident(conn);
6776 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive a PDU on an LE credit-based channel: enforce credits,
 * reassemble multi-PDU SDUs (first PDU carries a 2-byte SDU length),
 * and deliver complete SDUs via chan->ops->recv().  Always returns 0
 * on internal failure paths because the skb is freed here (see the
 * comment at the bottom).  (View is elided: the rx_credits decrement
 * and the "first PDU?" branch condition are among the missing lines.)
 */
6779 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* Data without credits is a protocol violation -- disconnect. */
6783 if (!chan->rx_credits) {
6784 BT_ERR("No credits to receive LE L2CAP data");
6785 l2cap_send_disconn_req(chan, ECONNRESET);
6789 if (chan->imtu < skb->len) {
6790 BT_ERR("Too big LE L2CAP PDU");
6795 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6797 l2cap_chan_le_send_credits(chan);
/* First PDU of an SDU: read and strip the SDU length prefix. */
6804 sdu_len = get_unaligned_le16(skb->data);
6805 skb_pull(skb, L2CAP_SDULEN_SIZE);
6807 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6808 sdu_len, skb->len, chan->imtu);
6810 if (sdu_len > chan->imtu) {
6811 BT_ERR("Too big LE L2CAP SDU length received");
6816 if (skb->len > sdu_len) {
6817 BT_ERR("Too much LE L2CAP data received");
/* Complete SDU in a single PDU -- deliver directly. */
6822 if (skb->len == sdu_len)
6823 return chan->ops->recv(chan, skb);
6826 chan->sdu_len = sdu_len;
6827 chan->sdu_last_frag = skb;
/* Continuation PDU: append to the SDU under reassembly. */
6832 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6833 chan->sdu->len, skb->len, chan->sdu_len);
6835 if (chan->sdu->len + skb->len > chan->sdu_len) {
6836 BT_ERR("Too much LE L2CAP data received");
6841 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6844 if (chan->sdu->len == chan->sdu_len) {
6845 err = chan->ops->recv(chan, chan->sdu);
6848 chan->sdu_last_frag = NULL;
/* Failure path: discard the partial SDU and reset state. */
6856 kfree_skb(chan->sdu);
6858 chan->sdu_last_frag = NULL;
6862 /* We can't return an error here since we took care of the skb
6863 * freeing internally. An error return would cause the caller to
6864 * do a double-free of the skb.
/* Route an inbound data frame to the channel identified by its source
 * CID and hand it to the mode-specific receiver (LE flow-control,
 * basic, or ERTM/streaming).  Unknown CIDs (except A2MP creation) drop
 * the packet.  (View is elided; drop/done labels and breaks missing.)
 */
6869 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6870 struct sk_buff *skb)
6872 struct l2cap_chan *chan;
6874 chan = l2cap_get_chan_by_scid(conn, cid);
/* No channel yet: the A2MP fixed CID may create one on demand. */
6876 if (cid == L2CAP_CID_A2MP) {
6877 chan = a2mp_channel_create(conn, skb);
6883 l2cap_chan_lock(chan);
6885 BT_DBG("unknown cid 0x%4.4x", cid);
6886 /* Drop packet and return */
6892 BT_DBG("chan %p, len %d", chan, skb->len);
6894 /* If we receive data on a fixed channel before the info req/rsp
6895 * procdure is done simply assume that the channel is supported
6896 * and mark it as ready.
6898 if (chan->chan_type == L2CAP_CHAN_FIXED)
6899 l2cap_chan_ready(chan);
6901 if (chan->state != BT_CONNECTED)
6904 switch (chan->mode) {
6905 case L2CAP_MODE_LE_FLOWCTL:
6906 if (l2cap_le_data_rcv(chan, skb) < 0)
6911 case L2CAP_MODE_BASIC:
6912 /* If socket recv buffers overflows we drop data here
6913 * which is *bad* because L2CAP has to be reliable.
6914 * But we don't have any other choice. L2CAP doesn't
6915 * provide flow control mechanism. */
6917 if (chan->imtu < skb->len) {
6918 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6922 if (!chan->ops->recv(chan, skb))
6926 case L2CAP_MODE_ERTM:
6927 case L2CAP_MODE_STREAMING:
6928 l2cap_data_rcv(chan, skb);
6932 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6940 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) datagram to a channel bound to
 * the given PSM, stamping the sender's BD_ADDR and PSM into the skb
 * control block for recvmsg's msg_name.  Only valid on ACL links.
 * (View is elided; drop/free paths are partially missing.)
 */
6943 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6944 struct sk_buff *skb)
6946 struct hci_conn *hcon = conn->hcon;
6947 struct l2cap_chan *chan;
6949 if (hcon->type != ACL_LINK)
/* Global lookup holds a reference; drop it on every exit path. */
6952 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6957 BT_DBG("chan %p, len %d", chan, skb->len);
6959 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6962 if (chan->imtu < skb->len)
6965 /* Store remote BD_ADDR and PSM for msg_name */
6966 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
6967 bt_cb(skb)->l2cap.psm = psm;
6969 if (!chan->ops->recv(chan, skb)) {
6970 l2cap_chan_put(chan);
6975 l2cap_chan_put(chan);
/* Demultiplex a complete L2CAP frame by CID: signaling channels,
 * connectionless data, or a dynamic/fixed data channel.  Frames that
 * arrive before the HCI connection is fully up are queued on
 * conn->pending_rx and replayed later by process_pending_rx().
 * (View is elided; drop paths and breaks are missing.)
 */
6980 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6982 struct l2cap_hdr *lh = (void *) skb->data;
6983 struct hci_conn *hcon = conn->hcon;
6987 if (hcon->state != BT_CONNECTED) {
6988 BT_DBG("queueing pending rx skb");
6989 skb_queue_tail(&conn->pending_rx, skb);
6993 skb_pull(skb, L2CAP_HDR_SIZE);
6994 cid = __le16_to_cpu(lh->cid);
6995 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload. */
6997 if (len != skb->len) {
7002 /* Since we can't actively block incoming LE connections we must
7003 * at least ensure that we ignore incoming data from them.
7005 if (hcon->type == LE_LINK &&
7006 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7007 bdaddr_dst_type(hcon))) {
7012 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7015 case L2CAP_CID_SIGNALING:
7016 l2cap_sig_channel(conn, skb);
7019 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a 2-byte PSM before the data. */
7020 psm = get_unaligned((__le16 *) skb->data);
7021 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7022 l2cap_conless_channel(conn, psm, skb);
7025 case L2CAP_CID_LE_SIGNALING:
7026 l2cap_le_sig_channel(conn, skb);
7030 l2cap_data_channel(conn, cid, skb);
/* Workqueue handler: replay frames that were queued while the HCI
 * connection was still coming up. */
7035 static void process_pending_rx(struct work_struct *work)
7037 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7039 struct sk_buff *skb;
7043 while ((skb = skb_dequeue(&conn->pending_rx)))
7044 l2cap_recv_frame(conn, skb);
/* Create and initialize the l2cap_conn object for an HCI connection:
 * allocate the HCI channel, choose the MTU from the link type (LE MTU
 * when set, else ACL MTU), advertise local fixed channels (signaling,
 * connectionless, optionally A2MP and BR/EDR SMP), and set up locks,
 * lists, timers and workqueue items.  Returns the existing conn if one
 * is already attached (early-return lines elided from this view).
 */
7047 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7049 struct l2cap_conn *conn = hcon->l2cap_data;
7050 struct hci_chan *hchan;
7055 hchan = hci_chan_create(hcon);
7059 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan before bailing out. */
7061 hci_chan_del(hchan);
7065 kref_init(&conn->ref);
7066 hcon->l2cap_data = conn;
7067 conn->hcon = hci_conn_get(hcon);
7068 conn->hchan = hchan;
7070 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7072 switch (hcon->type) {
/* LE link: prefer the controller's LE MTU when advertised. */
7074 if (hcon->hdev->le_mtu) {
7075 conn->mtu = hcon->hdev->le_mtu;
7080 conn->mtu = hcon->hdev->acl_mtu;
7084 conn->feat_mask = 0;
7086 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7088 if (hcon->type == ACL_LINK &&
7089 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7090 conn->local_fixed_chan |= L2CAP_FC_A2MP;
7092 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7093 (bredr_sc_enabled(hcon->hdev) ||
7094 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7095 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7097 mutex_init(&conn->ident_lock);
7098 mutex_init(&conn->chan_lock);
7100 INIT_LIST_HEAD(&conn->chan_l);
7101 INIT_LIST_HEAD(&conn->users);
7103 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7105 skb_queue_head_init(&conn->pending_rx);
7106 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7107 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7109 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE PSMs occupy a
 * single byte (<= 0x00ff); BR/EDR PSMs must be odd with bit 8 clear,
 * per the L2CAP spec. */
7114 static bool is_valid_psm(u16 psm, u8 dst_type) {
7118 if (bdaddr_type_is_le(dst_type))
7119 return (psm <= 0x00ff);
7121 /* PSM must be odd and lsb of upper byte must be 0 */
7122 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP connection on @chan to @dst/@dst_type,
 * addressed by PSM (connection-oriented) or fixed CID.  Validates the
 * PSM/CID/mode/state combination, establishes (or reuses) the HCI
 * link -- LE connect/connect-scan or ACL connect -- attaches the
 * channel to the l2cap_conn, and starts the L2CAP-level connect if
 * the link is already up.  Returns 0 or a negative errno.  (View is
 * elided; several error labels, returns and case labels are missing.)
 */
7125 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7126 bdaddr_t *dst, u8 dst_type)
7128 struct l2cap_conn *conn;
7129 struct hci_conn *hcon;
7130 struct hci_dev *hdev;
7133 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7134 dst_type, __le16_to_cpu(psm));
/* Pick the local adapter that can reach dst. */
7136 hdev = hci_get_route(dst, &chan->src, chan->src_type);
7138 return -EHOSTUNREACH;
7142 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7143 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need a PSM; fixed channels a CID. */
7148 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7153 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7158 switch (chan->mode) {
7159 case L2CAP_MODE_BASIC:
7161 case L2CAP_MODE_LE_FLOWCTL:
7162 l2cap_le_flowctl_init(chan);
7164 case L2CAP_MODE_ERTM:
7165 case L2CAP_MODE_STREAMING:
7174 switch (chan->state) {
7178 /* Already connecting */
7183 /* Already connected */
7197 /* Set destination address and psm */
7198 bacpy(&chan->dst, dst);
7199 chan->dst_type = dst_type;
7204 if (bdaddr_type_is_le(dst_type)) {
7205 /* Convert from L2CAP channel address type to HCI address type
7207 if (dst_type == BDADDR_LE_PUBLIC)
7208 dst_type = ADDR_LE_DEV_PUBLIC;
7210 dst_type = ADDR_LE_DEV_RANDOM;
/* While advertising we connect directly; otherwise go through
 * the connect-by-scan path. */
7212 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7213 hcon = hci_connect_le(hdev, dst, dst_type,
7215 HCI_LE_CONN_TIMEOUT,
7216 HCI_ROLE_SLAVE, NULL);
7218 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7220 HCI_LE_CONN_TIMEOUT);
7223 u8 auth_type = l2cap_get_auth_type(chan);
7224 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7228 err = PTR_ERR(hcon);
7232 conn = l2cap_conn_add(hcon);
7234 hci_conn_drop(hcon);
7239 mutex_lock(&conn->chan_lock);
7240 l2cap_chan_lock(chan);
/* Requested fixed CID already taken on this connection. */
7242 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7243 hci_conn_drop(hcon);
7248 /* Update source addr of the socket */
7249 bacpy(&chan->src, &hcon->src);
7250 chan->src_type = bdaddr_src_type(hcon);
7252 __l2cap_chan_add(conn, chan);
7254 /* l2cap_chan_add takes its own ref so we can drop this one */
7255 hci_conn_drop(hcon);
7257 l2cap_state_change(chan, BT_CONNECT);
7258 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7260 /* Release chan->sport so that it can be reused by other
7261 * sockets (as it's only used for listening sockets).
7263 write_lock(&chan_list_lock);
7265 write_unlock(&chan_list_lock);
7267 if (hcon->state == BT_CONNECTED) {
7268 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7269 __clear_chan_timer(chan);
7270 if (l2cap_chan_check_security(chan, true))
7271 l2cap_state_change(chan, BT_CONNECTED);
7273 l2cap_do_start(chan);
7279 l2cap_chan_unlock(chan);
7280 mutex_unlock(&conn->chan_lock);
7282 hci_dev_unlock(hdev);
7286 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7288 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* Incoming ACL connection indication from the HCI core.
 *
 * Walks the global channel list for listening channels bound either to
 * this adapter's own address or to the wildcard BDADDR_ANY, and builds
 * a link-mode mask (accept, and optionally master role) for each case.
 * Returns the exact-match mask when one was found, otherwise the
 * wildcard mask.
 *
 * NOTE(review): this extract has lines elided (braces and, apparently,
 * the statement that sets 'exact' on an address match are not visible);
 * the comments below describe only the visible logic.
 */
7290 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7292 int exact = 0, lm1 = 0, lm2 = 0;
7293 struct l2cap_chan *c;
7295 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7297 /* Find listening sockets and check their link_mode */
7298 read_lock(&chan_list_lock);
7299 list_for_each_entry(c, &chan_list, global_l) {
/* Only channels in the listening state are candidates. */
7300 if (c->state != BT_LISTEN)
/* lm1: channels bound exactly to this adapter's address. */
7303 if (!bacmp(&c->src, &hdev->bdaddr)) {
7304 lm1 |= HCI_LM_ACCEPT;
7305 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7306 lm1 |= HCI_LM_MASTER;
/* lm2: channels listening on the wildcard address. */
7308 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7309 lm2 |= HCI_LM_ACCEPT;
7310 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7311 lm2 |= HCI_LM_MASTER;
7314 read_unlock(&chan_list_lock);
/* Prefer the exact-address mask over the wildcard one. */
7316 return exact ? lm1 : lm2;
7319 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7320 * from an existing channel in the list or from the beginning of the
7321 * global list (by passing NULL as first parameter).
 *
 * A candidate must be a fixed channel, be listening, match the
 * connection's source address (or be bound to BDADDR_ANY), and have a
 * matching source address type.
 *
 * NOTE(review): lines are elided in this extract — the NULL check on
 * 'c', the 'continue' statements for the filters, and the return
 * statements (presumably returning the held channel or NULL) are not
 * visible.  The two read_unlock() calls below suggest a found path and
 * a not-found path.
 */
7323 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7324 struct hci_conn *hcon)
7326 u8 src_type = bdaddr_src_type(hcon);
7328 read_lock(&chan_list_lock);
/* Resume after the given channel, or start from the list head. */
7331 c = list_next_entry(c, global_l);
7333 c = list_entry(chan_list.next, typeof(*c), global_l);
7335 list_for_each_entry_from(c, &chan_list, global_l) {
7336 if (c->chan_type != L2CAP_CHAN_FIXED)
7338 if (c->state != BT_LISTEN)
7340 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7342 if (src_type != c->src_type)
/* Match found — unlock on the found path. */
7346 read_unlock(&chan_list_lock);
/* No match — unlock on the not-found path. */
7350 read_unlock(&chan_list_lock);
/* HCI callback: a new ACL or LE link has completed (or failed).
 *
 * On failure the L2CAP connection is torn down; on success an
 * l2cap_conn is attached to the hci_conn, all listening fixed channels
 * are notified so they can spawn per-connection channels, and finally
 * the connection is marked ready.
 *
 * NOTE(review): the extract elides the early returns, loop braces and
 * the "next" advancing of the pchan walk; comments describe only the
 * visible statements.
 */
7355 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7357 struct hci_dev *hdev = hcon->hdev;
7358 struct l2cap_conn *conn;
7359 struct l2cap_chan *pchan;
/* Only ACL and LE links carry L2CAP traffic. */
7362 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7365 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
/* Non-zero HCI status: drop any L2CAP state for this link. */
7368 l2cap_conn_del(hcon, bt_to_errno(status));
7372 conn = l2cap_conn_add(hcon);
7376 dst_type = bdaddr_dst_type(hcon);
7378 /* If device is blocked, do not create channels for it */
7379 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7382 /* Find fixed channels and notify them of the new connection. We
7383 * use multiple individual lookups, continuing each time where
7384 * we left off, because the list lock would prevent calling the
7385 * potentially sleeping l2cap_chan_lock() function.
 */
7387 pchan = l2cap_global_fixed_chan(NULL, hcon);
7389 struct l2cap_chan *chan, *next;
7391 /* Client fixed channels should override server ones */
7392 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7395 l2cap_chan_lock(pchan);
/* Ask the listening channel to create a per-connection channel. */
7396 chan = pchan->ops->new_connection(pchan);
7398 bacpy(&chan->src, &hcon->src);
7399 bacpy(&chan->dst, &hcon->dst);
7400 chan->src_type = bdaddr_src_type(hcon);
7401 chan->dst_type = dst_type;
7403 __l2cap_chan_add(conn, chan);
7406 l2cap_chan_unlock(pchan);
/* Advance to the next listening fixed channel, then drop our ref. */
7408 next = l2cap_global_fixed_chan(pchan, hcon);
7409 l2cap_chan_put(pchan);
7413 l2cap_conn_ready(conn);
/* HCI asks which disconnect reason to use for this link.
 *
 * Returns the reason recorded on the L2CAP connection, or the generic
 * "remote user terminated" error when no L2CAP data is attached (the
 * NULL check on 'conn' guarding the first return is elided in this
 * extract).
 */
7416 int l2cap_disconn_ind(struct hci_conn *hcon)
7418 struct l2cap_conn *conn = hcon->l2cap_data;
7420 BT_DBG("hcon %p", hcon);
7423 return HCI_ERROR_REMOTE_USER_TERM;
7424 return conn->disc_reason;
/* HCI callback: the link has been disconnected.  Tears down all L2CAP
 * state for ACL/LE links, translating the HCI reason to an errno.
 */
7427 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7429 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7432 BT_DBG("hcon %p reason %d", hcon, reason);
7434 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to a change in link encryption for a connection-oriented
 * channel.
 *
 * Encryption dropped: medium-security channels get a grace timer
 * before disconnect; high/FIPS channels are closed immediately.
 * Encryption (re)established: the grace timer on medium-security
 * channels is cleared.
 */
7437 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7439 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7442 if (encrypt == 0x00) {
7443 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7444 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7445 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7446 chan->sec_level == BT_SECURITY_FIPS)
7447 l2cap_chan_close(chan, ECONNREFUSED);
7449 if (chan->sec_level == BT_SECURITY_MEDIUM)
7450 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption state of the link changed.
 *
 * Walks every channel on the connection and advances its state
 * machine: resumes connected/configuring channels, (re)starts pending
 * outgoing connections, and answers deferred incoming connect requests
 * with success, pending, or a security block depending on 'status' and
 * the encryption key size.
 *
 * NOTE(review): this extract elides braces, 'continue' statements and
 * several declarations (e.g. 'res'/'stat' and the conf_req 'buf');
 * comments below annotate only the visible statements.
 */
7454 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7456 struct l2cap_conn *conn = hcon->l2cap_data;
7457 struct l2cap_chan *chan;
7462 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7464 mutex_lock(&conn->chan_lock);
7466 list_for_each_entry(chan, &conn->chan_l, list) {
7467 l2cap_chan_lock(chan);
7469 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7470 state_to_string(chan->state));
/* A2MP has its own security handling — skip it here. */
7472 if (chan->scid == L2CAP_CID_A2MP) {
7473 l2cap_chan_unlock(chan);
/* Successful encryption upgrades the channel's security level. */
7477 if (!status && encrypt)
7478 chan->sec_level = hcon->sec_level;
/* Skip channels that still have a connect request in flight. */
7480 if (!__l2cap_no_conn_pending(chan)) {
7481 l2cap_chan_unlock(chan);
/* Already up: just resume traffic and re-check encryption policy. */
7485 if (!status && (chan->state == BT_CONNECTED ||
7486 chan->state == BT_CONFIG)) {
7487 chan->ops->resume(chan);
7488 l2cap_check_encryption(chan, encrypt);
7489 l2cap_chan_unlock(chan);
/* Outgoing connect waiting on security: start it, or give up. */
7493 if (chan->state == BT_CONNECT) {
7494 if (!status && l2cap_check_enc_key_size(hcon))
7495 l2cap_start_connection(chan);
7497 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Deferred incoming connect (BR/EDR only): build the response. */
7498 } else if (chan->state == BT_CONNECT2 &&
7499 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7500 struct l2cap_conn_rsp rsp;
7503 if (!status && l2cap_check_enc_key_size(hcon)) {
7504 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
/* Userspace wants to authorize: answer "pending". */
7505 res = L2CAP_CR_PEND;
7506 stat = L2CAP_CS_AUTHOR_PEND;
7507 chan->ops->defer(chan);
7509 l2cap_state_change(chan, BT_CONFIG);
7510 res = L2CAP_CR_SUCCESS;
7511 stat = L2CAP_CS_NO_INFO;
/* Security failed (or key too short): block and schedule disconnect. */
7514 l2cap_state_change(chan, BT_DISCONN);
7515 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7516 res = L2CAP_CR_SEC_BLOCK;
7517 stat = L2CAP_CS_NO_INFO;
7520 rsp.scid = cpu_to_le16(chan->dcid);
7521 rsp.dcid = cpu_to_le16(chan->scid);
7522 rsp.result = cpu_to_le16(res);
7523 rsp.status = cpu_to_le16(stat);
7524 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* On success, immediately follow up with the first config request. */
7527 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7528 res == L2CAP_CR_SUCCESS) {
7530 set_bit(CONF_REQ_SENT, &chan->conf_state);
7531 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7533 l2cap_build_conf_req(chan, buf, sizeof(buf)),
7535 chan->num_conf_req++;
7539 l2cap_chan_unlock(chan);
7542 mutex_unlock(&conn->chan_lock);
/* Entry point for ACL data from the HCI core: reassembles L2CAP frames
 * that arrive fragmented across multiple ACL packets.
 *
 * A start fragment carries the Basic L2CAP header, from which the full
 * frame length is read; complete frames are dispatched directly, while
 * partial ones are accumulated in conn->rx_skb until conn->rx_len
 * reaches zero.  Malformed sequences mark the connection unreliable.
 *
 * NOTE(review): the extract elides the switch's case labels for
 * continuation fragments, gotos/returns and the 'drop:' style exits;
 * comments annotate the visible statements only.
 */
7545 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7547 struct l2cap_conn *conn = hcon->l2cap_data;
7548 struct l2cap_hdr *hdr;
7551 /* For AMP controller do not create l2cap conn */
7552 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
7556 conn = l2cap_conn_add(hcon);
7561 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
/* Start-of-frame fragment. */
7565 case ACL_START_NO_FLUSH:
/* A start fragment while a reassembly is pending: discard the old one. */
7568 BT_ERR("Unexpected start frame (len %d)", skb->len);
7569 kfree_skb(conn->rx_skb);
7570 conn->rx_skb = NULL;
7572 l2cap_conn_unreliable(conn, ECOMM);
7575 /* Start fragment always begin with Basic L2CAP header */
7576 if (skb->len < L2CAP_HDR_SIZE) {
7577 BT_ERR("Frame is too short (len %d)", skb->len);
7578 l2cap_conn_unreliable(conn, ECOMM);
7582 hdr = (struct l2cap_hdr *) skb->data;
7583 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7585 if (len == skb->len) {
7586 /* Complete frame received */
7587 l2cap_recv_frame(conn, skb);
7591 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7593 if (skb->len > len) {
7594 BT_ERR("Frame is too long (len %d, expected len %d)",
7596 l2cap_conn_unreliable(conn, ECOMM);
7600 /* Allocate skb for the complete frame (with header) */
7601 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7605 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes are still expected. */
7607 conn->rx_len = len - skb->len;
/* Continuation fragment (case label elided in this extract). */
7611 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress is a protocol error. */
7613 if (!conn->rx_len) {
7614 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7615 l2cap_conn_unreliable(conn, ECOMM);
7619 if (skb->len > conn->rx_len) {
7620 BT_ERR("Fragment is too long (len %d, expected %d)",
7621 skb->len, conn->rx_len);
7622 kfree_skb(conn->rx_skb);
7623 conn->rx_skb = NULL;
7625 l2cap_conn_unreliable(conn, ECOMM);
7629 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7631 conn->rx_len -= skb->len;
7633 if (!conn->rx_len) {
7634 /* Complete frame received. l2cap_recv_frame
7635 * takes ownership of the skb so set the global
7636 * rx_skb pointer to NULL first.
 */
7638 struct sk_buff *rx_skb = conn->rx_skb;
7639 conn->rx_skb = NULL;
7640 l2cap_recv_frame(conn, rx_skb);
/* Callback table registered with the HCI core (hci_register_cb in
 * l2cap_init); routes link-level events into the handlers above.
 */
7649 static struct hci_cb l2cap_cb = {
7651 .connect_cfm = l2cap_connect_cfm,
7652 .disconn_cfm = l2cap_disconn_cfm,
7653 .security_cfm = l2cap_security_cfm,
/* seq_file show handler for the "l2cap" debugfs entry: dumps one line
 * per channel in the global list (addresses, PSM, CIDs, MTUs, security
 * level and mode) under the channel-list read lock.
 */
7656 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7658 struct l2cap_chan *c;
7660 read_lock(&chan_list_lock);
7662 list_for_each_entry(c, &chan_list, global_l) {
7663 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7664 &c->src, c->src_type, &c->dst, c->dst_type,
7665 c->state, __le16_to_cpu(c->psm),
7666 c->scid, c->dcid, c->imtu, c->omtu,
7667 c->sec_level, c->mode);
7670 read_unlock(&chan_list_lock);
/* debugfs open handler: binds the seq_file machinery to
 * l2cap_debugfs_show.
 */
7675 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7677 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry; read/seek/release are
 * the standard single_open seq_file helpers (.read line elided in this
 * extract).
 */
7680 static const struct file_operations l2cap_debugfs_fops = {
7681 .open = l2cap_debugfs_open,
7683 .llseek = seq_lseek,
7684 .release = single_release,
/* Dentry of the "l2cap" debugfs file, kept so l2cap_exit can remove it. */
7687 static struct dentry *l2cap_debugfs;
/* Module init: set up the L2CAP socket layer, register the HCI
 * callback table, and (when bt_debugfs exists) create the debugfs
 * entries, including writable knobs for the LE flow-control credit
 * and MPS defaults.
 *
 * NOTE(review): the error-return check after l2cap_init_sockets() and
 * the final 'return 0' are elided in this extract.
 */
7689 int __init l2cap_init(void)
7693 err = l2cap_init_sockets();
7697 hci_register_cb(&l2cap_cb);
/* No debugfs root — skip creating the debug entries. */
7699 if (IS_ERR_OR_NULL(bt_debugfs))
7702 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7703 NULL, &l2cap_debugfs_fops);
7705 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7707 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
/* Module teardown: undo l2cap_init in reverse order — remove the
 * debugfs entry, unregister the HCI callbacks, clean up sockets.
 */
7713 void l2cap_exit(void)
7715 debugfs_remove(l2cap_debugfs);
7716 hci_unregister_cb(&l2cap_cb);
7717 l2cap_cleanup_sockets();
/* Runtime-tunable module parameter to disable ERTM (declared earlier
 * in the file; 0644 = root-writable via sysfs).
 */
7720 module_param(disable_ertm, bool, 0644);
7721 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");