2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
/* NOTE(review): this excerpt is elided — the leading decimal on each line is
 * the original file's line number; gaps in the numbering mark omitted lines.
 */

/* Locally supported L2CAP feature mask and fixed-channel bitmap advertised
 * in Information Responses.
 */
47 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
48 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
/* Global list of all L2CAP channels, guarded by chan_list_lock (rwlock:
 * readers use read_lock, writers write_lock).
 */
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
/* LE credit-based flow control defaults (initial credits and MPS). */
53 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
54 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
/* Forward declarations for signalling-command builders/senders and the
 * ERTM transmit state machine used before their definitions below.
 */
56 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
57 u8 code, u8 ident, u16 dlen, void *data);
58 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
60 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
61 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
64 struct sk_buff_head *skbs, u8 event);
/* Map an HCI-level address type to the bdaddr type exposed to sockets.
 * For LE links, ADDR_LE_DEV_PUBLIC becomes BDADDR_LE_PUBLIC, anything else
 * BDADDR_LE_RANDOM.  The non-LE fallback return is elided from this view —
 * presumably BDADDR_BREDR; confirm against the full source.
 */
66 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
68 if (hcon->type == LE_LINK) {
69 if (type == ADDR_LE_DEV_PUBLIC)
70 return BDADDR_LE_PUBLIC;
72 return BDADDR_LE_RANDOM;
78 /* ---- L2CAP channels ---- */
/* Search conn->chan_l for a channel by destination CID.  Caller must hold
 * conn->chan_lock.  The comparison/return lines are elided from this view.
 */
80 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
85 list_for_each_entry(c, &conn->chan_l, list) {
/* As above, but matches on the source CID.  Caller holds conn->chan_lock. */
92 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
97 list_for_each_entry(c, &conn->chan_l, list) {
104 /* Find channel with given SCID.
105 * Returns locked channel. */
/* Locking wrapper: takes conn->chan_lock around the unlocked SCID lookup.
 * The per-channel lock acquisition implied by "Returns locked channel" is
 * elided from this view.
 */
106 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
109 struct l2cap_chan *c;
111 mutex_lock(&conn->chan_lock);
112 c = __l2cap_get_chan_by_scid(conn, cid);
115 mutex_unlock(&conn->chan_lock);
120 /* Find channel with given DCID.
121 * Returns locked channel.
/* Locking wrapper for the DCID lookup, symmetric with the SCID variant. */
123 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
126 struct l2cap_chan *c;
128 mutex_lock(&conn->chan_lock);
129 c = __l2cap_get_chan_by_dcid(conn, cid);
132 mutex_unlock(&conn->chan_lock);
/* Lookup by signalling-command identifier (chan->ident); unlocked variant
 * requires conn->chan_lock held by the caller.
 */
137 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
140 struct l2cap_chan *c;
142 list_for_each_entry(c, &conn->chan_l, list) {
143 if (c->ident == ident)
/* Locking wrapper for the ident lookup. */
149 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
152 struct l2cap_chan *c;
154 mutex_lock(&conn->chan_lock);
155 c = __l2cap_get_chan_by_ident(conn, ident);
158 mutex_unlock(&conn->chan_lock);
/* Find a channel in the global chan_list bound to the given PSM and source
 * address.  Caller must hold chan_list_lock.  Note the match is on c->sport,
 * i.e. the locally-bound source PSM.
 */
163 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
165 struct l2cap_chan *c;
167 list_for_each_entry(c, &chan_list, global_l) {
168 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a channel to a PSM.  If psm is non-zero it must not already be bound
 * on this source address; if zero, a free dynamic PSM is auto-allocated from
 * the odd values in 0x1001..0x10ff (step 2 keeps the LSB pattern valid).
 * Error-return lines are elided from this view.
 */
174 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
178 write_lock(&chan_list_lock);
180 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
193 for (p = 0x1001; p < 0x1100; p += 2)
194 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
195 chan->psm = cpu_to_le16(p);
196 chan->sport = cpu_to_le16(p);
203 write_unlock(&chan_list_lock);
/* Assign a fixed source CID to a channel, under the global list lock.
 * The assignment line itself is elided from this view.
 */
207 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
209 write_lock(&chan_list_lock);
213 write_unlock(&chan_list_lock);
/* Allocate the first unused dynamic CID on a connection.  LE links have a
 * narrower dynamic range (L2CAP_CID_LE_DYN_END) than BR/EDR.  Caller is
 * presumably responsible for conn->chan_lock — confirm against full source.
 */
218 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
222 if (conn->hcon->type == LE_LINK)
223 dyn_end = L2CAP_CID_LE_DYN_END;
225 dyn_end = L2CAP_CID_DYN_END;
227 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
228 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition a channel to a new state via the ops->state_change callback
 * (err == 0).  The callback updates chan->state on behalf of the owner.
 */
235 static void l2cap_state_change(struct l2cap_chan *chan, int state)
237 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
238 state_to_string(state));
241 chan->ops->state_change(chan, state, 0);
/* Change state and report an error in one callback invocation.  Note the
 * visible call passes chan->state — the new-state assignment is elided
 * from this view; confirm against full source.
 */
244 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
248 chan->ops->state_change(chan, chan->state, err);
/* Report an error without changing state (state argument repeats the
 * current chan->state).
 */
251 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
253 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer — but only when the monitor timer is
 * not already pending (they are mutually exclusive per the ERTM spec) and
 * a retransmission timeout has been negotiated.
 */
256 static void __set_retrans_timer(struct l2cap_chan *chan)
258 if (!delayed_work_pending(&chan->monitor_timer) &&
259 chan->retrans_timeout) {
260 l2cap_set_timer(chan, &chan->retrans_timer,
261 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the ERTM monitor timer, cancelling any pending retransmission timer
 * first (the two never run together).
 */
265 static void __set_monitor_timer(struct l2cap_chan *chan)
267 __clear_retrans_timer(chan);
268 if (chan->monitor_timeout) {
269 l2cap_set_timer(chan, &chan->monitor_timer,
270 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of a queue for the skb whose ERTM control txseq matches seq;
 * the return lines are elided from this view.
 */
274 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
279 skb_queue_walk(head, skb) {
280 if (bt_cb(skb)->control.txseq == seq)
287 /* ---- L2CAP sequence number lists ---- */
289 /* For ERTM, ordered lists of sequence numbers must be tracked for
290 * SREJ requests that are received and for frames that are to be
291 * retransmitted. These seq_list functions implement a singly-linked
292 * list in an array, where membership in the list can also be checked
293 * in constant time. Items can also be added to the tail of the list
294 * and removed from the head in constant time, without further memory
/* Allocate the backing array for a seq_list.  Size is rounded up to a power
 * of two so (seq & mask) maps any 14-bit sequence number into the array.
 * Every slot starts as L2CAP_SEQ_LIST_CLEAR ("not a member").  The ENOMEM
 * check and success return are elided from this view.
 */
298 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
300 size_t alloc_size, i;
302 /* Allocated size is a power of 2 to map sequence numbers
303 * (which may be up to 14 bits) in to a smaller array that is
304 * sized for the negotiated ERTM transmit windows.
306 alloc_size = roundup_pow_of_two(size);
308 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
312 seq_list->mask = alloc_size - 1;
313 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
314 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
315 for (i = 0; i < alloc_size; i++)
316 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array (kfree(NULL) is a no-op, so safe on an
 * uninitialized list).
 */
321 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
323 kfree(seq_list->list);
/* O(1) membership test: a slot equal to L2CAP_SEQ_LIST_CLEAR means the
 * sequence number is not on the list.
 */
326 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
329 /* Constant-time check for list membership */
330 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove a sequence number from the list.  Head removal is O(1); removal
 * from the middle walks the chain to find the predecessor.  Returns
 * L2CAP_SEQ_LIST_CLEAR when the list is empty or seq is not found; the
 * final success return is elided from this view (presumably returns seq).
 */
333 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
335 u16 mask = seq_list->mask;
337 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
338 /* In case someone tries to pop the head of an empty list */
339 return L2CAP_SEQ_LIST_CLEAR;
340 } else if (seq_list->head == seq) {
341 /* Head can be removed in constant time */
342 seq_list->head = seq_list->list[seq & mask];
343 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* If the removed head was also the tail, the list is now empty. */
345 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
346 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
347 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
350 /* Walk the list to find the sequence number */
351 u16 prev = seq_list->head;
352 while (seq_list->list[prev & mask] != seq) {
353 prev = seq_list->list[prev & mask];
354 if (prev == L2CAP_SEQ_LIST_TAIL)
355 return L2CAP_SEQ_LIST_CLEAR;
358 /* Unlink the number from the list and clear it */
359 seq_list->list[prev & mask] = seq_list->list[seq & mask];
360 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* Removing the tail makes its predecessor the new tail. */
361 if (seq_list->tail == seq)
362 seq_list->tail = prev;
/* Pop the head element — constant time by construction. */
367 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
369 /* Remove the head in constant time */
370 return l2cap_seq_list_remove(seq_list, seq_list->head)
373 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
/* Reset the whole list to empty; no-op if already empty.  O(capacity)
 * because every slot must be cleared to keep the membership test valid.
 */
377 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
380 for (i = 0; i <= seq_list->mask; i++)
381 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
383 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
384 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append to the tail in O(1).  Duplicate appends are ignored (slot already
 * non-CLEAR).  An append to an empty list also sets the head; the new tail
 * slot is marked L2CAP_SEQ_LIST_TAIL as the end-of-chain sentinel.
 */
387 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
389 u16 mask = seq_list->mask;
391 /* All appends happen in constant time */
393 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
396 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
397 seq_list->head = seq;
399 seq_list->list[seq_list->tail & mask] = seq;
401 seq_list->tail = seq;
402 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer.  Closes the channel with a
 * reason derived from its state (ECONNREFUSED for connected/configuring or
 * a securing connect; other cases elided from this view), then drops the
 * reference taken when the timer was armed.  Lock order: conn->chan_lock
 * before the channel lock.
 */
405 static void l2cap_chan_timeout(struct work_struct *work)
407 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
409 struct l2cap_conn *conn = chan->conn;
412 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
414 mutex_lock(&conn->chan_lock);
415 l2cap_chan_lock(chan);
417 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
418 reason = ECONNREFUSED;
419 else if (chan->state == BT_CONNECT &&
420 chan->sec_level != BT_SECURITY_SDP)
421 reason = ECONNREFUSED;
425 l2cap_chan_close(chan, reason);
427 l2cap_chan_unlock(chan);
/* ops->close is deliberately called after dropping the channel lock. */
429 chan->ops->close(chan);
430 mutex_unlock(&conn->chan_lock);
432 l2cap_chan_put(chan);
/* Allocate and initialize a new channel: zeroed, refcounted (kref), added
 * to the global chan_list, timer work initialized, state BT_OPEN.
 * NOTE(review): GFP_ATOMIC here — presumably because callers may hold
 * spinlocks; confirm against callers.
 */
435 struct l2cap_chan *l2cap_chan_create(void)
437 struct l2cap_chan *chan;
439 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
443 mutex_init(&chan->lock);
445 write_lock(&chan_list_lock);
446 list_add(&chan->global_l, &chan_list);
447 write_unlock(&chan_list_lock);
449 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
451 chan->state = BT_OPEN;
453 kref_init(&chan->kref);
455 /* This flag is cleared in l2cap_chan_ready() */
456 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
458 BT_DBG("chan %p", chan);
/* kref release callback: unlink from the global list; the kfree of the
 * channel itself is elided from this view.
 */
463 static void l2cap_chan_destroy(struct kref *kref)
465 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
467 BT_DBG("chan %p", chan);
469 write_lock(&chan_list_lock);
470 list_del(&chan->global_l);
471 write_unlock(&chan_list_lock);
/* Take a channel reference (kref_get elided from this view). */
476 void l2cap_chan_hold(struct l2cap_chan *c)
478 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a channel reference; destroys the channel on the last put. */
483 void l2cap_chan_put(struct l2cap_chan *c)
485 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
487 kref_put(&c->kref, l2cap_chan_destroy);
/* Install protocol defaults on a freshly created channel: CRC16 FCS,
 * default retransmit count / TX window, low security, force-active ACL.
 */
490 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
492 chan->fcs = L2CAP_FCS_CRC16;
493 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
494 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
495 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
496 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
497 chan->sec_level = BT_SECURITY_LOW;
499 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Initialize LE credit-based flow control state: no TX credits until the
 * peer grants them, RX credits from the module default, MPS capped by the
 * local incoming MTU.
 */
502 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
505 chan->sdu_last_frag = NULL;
507 chan->tx_credits = 0;
508 chan->rx_credits = le_max_credits;
509 chan->mps = min_t(u16, chan->imtu, L2CAP_LE_DEFAULT_MPS);
511 skb_queue_head_init(&chan->tx_q);
/* Attach a channel to a connection: pick CIDs/MTUs per channel type, set
 * flow-spec defaults, take channel + hci_conn references, and link into
 * conn->chan_l.  Caller must hold conn->chan_lock (see l2cap_chan_add).
 */
514 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
516 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
517 __le16_to_cpu(chan->psm), chan->dcid);
/* Default disconnect reason until the real one is known. */
519 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
523 switch (chan->chan_type) {
524 case L2CAP_CHAN_CONN_ORIENTED:
525 if (conn->hcon->type == LE_LINK) {
/* ATT is a fixed channel: same CID on both sides. */
526 if (chan->dcid == L2CAP_CID_ATT) {
527 chan->omtu = L2CAP_DEFAULT_MTU;
528 chan->scid = L2CAP_CID_ATT;
530 chan->scid = l2cap_alloc_cid(conn);
533 /* Alloc CID for connection-oriented socket */
534 chan->scid = l2cap_alloc_cid(conn);
535 chan->omtu = L2CAP_DEFAULT_MTU;
539 case L2CAP_CHAN_CONN_LESS:
540 /* Connectionless socket */
541 chan->scid = L2CAP_CID_CONN_LESS;
542 chan->dcid = L2CAP_CID_CONN_LESS;
543 chan->omtu = L2CAP_DEFAULT_MTU;
546 case L2CAP_CHAN_CONN_FIX_A2MP:
547 chan->scid = L2CAP_CID_A2MP;
548 chan->dcid = L2CAP_CID_A2MP;
549 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
550 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
554 /* Raw socket can send/recv signalling messages only */
555 chan->scid = L2CAP_CID_SIGNALING;
556 chan->dcid = L2CAP_CID_SIGNALING;
557 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort flow-spec defaults (used by EFS negotiation). */
560 chan->local_id = L2CAP_BESTEFFORT_ID;
561 chan->local_stype = L2CAP_SERV_BESTEFFORT;
562 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
563 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
564 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
565 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
/* The channel holds a ref on itself and on the ACL link while attached. */
567 l2cap_chan_hold(chan);
569 hci_conn_hold(conn->hcon);
571 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add. */
574 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
576 mutex_lock(&conn->chan_lock);
577 __l2cap_chan_add(conn, chan);
578 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection and tear it down: stop the channel
 * timer, unlink from conn->chan_l, drop the references taken in
 * __l2cap_chan_add, disconnect any AMP logical link, notify the owner via
 * ops->teardown, and purge mode-specific queues/timers.
 */
581 void l2cap_chan_del(struct l2cap_chan *chan, int err)
583 struct l2cap_conn *conn = chan->conn;
585 __clear_chan_timer(chan);
587 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
/* This branch presumably runs only when conn is non-NULL — the guarding
 * condition is elided from this view.
 */
590 struct amp_mgr *mgr = conn->hcon->amp_mgr;
591 /* Delete from channel list */
592 list_del(&chan->list);
/* Drop the self-reference taken in __l2cap_chan_add. */
594 l2cap_chan_put(chan);
/* A2MP fixed channels don't hold an extra ACL reference. */
598 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
599 hci_conn_drop(conn->hcon);
601 if (mgr && mgr->bredr_chan == chan)
602 mgr->bredr_chan = NULL;
605 if (chan->hs_hchan) {
606 struct hci_chan *hs_hchan = chan->hs_hchan;
608 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
609 amp_disconnect_logical_link(hs_hchan);
612 chan->ops->teardown(chan, err);
/* Nothing more to clean up if configuration never completed. */
614 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
618 case L2CAP_MODE_BASIC:
621 case L2CAP_MODE_LE_FLOWCTL:
622 skb_queue_purge(&chan->tx_q);
625 case L2CAP_MODE_ERTM:
626 __clear_retrans_timer(chan);
627 __clear_monitor_timer(chan);
628 __clear_ack_timer(chan);
630 skb_queue_purge(&chan->srej_q);
632 l2cap_seq_list_free(&chan->srej_list);
633 l2cap_seq_list_free(&chan->retrans_list);
/* fallthrough into STREAMING cleanup is implied by the elided lines —
 * confirm against full source.
 */
637 case L2CAP_MODE_STREAMING:
638 skb_queue_purge(&chan->tx_q);
/* Reject an incoming LE credit-based connection: authorization failure if
 * setup was deferred, otherwise bad PSM.  Sends an LE Connection Response
 * echoing our parameters with the failure result and moves the channel to
 * BT_DISCONN.
 */
645 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
647 struct l2cap_conn *conn = chan->conn;
648 struct l2cap_le_conn_rsp rsp;
651 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
652 result = L2CAP_CR_AUTHORIZATION;
654 result = L2CAP_CR_BAD_PSM;
656 l2cap_state_change(chan, BT_DISCONN);
658 rsp.dcid = cpu_to_le16(chan->scid);
659 rsp.mtu = cpu_to_le16(chan->imtu);
660 rsp.mps = cpu_to_le16(chan->mps);
661 rsp.credits = cpu_to_le16(chan->rx_credits);
662 rsp.result = cpu_to_le16(result);
664 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* BR/EDR counterpart: security-block if setup was deferred, otherwise bad
 * PSM, via a classic Connection Response.
 */
668 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
670 struct l2cap_conn *conn = chan->conn;
671 struct l2cap_conn_rsp rsp;
674 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
675 result = L2CAP_CR_SEC_BLOCK;
677 result = L2CAP_CR_BAD_PSM;
679 l2cap_state_change(chan, BT_DISCONN);
/* In the response, scid/dcid are from the remote's perspective. */
681 rsp.scid = cpu_to_le16(chan->dcid);
682 rsp.dcid = cpu_to_le16(chan->scid);
683 rsp.result = cpu_to_le16(result);
684 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
686 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close a channel according to its current state: connected channels with a
 * PSM get an orderly Disconnect Request (with a timeout), half-open
 * incoming channels are rejected per transport type, and everything else is
 * simply deleted/torn down.  The state labels for each case are elided
 * from this view.
 */
689 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
691 struct l2cap_conn *conn = chan->conn;
693 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
695 switch (chan->state) {
697 chan->ops->teardown(chan, 0);
702 /* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
703 * check for chan->psm.
705 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && chan->psm) {
706 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
707 l2cap_send_disconn_req(chan, reason);
709 l2cap_chan_del(chan, reason);
/* Incoming connection not yet accepted: send the proper reject. */
713 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
714 if (conn->hcon->type == ACL_LINK)
715 l2cap_chan_connect_reject(chan);
716 else if (conn->hcon->type == LE_LINK)
717 l2cap_chan_le_connect_reject(chan);
720 l2cap_chan_del(chan, reason);
725 l2cap_chan_del(chan, reason);
729 chan->ops->teardown(chan, 0);
/* Map a channel's type/PSM/security level to an HCI authentication
 * requirement.  SDP and 3DSP PSMs never bond (and their security level is
 * clamped down to BT_SECURITY_SDP when LOW); other channels bond according
 * to their security level.  Several case labels are elided from this view.
 */
734 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
736 switch (chan->chan_type) {
738 switch (chan->sec_level) {
739 case BT_SECURITY_HIGH:
740 return HCI_AT_DEDICATED_BONDING_MITM;
741 case BT_SECURITY_MEDIUM:
742 return HCI_AT_DEDICATED_BONDING;
744 return HCI_AT_NO_BONDING;
747 case L2CAP_CHAN_CONN_LESS:
748 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
749 if (chan->sec_level == BT_SECURITY_LOW)
750 chan->sec_level = BT_SECURITY_SDP;
752 if (chan->sec_level == BT_SECURITY_HIGH)
753 return HCI_AT_NO_BONDING_MITM;
755 return HCI_AT_NO_BONDING;
757 case L2CAP_CHAN_CONN_ORIENTED:
758 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
759 if (chan->sec_level == BT_SECURITY_LOW)
760 chan->sec_level = BT_SECURITY_SDP;
762 if (chan->sec_level == BT_SECURITY_HIGH)
763 return HCI_AT_NO_BONDING_MITM;
765 return HCI_AT_NO_BONDING;
/* Default/fallback path: general bonding per security level. */
769 switch (chan->sec_level) {
770 case BT_SECURITY_HIGH:
771 return HCI_AT_GENERAL_BONDING_MITM;
772 case BT_SECURITY_MEDIUM:
773 return HCI_AT_GENERAL_BONDING;
775 return HCI_AT_NO_BONDING;
781 /* Service level security */
/* Enforce the channel's security level on the underlying link: SMP for LE
 * links, HCI authentication/encryption for BR/EDR.
 */
782 int l2cap_chan_check_security(struct l2cap_chan *chan)
784 struct l2cap_conn *conn = chan->conn;
787 if (conn->hcon->type == LE_LINK)
788 return smp_conn_security(conn->hcon, chan->sec_level);
790 auth_type = l2cap_get_auth_type(chan);
792 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for this connection,
 * cycling within 1..128 (the kernel's range; wrap handling is elided from
 * this view).  Protected by conn->lock.
 */
795 static u8 l2cap_get_ident(struct l2cap_conn *conn)
799 /* Get next available identificator.
800 * 1 - 128 are used by kernel.
801 * 129 - 199 are reserved.
802 * 200 - 254 are used by utilities like l2ping, etc.
805 spin_lock(&conn->lock);
807 if (++conn->tx_ident > 128)
812 spin_unlock(&conn->lock);
/* Build and transmit one signalling command on the connection's signalling
 * channel at maximum priority, forcing the ACL active.  The NULL-skb early
 * return after build is elided from this view.
 */
817 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
820 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
823 BT_DBG("code 0x%2.2x", code);
/* Signalling must not be flushed if the controller supports it. */
828 if (lmp_no_flush_capable(conn->hcon->hdev))
829 flags = ACL_START_NO_FLUSH;
833 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
834 skb->priority = HCI_PRIO_MAX;
836 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any state other than
 * STABLE or WAIT_PREPARE); data must be queued rather than sent.
 */
839 static bool __chan_is_moving(struct l2cap_chan *chan)
841 return chan->move_state != L2CAP_MOVE_STABLE &&
842 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit one data skb for a channel.  If the channel lives on a
 * high-speed (AMP) link and is not mid-move, send complete frames on the
 * HS hci_chan; otherwise send on the BR/EDR ACL with flush/active flags
 * derived from channel state.
 */
845 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
847 struct hci_conn *hcon = chan->conn->hcon;
850 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
/* AMP path: frames are complete (no fragment reassembly flags). */
853 if (chan->hs_hcon && !__chan_is_moving(chan)) {
855 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
/* Non-flushable only when the channel isn't flagged flushable AND the
 * controller supports non-flushable packets.
 */
862 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
863 lmp_no_flush_capable(hcon->hdev))
864 flags = ACL_START_NO_FLUSH;
868 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
869 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit ERTM enhanced control field into struct l2cap_ctrl.
 * S-frames carry poll/supervise bits; I-frames carry SAR/txseq.  The
 * sframe-flag assignments in each branch are elided from this view.
 */
872 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
874 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
875 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
877 if (enh & L2CAP_CTRL_FRAME_TYPE) {
880 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
881 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
888 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
889 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Same decoding for the 32-bit extended control field (extended window). */
896 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
898 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
899 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
901 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
904 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
905 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
912 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
913 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull the control field (16- or 32-bit depending on FLAG_EXT_CTRL) off
 * the front of an incoming skb into bt_cb(skb)->control.
 */
920 static inline void __unpack_control(struct l2cap_chan *chan,
923 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
924 __unpack_extended_control(get_unaligned_le32(skb->data),
925 &bt_cb(skb)->control);
926 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
928 __unpack_enhanced_control(get_unaligned_le16(skb->data),
929 &bt_cb(skb)->control);
930 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Inverse of __unpack_extended_control: encode l2cap_ctrl into the 32-bit
 * wire format (final return of `packed` elided from this view).
 */
934 static u32 __pack_extended_control(struct l2cap_ctrl *control)
938 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
939 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
941 if (control->sframe) {
942 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
943 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
944 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
946 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
947 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* 16-bit enhanced-control encoder, mirroring the extended variant. */
953 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
957 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
958 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
960 if (control->sframe) {
961 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
962 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
963 packed |= L2CAP_CTRL_FRAME_TYPE;
965 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
966 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the (already sized) control field into an outgoing skb just after
 * the basic L2CAP header.
 */
972 static inline void __pack_control(struct l2cap_chan *chan,
973 struct l2cap_ctrl *control,
976 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
977 put_unaligned_le32(__pack_extended_control(control),
978 skb->data + L2CAP_HDR_SIZE);
980 put_unaligned_le16(__pack_enhanced_control(control),
981 skb->data + L2CAP_HDR_SIZE);
/* ERTM header size depends on whether extended control is negotiated. */
985 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
987 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
988 return L2CAP_EXT_HDR_SIZE;
990 return L2CAP_ENH_HDR_SIZE;
/* Build an S-frame PDU: basic header + (enhanced or extended) control +
 * optional CRC16 FCS over header and control.  Returns ERR_PTR(-ENOMEM)
 * on allocation failure.
 */
993 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
997 struct l2cap_hdr *lh;
998 int hlen = __ertm_hdr_size(chan);
1000 if (chan->fcs == L2CAP_FCS_CRC16)
1001 hlen += L2CAP_FCS_SIZE;
1003 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1006 return ERR_PTR(-ENOMEM);
/* len excludes the basic header itself, per the wire format. */
1008 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1009 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1010 lh->cid = cpu_to_le16(chan->dcid);
1012 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1013 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1015 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1017 if (chan->fcs == L2CAP_FCS_CRC16) {
1018 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1019 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1022 skb->priority = HCI_PRIO_MAX;
/* Send one supervisory frame.  Maintains ERTM bookkeeping: applies a
 * pending F-bit, tracks RNR-sent state, and (except for SREJ) records the
 * acked reqseq and cancels the ack timer.  No-ops if the control isn't an
 * S-frame or the channel is mid-move.
 */
1026 static void l2cap_send_sframe(struct l2cap_chan *chan,
1027 struct l2cap_ctrl *control)
1029 struct sk_buff *skb;
1032 BT_DBG("chan %p, control %p", chan, control);
1034 if (!control->sframe)
1037 if (__chan_is_moving(chan))
/* The final-bit assignment guarded by this test is elided from view. */
1040 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1044 if (control->super == L2CAP_SUPER_RR)
1045 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1046 else if (control->super == L2CAP_SUPER_RNR)
1047 set_bit(CONN_RNR_SENT, &chan->conn_state);
1049 if (control->super != L2CAP_SUPER_SREJ) {
1050 chan->last_acked_seq = control->reqseq;
1051 __clear_ack_timer(chan);
1054 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1055 control->final, control->poll, control->super);
1057 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1058 control_field = __pack_extended_control(control);
1060 control_field = __pack_enhanced_control(control);
/* The IS_ERR(skb) check after pdu creation is elided from this view. */
1062 skb = l2cap_create_sframe_pdu(chan, control_field);
1064 l2cap_do_send(chan, skb);
/* Send RR (ready) or RNR (not ready) depending on local-busy state,
 * acknowledging up to buffer_seq, optionally with the poll bit set.
 */
1067 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1069 struct l2cap_ctrl control;
1071 BT_DBG("chan %p, poll %d", chan, poll);
1073 memset(&control, 0, sizeof(control));
/* The control.sframe = 1 assignment is elided from this view. */
1075 control.poll = poll;
1077 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1078 control.super = L2CAP_SUPER_RNR;
1080 control.super = L2CAP_SUPER_RR;
1082 control.reqseq = chan->buffer_seq;
1083 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding for this channel. */
1086 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1088 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may be created on / moved to an AMP
 * controller: requires HS enabled, the peer advertising the A2MP fixed
 * channel, and at least one non-BR/EDR AMP controller powered up.  The
 * policy check for AMP_PREFERRED gates the final answer; remaining policy
 * branches are elided from this view.
 */
1091 static bool __amp_capable(struct l2cap_chan *chan)
1093 struct l2cap_conn *conn = chan->conn;
1094 struct hci_dev *hdev;
1095 bool amp_available = false;
1097 if (!conn->hs_enabled)
1100 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1103 read_lock(&hci_dev_list_lock);
1104 list_for_each_entry(hdev, &hci_dev_list, list) {
1105 if (hdev->amp_type != AMP_TYPE_BREDR &&
1106 test_bit(HCI_UP, &hdev->flags)) {
1107 amp_available = true;
1111 read_unlock(&hci_dev_list_lock);
1113 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1114 return amp_available;
/* Validate EFS parameters; body elided from this view. */
1119 static bool l2cap_check_efs(struct l2cap_chan *chan)
1121 /* Check EFS parameters */
/* Send a classic L2CAP Connection Request for this channel and mark a
 * connect as pending.
 */
1125 void l2cap_send_conn_req(struct l2cap_chan *chan)
1127 struct l2cap_conn *conn = chan->conn;
1128 struct l2cap_conn_req req;
1130 req.scid = cpu_to_le16(chan->scid);
1131 req.psm = chan->psm;
1133 chan->ident = l2cap_get_ident(conn);
1135 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1137 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send an A2MP Create Channel Request targeting the given AMP controller. */
1140 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1142 struct l2cap_create_chan_req req;
1143 req.scid = cpu_to_le16(chan->scid);
1144 req.psm = chan->psm;
1145 req.amp_id = amp_id;
1147 chan->ident = l2cap_get_ident(chan->conn);
1149 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all ERTM timers, flag
 * queued frames as needing retransmission, reset SREJ/retrans tracking,
 * and park the TX/RX state machines in their move states.
 */
1153 static void l2cap_move_setup(struct l2cap_chan *chan)
1155 struct sk_buff *skb;
1157 BT_DBG("chan %p", chan);
1159 if (chan->mode != L2CAP_MODE_ERTM)
1162 __clear_retrans_timer(chan);
1163 __clear_monitor_timer(chan);
1164 __clear_ack_timer(chan);
1166 chan->retry_count = 0;
1167 skb_queue_walk(&chan->tx_q, skb) {
/* NOTE(review): setting retries to 1 only when already non-zero looks
 * intentional (first-transmission markers stay 0) — confirm against
 * full source.
 */
1168 if (bt_cb(skb)->control.retries)
1169 bt_cb(skb)->control.retries = 1;
1174 chan->expected_tx_seq = chan->buffer_seq;
1176 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1177 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1178 l2cap_seq_list_clear(&chan->retrans_list);
1179 l2cap_seq_list_clear(&chan->srej_list);
1180 skb_queue_purge(&chan->srej_q);
1182 chan->tx_state = L2CAP_TX_STATE_XMIT;
1183 chan->rx_state = L2CAP_RX_STATE_MOVE;
1185 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Complete an AMP move: return move state/role to stable, then (ERTM only)
 * resynchronize with the peer — the initiator polls explicitly and waits
 * for F, the responder waits for P.
 */
1188 static void l2cap_move_done(struct l2cap_chan *chan)
1190 u8 move_role = chan->move_role;
1191 BT_DBG("chan %p", chan);
1193 chan->move_state = L2CAP_MOVE_STABLE;
1194 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1196 if (chan->mode != L2CAP_MODE_ERTM)
1199 switch (move_role) {
1200 case L2CAP_MOVE_ROLE_INITIATOR:
1201 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1202 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1204 case L2CAP_MOVE_ROLE_RESPONDER:
1205 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark a channel fully connected: clear all configuration flags, stop the
 * channel timer, suspend an LE channel that has no TX credits yet, and
 * notify the owner.
 */
1210 static void l2cap_chan_ready(struct l2cap_chan *chan)
1212 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1213 chan->conf_state = 0;
1214 __clear_chan_timer(chan);
1216 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1217 chan->ops->suspend(chan);
1219 chan->state = BT_CONNECTED;
1221 chan->ops->ready(chan);
/* Send an LE credit-based Connection Request (once per channel, guarded by
 * FLAG_LE_CONN_REQ_SENT).  The flow-control init call presumably precedes
 * the request — elided from this view.
 */
1224 static void l2cap_le_connect(struct l2cap_chan *chan)
1226 struct l2cap_conn *conn = chan->conn;
1227 struct l2cap_le_conn_req req;
1229 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1232 req.psm = chan->psm;
1233 req.scid = cpu_to_le16(chan->scid);
1234 req.mtu = cpu_to_le16(chan->imtu);
1235 req.mps = cpu_to_le16(chan->mps);
1236 req.credits = cpu_to_le16(chan->rx_credits);
1238 chan->ident = l2cap_get_ident(conn);
1240 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Start an LE channel once link security is satisfied: channels without a
 * PSM (fixed, e.g. ATT) become ready immediately (that branch's guard is
 * elided); PSM channels in BT_CONNECT issue the LE connect request.
 */
1244 static void l2cap_le_start(struct l2cap_chan *chan)
1246 struct l2cap_conn *conn = chan->conn;
1248 if (!smp_conn_security(conn->hcon, chan->sec_level))
1252 l2cap_chan_ready(chan);
1256 if (chan->state == BT_CONNECT)
1257 l2cap_le_connect(chan);
/* Dispatch connection establishment: AMP discovery when HS is usable,
 * LE start on LE links, classic Connection Request otherwise.
 */
1260 static void l2cap_start_connection(struct l2cap_chan *chan)
1262 if (__amp_capable(chan)) {
1263 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1264 a2mp_discover_amp(chan);
1265 } else if (chan->conn->hcon->type == LE_LINK) {
1266 l2cap_le_start(chan);
1268 l2cap_send_conn_req(chan);
/* Kick off channel establishment on a connection.  LE goes straight to
 * l2cap_le_start.  BR/EDR first requires the feature-mask information
 * exchange: if it's done and security passes, connect; if it hasn't been
 * sent yet, send the Information Request and arm the info timer.
 */
1272 static void l2cap_do_start(struct l2cap_chan *chan)
1274 struct l2cap_conn *conn = chan->conn;
1276 if (conn->hcon->type == LE_LINK) {
1277 l2cap_le_start(chan);
1281 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1282 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1285 if (l2cap_chan_check_security(chan) &&
1286 __l2cap_no_conn_pending(chan)) {
1287 l2cap_start_connection(chan);
1290 struct l2cap_info_req req;
1291 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1293 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1294 conn->info_ident = l2cap_get_ident(conn);
1296 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1298 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Is the given mode usable, considering both the local feature mask
 * (always extended with ERTM/streaming here) and the remote's feat_mask?
 * The default-case return for other modes is elided from this view.
 */
1303 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1305 u32 local_feat_mask = l2cap_feat_mask;
1307 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1310 case L2CAP_MODE_ERTM:
1311 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1312 case L2CAP_MODE_STREAMING:
1313 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request for the channel (or just flip state for A2MP
 * fixed channels, which have no disconnect handshake), stopping ERTM
 * timers first, then report the error to the owner via the state change.
 */
1319 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1321 struct l2cap_conn *conn = chan->conn;
1322 struct l2cap_disconn_req req;
1327 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1328 __clear_retrans_timer(chan);
1329 __clear_monitor_timer(chan);
1330 __clear_ack_timer(chan);
1333 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1334 l2cap_state_change(chan, BT_DISCONN);
1338 req.dcid = cpu_to_le16(chan->dcid);
1339 req.scid = cpu_to_le16(chan->scid);
1340 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1343 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1346 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on a connection forward once the
 * link is usable: BT_CONNECT channels are (re)started if security passes
 * and their mode is supported; BT_CONNECT2 (incoming, pending accept)
 * channels get a Connection Response — success, pending-authorization
 * (deferred setup), or pending-authentication — followed by the first
 * Configure Request on success.
 */
1347 static void l2cap_conn_start(struct l2cap_conn *conn)
1349 struct l2cap_chan *chan, *tmp;
1351 BT_DBG("conn %p", conn);
1353 mutex_lock(&conn->chan_lock);
/* _safe iteration: l2cap_chan_close below can unlink the current entry. */
1355 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1356 l2cap_chan_lock(chan);
1358 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1359 l2cap_chan_unlock(chan);
1363 if (chan->state == BT_CONNECT) {
1364 if (!l2cap_chan_check_security(chan) ||
1365 !__l2cap_no_conn_pending(chan)) {
1366 l2cap_chan_unlock(chan);
/* State-2 devices can't renegotiate mode: close instead of downgrading. */
1370 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1371 && test_bit(CONF_STATE2_DEVICE,
1372 &chan->conf_state)) {
1373 l2cap_chan_close(chan, ECONNRESET);
1374 l2cap_chan_unlock(chan);
1378 l2cap_start_connection(chan);
1380 } else if (chan->state == BT_CONNECT2) {
1381 struct l2cap_conn_rsp rsp;
1383 rsp.scid = cpu_to_le16(chan->dcid);
1384 rsp.dcid = cpu_to_le16(chan->scid);
1386 if (l2cap_chan_check_security(chan)) {
1387 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1388 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1389 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1390 chan->ops->defer(chan);
1393 l2cap_state_change(chan, BT_CONFIG);
1394 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1395 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1398 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1399 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1402 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only send the first Configure Request once, and only on success. */
1405 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1406 rsp.result != L2CAP_CR_SUCCESS) {
1407 l2cap_chan_unlock(chan);
1411 set_bit(CONF_REQ_SENT, &chan->conf_state);
1412 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1413 l2cap_build_conf_req(chan, buf), buf);
1414 chan->num_conf_req++;
1417 l2cap_chan_unlock(chan);
1420 mutex_unlock(&conn->chan_lock);
1423 /* Find socket with cid and source/destination bdaddr.
1424 * Returns closest match, locked.
1426 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1430 struct l2cap_chan *c, *c1 = NULL;
1432 read_lock(&chan_list_lock);
1434 list_for_each_entry(c, &chan_list, global_l) {
1435 if (state && c->state != state)
1438 if (c->scid == cid) {
1439 int src_match, dst_match;
1440 int src_any, dst_any;
1443 src_match = !bacmp(&c->src, src);
1444 dst_match = !bacmp(&c->dst, dst);
1445 if (src_match && dst_match) {
1446 read_unlock(&chan_list_lock);
1451 src_any = !bacmp(&c->src, BDADDR_ANY);
1452 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1453 if ((src_match && dst_any) || (src_any && dst_match) ||
1454 (src_any && dst_any))
1459 read_unlock(&chan_list_lock);
1464 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1466 struct hci_conn *hcon = conn->hcon;
1467 struct l2cap_chan *chan, *pchan;
1472 bt_6lowpan_add_conn(conn);
1474 /* Check if we have socket listening on cid */
1475 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1476 &hcon->src, &hcon->dst);
1480 /* Client ATT sockets should override the server one */
1481 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1484 dst_type = bdaddr_type(hcon, hcon->dst_type);
1486 /* If device is blocked, do not create a channel for it */
1487 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1490 l2cap_chan_lock(pchan);
1492 chan = pchan->ops->new_connection(pchan);
1496 chan->dcid = L2CAP_CID_ATT;
1498 bacpy(&chan->src, &hcon->src);
1499 bacpy(&chan->dst, &hcon->dst);
1500 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1501 chan->dst_type = dst_type;
1503 __l2cap_chan_add(conn, chan);
1506 l2cap_chan_unlock(pchan);
1509 static void l2cap_conn_ready(struct l2cap_conn *conn)
1511 struct l2cap_chan *chan;
1512 struct hci_conn *hcon = conn->hcon;
1514 BT_DBG("conn %p", conn);
1516 /* For outgoing pairing which doesn't necessarily have an
1517 * associated socket (e.g. mgmt_pair_device).
1519 if (hcon->out && hcon->type == LE_LINK)
1520 smp_conn_security(hcon, hcon->pending_sec_level);
1522 mutex_lock(&conn->chan_lock);
1524 if (hcon->type == LE_LINK)
1525 l2cap_le_conn_ready(conn);
1527 list_for_each_entry(chan, &conn->chan_l, list) {
1529 l2cap_chan_lock(chan);
1531 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1532 l2cap_chan_unlock(chan);
1536 if (hcon->type == LE_LINK) {
1537 l2cap_le_start(chan);
1538 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1539 l2cap_chan_ready(chan);
1541 } else if (chan->state == BT_CONNECT) {
1542 l2cap_do_start(chan);
1545 l2cap_chan_unlock(chan);
1548 mutex_unlock(&conn->chan_lock);
1551 /* Notify sockets that we cannot guaranty reliability anymore */
1552 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1554 struct l2cap_chan *chan;
1556 BT_DBG("conn %p", conn);
1558 mutex_lock(&conn->chan_lock);
1560 list_for_each_entry(chan, &conn->chan_l, list) {
1561 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1562 l2cap_chan_set_err(chan, err);
1565 mutex_unlock(&conn->chan_lock);
1568 static void l2cap_info_timeout(struct work_struct *work)
1570 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1573 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1574 conn->info_ident = 0;
1576 l2cap_conn_start(conn);
1581 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1582 * callback is called during registration. The ->remove callback is called
1583 * during unregistration.
1584 * An l2cap_user object can either be explicitly unregistered or when the
1585 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1586 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1587 * External modules must own a reference to the l2cap_conn object if they intend
1588 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1589 * any time if they don't.
1592 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1594 struct hci_dev *hdev = conn->hcon->hdev;
1597 /* We need to check whether l2cap_conn is registered. If it is not, we
1598 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1599 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1600 * relies on the parent hci_conn object to be locked. This itself relies
1601 * on the hci_dev object to be locked. So we must lock the hci device
1606 if (user->list.next || user->list.prev) {
1611 /* conn->hchan is NULL after l2cap_conn_del() was called */
1617 ret = user->probe(conn, user);
1621 list_add(&user->list, &conn->users);
1625 hci_dev_unlock(hdev);
1628 EXPORT_SYMBOL(l2cap_register_user);
1630 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1632 struct hci_dev *hdev = conn->hcon->hdev;
1636 if (!user->list.next || !user->list.prev)
1639 list_del(&user->list);
1640 user->list.next = NULL;
1641 user->list.prev = NULL;
1642 user->remove(conn, user);
1645 hci_dev_unlock(hdev);
1647 EXPORT_SYMBOL(l2cap_unregister_user);
1649 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1651 struct l2cap_user *user;
1653 while (!list_empty(&conn->users)) {
1654 user = list_first_entry(&conn->users, struct l2cap_user, list);
1655 list_del(&user->list);
1656 user->list.next = NULL;
1657 user->list.prev = NULL;
1658 user->remove(conn, user);
1662 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1664 struct l2cap_conn *conn = hcon->l2cap_data;
1665 struct l2cap_chan *chan, *l;
1670 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1672 kfree_skb(conn->rx_skb);
1674 l2cap_unregister_all_users(conn);
1676 mutex_lock(&conn->chan_lock);
1679 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1680 l2cap_chan_hold(chan);
1681 l2cap_chan_lock(chan);
1683 l2cap_chan_del(chan, err);
1685 l2cap_chan_unlock(chan);
1687 chan->ops->close(chan);
1688 l2cap_chan_put(chan);
1691 mutex_unlock(&conn->chan_lock);
1693 hci_chan_del(conn->hchan);
1695 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1696 cancel_delayed_work_sync(&conn->info_timer);
1698 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1699 cancel_delayed_work_sync(&conn->security_timer);
1700 smp_chan_destroy(conn);
1703 hcon->l2cap_data = NULL;
1705 l2cap_conn_put(conn);
1708 static void security_timeout(struct work_struct *work)
1710 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1711 security_timer.work);
1713 BT_DBG("conn %p", conn);
1715 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1716 smp_chan_destroy(conn);
1717 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1721 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1723 struct l2cap_conn *conn = hcon->l2cap_data;
1724 struct hci_chan *hchan;
1729 hchan = hci_chan_create(hcon);
1733 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1735 hci_chan_del(hchan);
1739 kref_init(&conn->ref);
1740 hcon->l2cap_data = conn;
1742 hci_conn_get(conn->hcon);
1743 conn->hchan = hchan;
1745 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1747 switch (hcon->type) {
1749 if (hcon->hdev->le_mtu) {
1750 conn->mtu = hcon->hdev->le_mtu;
1755 conn->mtu = hcon->hdev->acl_mtu;
1759 conn->feat_mask = 0;
1761 if (hcon->type == ACL_LINK)
1762 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1763 &hcon->hdev->dev_flags);
1765 spin_lock_init(&conn->lock);
1766 mutex_init(&conn->chan_lock);
1768 INIT_LIST_HEAD(&conn->chan_l);
1769 INIT_LIST_HEAD(&conn->users);
1771 if (hcon->type == LE_LINK)
1772 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1774 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1776 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1781 static void l2cap_conn_free(struct kref *ref)
1783 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1785 hci_conn_put(conn->hcon);
1789 void l2cap_conn_get(struct l2cap_conn *conn)
1791 kref_get(&conn->ref);
1793 EXPORT_SYMBOL(l2cap_conn_get);
1795 void l2cap_conn_put(struct l2cap_conn *conn)
1797 kref_put(&conn->ref, l2cap_conn_free);
1799 EXPORT_SYMBOL(l2cap_conn_put);
1801 /* ---- Socket interface ---- */
1803 /* Find socket with psm and source / destination bdaddr.
1804 * Returns closest match.
1806 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1811 struct l2cap_chan *c, *c1 = NULL;
1813 read_lock(&chan_list_lock);
1815 list_for_each_entry(c, &chan_list, global_l) {
1816 if (state && c->state != state)
1819 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1822 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1825 if (c->psm == psm) {
1826 int src_match, dst_match;
1827 int src_any, dst_any;
1830 src_match = !bacmp(&c->src, src);
1831 dst_match = !bacmp(&c->dst, dst);
1832 if (src_match && dst_match) {
1833 read_unlock(&chan_list_lock);
1838 src_any = !bacmp(&c->src, BDADDR_ANY);
1839 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1840 if ((src_match && dst_any) || (src_any && dst_match) ||
1841 (src_any && dst_any))
1846 read_unlock(&chan_list_lock);
1851 static bool is_valid_psm(u16 psm, u8 dst_type)
1856 if (bdaddr_type_is_le(dst_type))
1857 return (psm <= 0x00ff);
1859 /* PSM must be odd and lsb of upper byte must be 0 */
1860 return ((psm & 0x0101) == 0x0001);
1863 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1864 bdaddr_t *dst, u8 dst_type)
1866 struct l2cap_conn *conn;
1867 struct hci_conn *hcon;
1868 struct hci_dev *hdev;
1872 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1873 dst_type, __le16_to_cpu(psm));
1875 hdev = hci_get_route(dst, &chan->src);
1877 return -EHOSTUNREACH;
1881 l2cap_chan_lock(chan);
1883 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
1884 chan->chan_type != L2CAP_CHAN_RAW) {
1889 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1894 switch (chan->mode) {
1895 case L2CAP_MODE_BASIC:
1897 case L2CAP_MODE_LE_FLOWCTL:
1898 l2cap_le_flowctl_init(chan);
1900 case L2CAP_MODE_ERTM:
1901 case L2CAP_MODE_STREAMING:
1910 switch (chan->state) {
1914 /* Already connecting */
1919 /* Already connected */
1933 /* Set destination address and psm */
1934 bacpy(&chan->dst, dst);
1935 chan->dst_type = dst_type;
1940 auth_type = l2cap_get_auth_type(chan);
1942 if (bdaddr_type_is_le(dst_type))
1943 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1944 chan->sec_level, auth_type);
1946 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1947 chan->sec_level, auth_type);
1950 err = PTR_ERR(hcon);
1954 conn = l2cap_conn_add(hcon);
1956 hci_conn_drop(hcon);
1961 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1962 hci_conn_drop(hcon);
1967 /* Update source addr of the socket */
1968 bacpy(&chan->src, &hcon->src);
1969 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1971 l2cap_chan_unlock(chan);
1972 l2cap_chan_add(conn, chan);
1973 l2cap_chan_lock(chan);
1975 /* l2cap_chan_add takes its own ref so we can drop this one */
1976 hci_conn_drop(hcon);
1978 l2cap_state_change(chan, BT_CONNECT);
1979 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
1981 if (hcon->state == BT_CONNECTED) {
1982 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1983 __clear_chan_timer(chan);
1984 if (l2cap_chan_check_security(chan))
1985 l2cap_state_change(chan, BT_CONNECTED);
1987 l2cap_do_start(chan);
1993 l2cap_chan_unlock(chan);
1994 hci_dev_unlock(hdev);
1999 static void l2cap_monitor_timeout(struct work_struct *work)
2001 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2002 monitor_timer.work);
2004 BT_DBG("chan %p", chan);
2006 l2cap_chan_lock(chan);
2009 l2cap_chan_unlock(chan);
2010 l2cap_chan_put(chan);
2014 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2016 l2cap_chan_unlock(chan);
2017 l2cap_chan_put(chan);
2020 static void l2cap_retrans_timeout(struct work_struct *work)
2022 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2023 retrans_timer.work);
2025 BT_DBG("chan %p", chan);
2027 l2cap_chan_lock(chan);
2030 l2cap_chan_unlock(chan);
2031 l2cap_chan_put(chan);
2035 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2036 l2cap_chan_unlock(chan);
2037 l2cap_chan_put(chan);
2040 static void l2cap_streaming_send(struct l2cap_chan *chan,
2041 struct sk_buff_head *skbs)
2043 struct sk_buff *skb;
2044 struct l2cap_ctrl *control;
2046 BT_DBG("chan %p, skbs %p", chan, skbs);
2048 if (__chan_is_moving(chan))
2051 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2053 while (!skb_queue_empty(&chan->tx_q)) {
2055 skb = skb_dequeue(&chan->tx_q);
2057 bt_cb(skb)->control.retries = 1;
2058 control = &bt_cb(skb)->control;
2060 control->reqseq = 0;
2061 control->txseq = chan->next_tx_seq;
2063 __pack_control(chan, control, skb);
2065 if (chan->fcs == L2CAP_FCS_CRC16) {
2066 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2067 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2070 l2cap_do_send(chan, skb);
2072 BT_DBG("Sent txseq %u", control->txseq);
2074 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2075 chan->frames_sent++;
2079 static int l2cap_ertm_send(struct l2cap_chan *chan)
2081 struct sk_buff *skb, *tx_skb;
2082 struct l2cap_ctrl *control;
2085 BT_DBG("chan %p", chan);
2087 if (chan->state != BT_CONNECTED)
2090 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2093 if (__chan_is_moving(chan))
2096 while (chan->tx_send_head &&
2097 chan->unacked_frames < chan->remote_tx_win &&
2098 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2100 skb = chan->tx_send_head;
2102 bt_cb(skb)->control.retries = 1;
2103 control = &bt_cb(skb)->control;
2105 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2108 control->reqseq = chan->buffer_seq;
2109 chan->last_acked_seq = chan->buffer_seq;
2110 control->txseq = chan->next_tx_seq;
2112 __pack_control(chan, control, skb);
2114 if (chan->fcs == L2CAP_FCS_CRC16) {
2115 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2116 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2119 /* Clone after data has been modified. Data is assumed to be
2120 read-only (for locking purposes) on cloned sk_buffs.
2122 tx_skb = skb_clone(skb, GFP_KERNEL);
2127 __set_retrans_timer(chan);
2129 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2130 chan->unacked_frames++;
2131 chan->frames_sent++;
2134 if (skb_queue_is_last(&chan->tx_q, skb))
2135 chan->tx_send_head = NULL;
2137 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2139 l2cap_do_send(chan, tx_skb);
2140 BT_DBG("Sent txseq %u", control->txseq);
2143 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2144 chan->unacked_frames, skb_queue_len(&chan->tx_q));
2149 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2151 struct l2cap_ctrl control;
2152 struct sk_buff *skb;
2153 struct sk_buff *tx_skb;
2156 BT_DBG("chan %p", chan);
2158 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2161 if (__chan_is_moving(chan))
2164 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2165 seq = l2cap_seq_list_pop(&chan->retrans_list);
2167 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2169 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2174 bt_cb(skb)->control.retries++;
2175 control = bt_cb(skb)->control;
2177 if (chan->max_tx != 0 &&
2178 bt_cb(skb)->control.retries > chan->max_tx) {
2179 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2180 l2cap_send_disconn_req(chan, ECONNRESET);
2181 l2cap_seq_list_clear(&chan->retrans_list);
2185 control.reqseq = chan->buffer_seq;
2186 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2191 if (skb_cloned(skb)) {
2192 /* Cloned sk_buffs are read-only, so we need a
2195 tx_skb = skb_copy(skb, GFP_KERNEL);
2197 tx_skb = skb_clone(skb, GFP_KERNEL);
2201 l2cap_seq_list_clear(&chan->retrans_list);
2205 /* Update skb contents */
2206 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2207 put_unaligned_le32(__pack_extended_control(&control),
2208 tx_skb->data + L2CAP_HDR_SIZE);
2210 put_unaligned_le16(__pack_enhanced_control(&control),
2211 tx_skb->data + L2CAP_HDR_SIZE);
2214 if (chan->fcs == L2CAP_FCS_CRC16) {
2215 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2216 put_unaligned_le16(fcs, skb_put(tx_skb,
2220 l2cap_do_send(chan, tx_skb);
2222 BT_DBG("Resent txseq %d", control.txseq);
2224 chan->last_acked_seq = chan->buffer_seq;
2228 static void l2cap_retransmit(struct l2cap_chan *chan,
2229 struct l2cap_ctrl *control)
2231 BT_DBG("chan %p, control %p", chan, control);
2233 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2234 l2cap_ertm_resend(chan);
2237 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2238 struct l2cap_ctrl *control)
2240 struct sk_buff *skb;
2242 BT_DBG("chan %p, control %p", chan, control);
2245 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2247 l2cap_seq_list_clear(&chan->retrans_list);
2249 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2252 if (chan->unacked_frames) {
2253 skb_queue_walk(&chan->tx_q, skb) {
2254 if (bt_cb(skb)->control.txseq == control->reqseq ||
2255 skb == chan->tx_send_head)
2259 skb_queue_walk_from(&chan->tx_q, skb) {
2260 if (skb == chan->tx_send_head)
2263 l2cap_seq_list_append(&chan->retrans_list,
2264 bt_cb(skb)->control.txseq);
2267 l2cap_ertm_resend(chan);
2271 static void l2cap_send_ack(struct l2cap_chan *chan)
2273 struct l2cap_ctrl control;
2274 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2275 chan->last_acked_seq);
2278 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2279 chan, chan->last_acked_seq, chan->buffer_seq);
2281 memset(&control, 0, sizeof(control));
2284 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2285 chan->rx_state == L2CAP_RX_STATE_RECV) {
2286 __clear_ack_timer(chan);
2287 control.super = L2CAP_SUPER_RNR;
2288 control.reqseq = chan->buffer_seq;
2289 l2cap_send_sframe(chan, &control);
2291 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2292 l2cap_ertm_send(chan);
2293 /* If any i-frames were sent, they included an ack */
2294 if (chan->buffer_seq == chan->last_acked_seq)
2298 /* Ack now if the window is 3/4ths full.
2299 * Calculate without mul or div
2301 threshold = chan->ack_win;
2302 threshold += threshold << 1;
2305 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2308 if (frames_to_ack >= threshold) {
2309 __clear_ack_timer(chan);
2310 control.super = L2CAP_SUPER_RR;
2311 control.reqseq = chan->buffer_seq;
2312 l2cap_send_sframe(chan, &control);
2317 __set_ack_timer(chan);
2321 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2322 struct msghdr *msg, int len,
2323 int count, struct sk_buff *skb)
2325 struct l2cap_conn *conn = chan->conn;
2326 struct sk_buff **frag;
2329 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2335 /* Continuation fragments (no L2CAP header) */
2336 frag = &skb_shinfo(skb)->frag_list;
2338 struct sk_buff *tmp;
2340 count = min_t(unsigned int, conn->mtu, len);
2342 tmp = chan->ops->alloc_skb(chan, count,
2343 msg->msg_flags & MSG_DONTWAIT);
2345 return PTR_ERR(tmp);
2349 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2352 (*frag)->priority = skb->priority;
2357 skb->len += (*frag)->len;
2358 skb->data_len += (*frag)->len;
2360 frag = &(*frag)->next;
2366 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2367 struct msghdr *msg, size_t len,
2370 struct l2cap_conn *conn = chan->conn;
2371 struct sk_buff *skb;
2372 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2373 struct l2cap_hdr *lh;
2375 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2376 __le16_to_cpu(chan->psm), len, priority);
2378 count = min_t(unsigned int, (conn->mtu - hlen), len);
2380 skb = chan->ops->alloc_skb(chan, count + hlen,
2381 msg->msg_flags & MSG_DONTWAIT);
2385 skb->priority = priority;
2387 /* Create L2CAP header */
2388 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2389 lh->cid = cpu_to_le16(chan->dcid);
2390 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2391 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2393 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2394 if (unlikely(err < 0)) {
2396 return ERR_PTR(err);
2401 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2402 struct msghdr *msg, size_t len,
2405 struct l2cap_conn *conn = chan->conn;
2406 struct sk_buff *skb;
2408 struct l2cap_hdr *lh;
2410 BT_DBG("chan %p len %zu", chan, len);
2412 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2414 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2415 msg->msg_flags & MSG_DONTWAIT);
2419 skb->priority = priority;
2421 /* Create L2CAP header */
2422 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2423 lh->cid = cpu_to_le16(chan->dcid);
2424 lh->len = cpu_to_le16(len);
2426 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2427 if (unlikely(err < 0)) {
2429 return ERR_PTR(err);
2434 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2435 struct msghdr *msg, size_t len,
2438 struct l2cap_conn *conn = chan->conn;
2439 struct sk_buff *skb;
2440 int err, count, hlen;
2441 struct l2cap_hdr *lh;
2443 BT_DBG("chan %p len %zu", chan, len);
2446 return ERR_PTR(-ENOTCONN);
2448 hlen = __ertm_hdr_size(chan);
2451 hlen += L2CAP_SDULEN_SIZE;
2453 if (chan->fcs == L2CAP_FCS_CRC16)
2454 hlen += L2CAP_FCS_SIZE;
2456 count = min_t(unsigned int, (conn->mtu - hlen), len);
2458 skb = chan->ops->alloc_skb(chan, count + hlen,
2459 msg->msg_flags & MSG_DONTWAIT);
2463 /* Create L2CAP header */
2464 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2465 lh->cid = cpu_to_le16(chan->dcid);
2466 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2468 /* Control header is populated later */
2469 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2470 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2472 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2475 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2477 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2478 if (unlikely(err < 0)) {
2480 return ERR_PTR(err);
2483 bt_cb(skb)->control.fcs = chan->fcs;
2484 bt_cb(skb)->control.retries = 0;
2488 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2489 struct sk_buff_head *seg_queue,
2490 struct msghdr *msg, size_t len)
2492 struct sk_buff *skb;
2497 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2499 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2500 * so fragmented skbs are not used. The HCI layer's handling
2501 * of fragmented skbs is not compatible with ERTM's queueing.
2504 /* PDU size is derived from the HCI MTU */
2505 pdu_len = chan->conn->mtu;
2507 /* Constrain PDU size for BR/EDR connections */
2509 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2511 /* Adjust for largest possible L2CAP overhead. */
2513 pdu_len -= L2CAP_FCS_SIZE;
2515 pdu_len -= __ertm_hdr_size(chan);
2517 /* Remote device may have requested smaller PDUs */
2518 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2520 if (len <= pdu_len) {
2521 sar = L2CAP_SAR_UNSEGMENTED;
2525 sar = L2CAP_SAR_START;
2527 pdu_len -= L2CAP_SDULEN_SIZE;
2531 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2534 __skb_queue_purge(seg_queue);
2535 return PTR_ERR(skb);
2538 bt_cb(skb)->control.sar = sar;
2539 __skb_queue_tail(seg_queue, skb);
2544 pdu_len += L2CAP_SDULEN_SIZE;
2547 if (len <= pdu_len) {
2548 sar = L2CAP_SAR_END;
2551 sar = L2CAP_SAR_CONTINUE;
2558 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2560 size_t len, u16 sdulen)
2562 struct l2cap_conn *conn = chan->conn;
2563 struct sk_buff *skb;
2564 int err, count, hlen;
2565 struct l2cap_hdr *lh;
2567 BT_DBG("chan %p len %zu", chan, len);
2570 return ERR_PTR(-ENOTCONN);
2572 hlen = L2CAP_HDR_SIZE;
2575 hlen += L2CAP_SDULEN_SIZE;
2577 count = min_t(unsigned int, (conn->mtu - hlen), len);
2579 skb = chan->ops->alloc_skb(chan, count + hlen,
2580 msg->msg_flags & MSG_DONTWAIT);
2584 /* Create L2CAP header */
2585 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2586 lh->cid = cpu_to_le16(chan->dcid);
2587 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2590 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2592 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2593 if (unlikely(err < 0)) {
2595 return ERR_PTR(err);
2601 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2602 struct sk_buff_head *seg_queue,
2603 struct msghdr *msg, size_t len)
2605 struct sk_buff *skb;
2609 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2612 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2618 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2620 __skb_queue_purge(seg_queue);
2621 return PTR_ERR(skb);
2624 __skb_queue_tail(seg_queue, skb);
2630 pdu_len += L2CAP_SDULEN_SIZE;
2637 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2640 struct sk_buff *skb;
2642 struct sk_buff_head seg_queue;
2647 /* Connectionless channel */
2648 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2649 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2651 return PTR_ERR(skb);
2653 l2cap_do_send(chan, skb);
2657 switch (chan->mode) {
2658 case L2CAP_MODE_LE_FLOWCTL:
2659 /* Check outgoing MTU */
2660 if (len > chan->omtu)
2663 if (!chan->tx_credits)
2666 __skb_queue_head_init(&seg_queue);
2668 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2670 if (chan->state != BT_CONNECTED) {
2671 __skb_queue_purge(&seg_queue);
2678 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2680 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2681 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2685 if (!chan->tx_credits)
2686 chan->ops->suspend(chan);
2692 case L2CAP_MODE_BASIC:
2693 /* Check outgoing MTU */
2694 if (len > chan->omtu)
2697 /* Create a basic PDU */
2698 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2700 return PTR_ERR(skb);
2702 l2cap_do_send(chan, skb);
2706 case L2CAP_MODE_ERTM:
2707 case L2CAP_MODE_STREAMING:
2708 /* Check outgoing MTU */
2709 if (len > chan->omtu) {
2714 __skb_queue_head_init(&seg_queue);
2716 /* Do segmentation before calling in to the state machine,
2717 * since it's possible to block while waiting for memory
2720 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2722 /* The channel could have been closed while segmenting,
2723 * check that it is still connected.
2725 if (chan->state != BT_CONNECTED) {
2726 __skb_queue_purge(&seg_queue);
2733 if (chan->mode == L2CAP_MODE_ERTM)
2734 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2736 l2cap_streaming_send(chan, &seg_queue);
2740 /* If the skbs were not queued for sending, they'll still be in
2741 * seg_queue and need to be purged.
2743 __skb_queue_purge(&seg_queue);
2747 BT_DBG("bad state %1.1x", chan->mode);
2754 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2756 struct l2cap_ctrl control;
2759 BT_DBG("chan %p, txseq %u", chan, txseq);
2761 memset(&control, 0, sizeof(control));
2763 control.super = L2CAP_SUPER_SREJ;
2765 for (seq = chan->expected_tx_seq; seq != txseq;
2766 seq = __next_seq(chan, seq)) {
2767 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2768 control.reqseq = seq;
2769 l2cap_send_sframe(chan, &control);
2770 l2cap_seq_list_append(&chan->srej_list, seq);
2774 chan->expected_tx_seq = __next_seq(chan, txseq);
2777 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2779 struct l2cap_ctrl control;
2781 BT_DBG("chan %p", chan);
2783 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2786 memset(&control, 0, sizeof(control));
2788 control.super = L2CAP_SUPER_SREJ;
2789 control.reqseq = chan->srej_list.tail;
2790 l2cap_send_sframe(chan, &control);
2793 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2795 struct l2cap_ctrl control;
2799 BT_DBG("chan %p, txseq %u", chan, txseq);
2801 memset(&control, 0, sizeof(control));
2803 control.super = L2CAP_SUPER_SREJ;
2805 /* Capture initial list head to allow only one pass through the list. */
2806 initial_head = chan->srej_list.head;
2809 seq = l2cap_seq_list_pop(&chan->srej_list);
2810 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2813 control.reqseq = seq;
2814 l2cap_send_sframe(chan, &control);
2815 l2cap_seq_list_append(&chan->srej_list, seq);
2816 } while (chan->srej_list.head != initial_head);
2819 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2821 struct sk_buff *acked_skb;
2824 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2826 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2829 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2830 chan->expected_ack_seq, chan->unacked_frames);
2832 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2833 ackseq = __next_seq(chan, ackseq)) {
2835 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2837 skb_unlink(acked_skb, &chan->tx_q);
2838 kfree_skb(acked_skb);
2839 chan->unacked_frames--;
2843 chan->expected_ack_seq = reqseq;
2845 if (chan->unacked_frames == 0)
2846 __clear_retrans_timer(chan);
2848 BT_DBG("unacked_frames %u", chan->unacked_frames);
2851 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2853 BT_DBG("chan %p", chan);
2855 chan->expected_tx_seq = chan->buffer_seq;
2856 l2cap_seq_list_clear(&chan->srej_list);
2857 skb_queue_purge(&chan->srej_q);
2858 chan->rx_state = L2CAP_RX_STATE_RECV;
2861 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2862 struct l2cap_ctrl *control,
2863 struct sk_buff_head *skbs, u8 event)
2865 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2869 case L2CAP_EV_DATA_REQUEST:
2870 if (chan->tx_send_head == NULL)
2871 chan->tx_send_head = skb_peek(skbs);
2873 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2874 l2cap_ertm_send(chan);
2876 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2877 BT_DBG("Enter LOCAL_BUSY");
2878 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2880 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2881 /* The SREJ_SENT state must be aborted if we are to
2882 * enter the LOCAL_BUSY state.
2884 l2cap_abort_rx_srej_sent(chan);
2887 l2cap_send_ack(chan);
2890 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2891 BT_DBG("Exit LOCAL_BUSY");
2892 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2894 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2895 struct l2cap_ctrl local_control;
2897 memset(&local_control, 0, sizeof(local_control));
2898 local_control.sframe = 1;
2899 local_control.super = L2CAP_SUPER_RR;
2900 local_control.poll = 1;
2901 local_control.reqseq = chan->buffer_seq;
2902 l2cap_send_sframe(chan, &local_control);
2904 chan->retry_count = 1;
2905 __set_monitor_timer(chan);
2906 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2909 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2910 l2cap_process_reqseq(chan, control->reqseq);
2912 case L2CAP_EV_EXPLICIT_POLL:
2913 l2cap_send_rr_or_rnr(chan, 1);
2914 chan->retry_count = 1;
2915 __set_monitor_timer(chan);
2916 __clear_ack_timer(chan);
2917 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2919 case L2CAP_EV_RETRANS_TO:
2920 l2cap_send_rr_or_rnr(chan, 1);
2921 chan->retry_count = 1;
2922 __set_monitor_timer(chan);
2923 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2925 case L2CAP_EV_RECV_FBIT:
2926 /* Nothing to process */
2933 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2934 struct l2cap_ctrl *control,
2935 struct sk_buff_head *skbs, u8 event)
2937 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2941 case L2CAP_EV_DATA_REQUEST:
2942 if (chan->tx_send_head == NULL)
2943 chan->tx_send_head = skb_peek(skbs);
2944 /* Queue data, but don't send. */
2945 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2947 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2948 BT_DBG("Enter LOCAL_BUSY");
2949 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2951 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2952 /* The SREJ_SENT state must be aborted if we are to
2953 * enter the LOCAL_BUSY state.
2955 l2cap_abort_rx_srej_sent(chan);
2958 l2cap_send_ack(chan);
2961 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2962 BT_DBG("Exit LOCAL_BUSY");
2963 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2965 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2966 struct l2cap_ctrl local_control;
2967 memset(&local_control, 0, sizeof(local_control));
2968 local_control.sframe = 1;
2969 local_control.super = L2CAP_SUPER_RR;
2970 local_control.poll = 1;
2971 local_control.reqseq = chan->buffer_seq;
2972 l2cap_send_sframe(chan, &local_control);
2974 chan->retry_count = 1;
2975 __set_monitor_timer(chan);
2976 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2979 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2980 l2cap_process_reqseq(chan, control->reqseq);
2984 case L2CAP_EV_RECV_FBIT:
2985 if (control && control->final) {
2986 __clear_monitor_timer(chan);
2987 if (chan->unacked_frames > 0)
2988 __set_retrans_timer(chan);
2989 chan->retry_count = 0;
2990 chan->tx_state = L2CAP_TX_STATE_XMIT;
2991 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2994 case L2CAP_EV_EXPLICIT_POLL:
2997 case L2CAP_EV_MONITOR_TO:
2998 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2999 l2cap_send_rr_or_rnr(chan, 1);
3000 __set_monitor_timer(chan);
3001 chan->retry_count++;
3003 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch a transmit-path event to the handler for the channel's
 * current ERTM tx state (XMIT or WAIT_F).
 */
3011 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3012 struct sk_buff_head *skbs, u8 event)
3014 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3015 chan, control, skbs, event, chan->tx_state);
3017 switch (chan->tx_state) {
3018 case L2CAP_TX_STATE_XMIT:
3019 l2cap_tx_state_xmit(chan, control, skbs, event);
3021 case L2CAP_TX_STATE_WAIT_F:
3022 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received reqseq/F-bit from an incoming frame into the tx state
 * machine (no data skbs are involved, hence the NULL queue).
 */
3030 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3031 struct l2cap_ctrl *control)
3033 BT_DBG("chan %p, control %p", chan, control);
3034 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only the F-bit of an incoming frame into the tx state machine. */
3037 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3038 struct l2cap_ctrl *control)
3040 BT_DBG("chan %p, control %p", chan, control);
3041 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3044 /* Copy frame to all raw sockets on that connection */
3045 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3047 struct sk_buff *nskb;
3048 struct l2cap_chan *chan;
3050 BT_DBG("conn %p", conn);
3052 mutex_lock(&conn->chan_lock)
3054 list_for_each_entry(chan, &conn->chan_l, list) {
/* Only RAW channels get a copy. */
3055 if (chan->chan_type != L2CAP_CHAN_RAW)
3058 /* Don't send frame to the channel it came from */
3059 if (bt_cb(skb)->chan == chan)
/* Each raw channel gets its own clone of the skb. */
3062 nskb = skb_clone(skb, GFP_KERNEL);
3065 if (chan->ops->recv(chan, nskb))
3069 mutex_unlock(&conn->chan_lock);
3072 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header + command header +
 * dlen bytes of payload. If the payload exceeds the connection MTU,
 * the remainder is carried in continuation fragments chained through
 * frag_list (continuation fragments carry no L2CAP header).
 * Returns the skb, or NULL-ish failure handling on allocation error
 * (error paths elided in this listing — TODO confirm against full
 * source).
 */
3073 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3074 u8 ident, u16 dlen, void *data)
3076 struct sk_buff *skb, **frag;
3077 struct l2cap_cmd_hdr *cmd;
3078 struct l2cap_hdr *lh;
3081 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3082 conn, code, ident, dlen);
/* MTU must at least fit the two headers. */
3084 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3087 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3088 count = min_t(unsigned int, conn->mtu, len);
3090 skb = bt_skb_alloc(count, GFP_KERNEL);
3094 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
3095 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links. */
3097 if (conn->hcon->type == LE_LINK)
3098 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3100 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
3102 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
3105 cmd->len = cpu_to_le16(dlen);
/* First fragment: as much payload as fits after the headers. */
3108 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3109 memcpy(skb_put(skb, count), data, count);
3115 /* Continuation fragments (no L2CAP header) */
3116 frag = &skb_shinfo(skb)->frag_list;
3118 count = min_t(unsigned int, conn->mtu, len);
3120 *frag = bt_skb_alloc(count, GFP_KERNEL);
3124 memcpy(skb_put(*frag, count), data, count);
3129 frag = &(*frag)->next;
/* Parse one configuration option at *ptr, returning its type, length,
 * and value (1/2/4-byte values by copy; larger options as a pointer
 * stored in *val). Returns the total option size consumed so the
 * caller can advance through the option list.
 */
3139 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3142 struct l2cap_conf_opt *opt = *ptr;
3145 len = L2CAP_CONF_OPT_SIZE + opt->len;
3153 *val = *((u8 *) opt->val);
3157 *val = get_unaligned_le16(opt->val);
3161 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw bytes. */
3165 *val = (unsigned long) opt->val;
3169 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it. 1/2/4-byte values are stored inline; for larger
 * lengths, val is treated as a pointer to the option data.
 */
3173 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
3175 struct l2cap_conf_opt *opt = *ptr;
3177 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3184 *((u8 *) opt->val) = val;
3188 put_unaligned_le16(val, opt->val);
3192 put_unaligned_le32(val, opt->val);
3196 memcpy(opt->val, (void *) val, len);
/* Advance caller's write cursor past header + payload. */
3200 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification (EFS) option from the channel's
 * local QoS parameters and append it via l2cap_add_conf_opt(). Field
 * values depend on the channel mode (ERTM vs streaming).
 */
3203 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3205 struct l2cap_conf_efs efs;
3207 switch (chan->mode) {
3208 case L2CAP_MODE_ERTM:
3209 efs.id = chan->local_id;
3210 efs.stype = chan->local_stype;
3211 efs.msdu = cpu_to_le16(chan->local_msdu);
3212 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
/* ERTM uses fixed default access latency and flush timeout. */
3213 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3214 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3217 case L2CAP_MODE_STREAMING:
3219 efs.stype = L2CAP_SERV_BESTEFFORT;
3220 efs.msdu = cpu_to_le16(chan->local_msdu);
3221 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3230 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3231 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged, send an RR (or RNR) acknowledgement. Drops
 * the channel reference taken when the work was scheduled.
 */
3234 static void l2cap_ack_timeout(struct work_struct *work)
3236 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3240 BT_DBG("chan %p", chan);
3242 l2cap_chan_lock(chan);
/* Number of frames received since the last ack we sent. */
3244 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3245 chan->last_acked_seq);
3248 l2cap_send_rr_or_rnr(chan, 0);
3250 l2cap_chan_unlock(chan);
3251 l2cap_chan_put(chan);
/* Reset all ERTM sequence/window state for a channel and, for ERTM
 * mode proper, initialise the rx/tx state machines, the three ERTM
 * timers, and the SREJ/retransmission sequence lists. Returns 0 on
 * success or a negative errno from sequence-list allocation.
 */
3254 int l2cap_ertm_init(struct l2cap_chan *chan)
3258 chan->next_tx_seq = 0;
3259 chan->expected_tx_seq = 0;
3260 chan->expected_ack_seq = 0;
3261 chan->unacked_frames = 0;
3262 chan->buffer_seq = 0;
3263 chan->frames_sent = 0;
3264 chan->last_acked_seq = 0;
3266 chan->sdu_last_frag = NULL;
3269 skb_queue_head_init(&chan->tx_q);
/* AMP channel-move state starts out on BR/EDR and stable. */
3271 chan->local_amp_id = AMP_ID_BREDR;
3272 chan->move_id = AMP_ID_BREDR;
3273 chan->move_state = L2CAP_MOVE_STABLE;
3274 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs only the queue/seq reset above. */
3276 if (chan->mode != L2CAP_MODE_ERTM)
3279 chan->rx_state = L2CAP_RX_STATE_RECV;
3280 chan->tx_state = L2CAP_TX_STATE_XMIT;
3282 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3283 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3284 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3286 skb_queue_head_init(&chan->srej_q);
3288 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3292 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Roll back the srej_list if retrans_list allocation failed. */
3294 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to use: keep the requested STREAMING/ERTM mode
 * only if the remote's feature mask supports it, otherwise fall back
 * to basic mode.
 */
3299 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3302 case L2CAP_MODE_STREAMING:
3303 case L2CAP_MODE_ERTM:
3304 if (l2cap_mode_supported(mode, remote_feat_mask))
3308 return L2CAP_MODE_BASIC;
/* Extended window size is usable only with high-speed support enabled
 * and the remote advertising the extended-window feature bit.
 */
3312 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3314 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended flow spec is usable only with high-speed support enabled
 * and the remote advertising the extended-flow feature bit.
 */
3317 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3319 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in rfc->retrans_timeout and rfc->monitor_timeout. On an AMP
 * link the timeouts are derived from the controller's best-effort
 * flush timeout (converted to ms, scaled, and clamped to 16 bits);
 * on BR/EDR the spec defaults are used.
 */
3322 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3323 struct l2cap_conf_rfc *rfc)
3325 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3326 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3328 /* Class 1 devices have must have ERTM timeouts
3329 * exceeding the Link Supervision Timeout. The
3330 * default Link Supervision Timeout for AMP
3331 * controllers is 10 seconds.
3333 * Class 1 devices use 0xffffffff for their
3334 * best-effort flush timeout, so the clamping logic
3335 * will result in a timeout that meets the above
3336 * requirement. ERTM timeouts are 16-bit values, so
3337 * the maximum timeout is 65.535 seconds.
3340 /* Convert timeout to milliseconds and round */
3341 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3343 /* This is the recommended formula for class 2 devices
3344 * that start ERTM timers when packets are sent to the
3347 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field (clamped assignment elided here). */
3349 if (ertm_to > 0xffff)
3352 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3353 rfc->monitor_timeout = rfc->retrans_timeout;
3355 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3356 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Configure the transmit window: if the requested window exceeds the
 * default and extended window size is supported, switch the channel to
 * the extended control field; otherwise clamp tx_win to the default.
 * ack_win tracks the resulting tx_win.
 */
3360 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3362 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3363 __l2cap_ews_supported(chan->conn)) {
3364 /* use extended control field */
3365 set_bit(FLAG_EXT_CTRL, &chan->flags);
3366 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3368 chan->tx_win = min_t(u16, chan->tx_win,
3369 L2CAP_DEFAULT_TX_WINDOW);
3370 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3372 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request for the channel into `data`:
 * MTU option (if non-default) plus mode-specific RFC, EFS, EWS and FCS
 * options. On the first request, the mode may be downgraded via
 * l2cap_select_mode() based on the remote feature mask. Returns the
 * total request length (return elided in this listing).
 */
3375 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3377 struct l2cap_conf_req *req = data;
3378 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3379 void *ptr = req->data;
3382 BT_DBG("chan %p", chan);
/* Mode selection only happens before any conf req/rsp exchange. */
3384 if (chan->num_conf_req || chan->num_conf_rsp)
3387 switch (chan->mode) {
3388 case L2CAP_MODE_STREAMING:
3389 case L2CAP_MODE_ERTM:
3390 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3393 if (__l2cap_efs_supported(chan->conn))
3394 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3398 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3403 if (chan->imtu != L2CAP_DEFAULT_MTU)
3404 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3406 switch (chan->mode) {
3407 case L2CAP_MODE_BASIC:
/* Basic mode: only send an RFC option if the remote knows about
 * ERTM/streaming at all.
 */
3408 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3409 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3412 rfc.mode = L2CAP_MODE_BASIC;
3414 rfc.max_transmit = 0;
3415 rfc.retrans_timeout = 0;
3416 rfc.monitor_timeout = 0;
3417 rfc.max_pdu_size = 0;
3419 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3420 (unsigned long) &rfc);
3423 case L2CAP_MODE_ERTM:
3424 rfc.mode = L2CAP_MODE_ERTM;
3425 rfc.max_transmit = chan->max_tx;
3427 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size is bounded by the connection MTU minus ERTM overhead. */
3429 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3430 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3432 rfc.max_pdu_size = cpu_to_le16(size);
3434 l2cap_txwin_setup(chan);
3436 rfc.txwin_size = min_t(u16, chan->tx_win,
3437 L2CAP_DEFAULT_TX_WINDOW);
3439 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3440 (unsigned long) &rfc);
3442 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3443 l2cap_add_opt_efs(&ptr, chan);
3445 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3446 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Offer FCS_NONE when we don't need a checksum and the remote
 * supports the FCS option.
 */
3449 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3450 if (chan->fcs == L2CAP_FCS_NONE ||
3451 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3452 chan->fcs = L2CAP_FCS_NONE;
3453 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3458 case L2CAP_MODE_STREAMING:
3459 l2cap_txwin_setup(chan);
3460 rfc.mode = L2CAP_MODE_STREAMING;
3462 rfc.max_transmit = 0;
3463 rfc.retrans_timeout = 0;
3464 rfc.monitor_timeout = 0;
3466 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3467 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3469 rfc.max_pdu_size = cpu_to_le16(size);
3471 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3472 (unsigned long) &rfc);
3474 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3475 l2cap_add_opt_efs(&ptr, chan);
3477 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3478 if (chan->fcs == L2CAP_FCS_NONE ||
3479 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3480 chan->fcs = L2CAP_FCS_NONE;
3481 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3487 req->dcid = cpu_to_le16(chan->dcid);
3488 req->flags = __constant_cpu_to_le16(0);
/* Parse the peer's accumulated Configuration Request (chan->conf_req /
 * chan->conf_len) and build our Configuration Response into `data`.
 * Walks the option list, records MTU/flush/RFC/FCS/EFS/EWS values,
 * negotiates the channel mode, and emits accept/unaccept options.
 * Returns the response length or -ECONNREFUSED on unrecoverable
 * disagreement (return of length elided in this listing).
 */
3493 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3495 struct l2cap_conf_rsp *rsp = data;
3496 void *ptr = rsp->data;
3497 void *req = chan->conf_req;
3498 int len = chan->conf_len;
3499 int type, hint, olen;
3501 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3502 struct l2cap_conf_efs efs;
3504 u16 mtu = L2CAP_DEFAULT_MTU;
3505 u16 result = L2CAP_CONF_SUCCESS;
3508 BT_DBG("chan %p", chan);
/* First pass: decode every option the peer sent. */
3510 while (len >= L2CAP_CONF_OPT_SIZE) {
3511 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; others must be understood. */
3513 hint = type & L2CAP_CONF_HINT;
3514 type &= L2CAP_CONF_MASK;
3517 case L2CAP_CONF_MTU:
3521 case L2CAP_CONF_FLUSH_TO:
3522 chan->flush_to = val;
3525 case L2CAP_CONF_QOS:
3528 case L2CAP_CONF_RFC:
3529 if (olen == sizeof(rfc))
3530 memcpy(&rfc, (void *) val, olen);
3533 case L2CAP_CONF_FCS:
3534 if (val == L2CAP_FCS_NONE)
3535 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3538 case L2CAP_CONF_EFS:
3540 if (olen == sizeof(efs))
3541 memcpy(&efs, (void *) val, olen);
3544 case L2CAP_CONF_EWS:
/* EWS requires high-speed support; refuse the connection otherwise. */
3545 if (!chan->conn->hs_enabled)
3546 return -ECONNREFUSED;
3548 set_bit(FLAG_EXT_CTRL, &chan->flags);
3549 set_bit(CONF_EWS_RECV, &chan->conf_state);
3550 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3551 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back with UNKNOWN result. */
3558 result = L2CAP_CONF_UNKNOWN;
3559 *((u8 *) ptr++) = type;
/* Mode negotiation happens only on the first req/rsp round. */
3564 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3567 switch (chan->mode) {
3568 case L2CAP_MODE_STREAMING:
3569 case L2CAP_MODE_ERTM:
3570 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3571 chan->mode = l2cap_select_mode(rfc.mode,
3572 chan->conn->feat_mask);
3577 if (__l2cap_efs_supported(chan->conn))
3578 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3580 return -ECONNREFUSED;
3583 if (chan->mode != rfc.mode)
3584 return -ECONNREFUSED;
/* Peer proposed a different mode: unaccept with our mode, unless we
 * already answered once — then give up.
 */
3590 if (chan->mode != rfc.mode) {
3591 result = L2CAP_CONF_UNACCEPT;
3592 rfc.mode = chan->mode;
3594 if (chan->num_conf_rsp == 1)
3595 return -ECONNREFUSED;
3597 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3598 (unsigned long) &rfc);
3601 if (result == L2CAP_CONF_SUCCESS) {
3602 /* Configure output options and let the other side know
3603 * which ones we don't like. */
3605 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3606 result = L2CAP_CONF_UNACCEPT;
3609 set_bit(CONF_MTU_DONE, &chan->conf_state);
3611 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must match ours unless one side is NO TRAFFIC. */
3614 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3615 efs.stype != L2CAP_SERV_NOTRAFIC &&
3616 efs.stype != chan->local_stype) {
3618 result = L2CAP_CONF_UNACCEPT;
3620 if (chan->num_conf_req >= 1)
3621 return -ECONNREFUSED;
3623 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3625 (unsigned long) &efs);
3627 /* Send PENDING Conf Rsp */
3628 result = L2CAP_CONF_PENDING;
3629 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3634 case L2CAP_MODE_BASIC:
3635 chan->fcs = L2CAP_FCS_NONE;
3636 set_bit(CONF_MODE_DONE, &chan->conf_state);
3639 case L2CAP_MODE_ERTM:
/* Use the peer's tx window unless EWS already set it. */
3640 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3641 chan->remote_tx_win = rfc.txwin_size;
3643 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3645 chan->remote_max_tx = rfc.max_transmit;
3647 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3648 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3649 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3650 rfc.max_pdu_size = cpu_to_le16(size);
3651 chan->remote_mps = size;
3653 __l2cap_set_ertm_timeouts(chan, &rfc);
3655 set_bit(CONF_MODE_DONE, &chan->conf_state);
3657 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3658 sizeof(rfc), (unsigned long) &rfc);
3660 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3661 chan->remote_id = efs.id;
3662 chan->remote_stype = efs.stype;
3663 chan->remote_msdu = le16_to_cpu(efs.msdu);
3664 chan->remote_flush_to =
3665 le32_to_cpu(efs.flush_to);
3666 chan->remote_acc_lat =
3667 le32_to_cpu(efs.acc_lat);
3668 chan->remote_sdu_itime =
3669 le32_to_cpu(efs.sdu_itime);
3670 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3672 (unsigned long) &efs);
3676 case L2CAP_MODE_STREAMING:
3677 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3678 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3679 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3680 rfc.max_pdu_size = cpu_to_le16(size);
3681 chan->remote_mps = size;
3683 set_bit(CONF_MODE_DONE, &chan->conf_state);
3685 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3686 (unsigned long) &rfc);
3691 result = L2CAP_CONF_UNACCEPT;
3693 memset(&rfc, 0, sizeof(rfc));
3694 rfc.mode = chan->mode;
3697 if (result == L2CAP_CONF_SUCCESS)
3698 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3700 rsp->scid = cpu_to_le16(chan->dcid);
3701 rsp->result = cpu_to_le16(result);
3702 rsp->flags = __constant_cpu_to_le16(0);
/* Parse the peer's Configuration Response and build a follow-up
 * Configuration Request into `data`, re-proposing each option we still
 * want. On success/pending, commits the negotiated ERTM or streaming
 * parameters to the channel. Returns the new request length or
 * -ECONNREFUSED (length return elided in this listing).
 */
3707 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3708 void *data, u16 *result)
3710 struct l2cap_conf_req *req = data;
3711 void *ptr = req->data;
3714 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3715 struct l2cap_conf_efs efs;
3717 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3719 while (len >= L2CAP_CONF_OPT_SIZE) {
3720 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3723 case L2CAP_CONF_MTU:
/* Peer pushed the MTU below the minimum: unaccept, clamp locally. */
3724 if (val < L2CAP_DEFAULT_MIN_MTU) {
3725 *result = L2CAP_CONF_UNACCEPT;
3726 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3729 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3732 case L2CAP_CONF_FLUSH_TO:
3733 chan->flush_to = val;
3734 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3738 case L2CAP_CONF_RFC:
3739 if (olen == sizeof(rfc))
3740 memcpy(&rfc, (void *)val, olen);
/* State-2 devices cannot change mode once committed. */
3742 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3743 rfc.mode != chan->mode)
3744 return -ECONNREFUSED;
3748 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3749 sizeof(rfc), (unsigned long) &rfc);
3752 case L2CAP_CONF_EWS:
3753 chan->ack_win = min_t(u16, val, chan->ack_win);
3754 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3758 case L2CAP_CONF_EFS:
3759 if (olen == sizeof(efs))
3760 memcpy(&efs, (void *)val, olen);
3762 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3763 efs.stype != L2CAP_SERV_NOTRAFIC &&
3764 efs.stype != chan->local_stype)
3765 return -ECONNREFUSED;
3767 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3768 (unsigned long) &efs);
3771 case L2CAP_CONF_FCS:
3772 if (*result == L2CAP_CONF_PENDING)
3773 if (val == L2CAP_FCS_NONE)
3774 set_bit(CONF_RECV_NO_FCS,
/* A basic-mode channel cannot be switched to another mode by the
 * peer's response.
 */
3780 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3781 return -ECONNREFUSED;
3783 chan->mode = rfc.mode;
3785 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3787 case L2CAP_MODE_ERTM:
3788 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3789 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3790 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3791 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3792 chan->ack_win = min_t(u16, chan->ack_win,
3795 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3796 chan->local_msdu = le16_to_cpu(efs.msdu);
3797 chan->local_sdu_itime =
3798 le32_to_cpu(efs.sdu_itime);
3799 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3800 chan->local_flush_to =
3801 le32_to_cpu(efs.flush_to);
3805 case L2CAP_MODE_STREAMING:
3806 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3810 req->dcid = cpu_to_le16(chan->dcid);
3811 req->flags = __constant_cpu_to_le16(0);
/* Build a minimal Configuration Response (scid/result/flags, no
 * options) into `data` for the given channel.
 */
3816 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3817 u16 result, u16 flags)
3819 struct l2cap_conf_rsp *rsp = data;
3820 void *ptr = rsp->data;
3822 BT_DBG("chan %p", chan);
3824 rsp->scid = cpu_to_le16(chan->dcid);
3825 rsp->result = cpu_to_le16(result);
3826 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE connection response for a channel whose setup
 * was previously deferred, reporting success with our CID, MTU, MPS
 * and initial credit count.
 */
3831 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3833 struct l2cap_le_conn_rsp rsp;
3834 struct l2cap_conn *conn = chan->conn;
3836 BT_DBG("chan %p", chan);
3838 rsp.dcid = cpu_to_le16(chan->scid);
3839 rsp.mtu = cpu_to_le16(chan->imtu);
3840 rsp.mps = cpu_to_le16(chan->mps);
3841 rsp.credits = cpu_to_le16(chan->rx_credits);
3842 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3844 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR connection (or AMP create-channel) response
 * reporting success, then kick off configuration by sending our first
 * Configuration Request if one has not been sent yet.
 */
3848 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3850 struct l2cap_conn_rsp rsp;
3851 struct l2cap_conn *conn = chan->conn;
3855 rsp.scid = cpu_to_le16(chan->dcid);
3856 rsp.dcid = cpu_to_le16(chan->scid);
3857 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3858 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3861 rsp_code = L2CAP_CREATE_CHAN_RSP;
3863 rsp_code = L2CAP_CONN_RSP;
3865 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3867 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only one side starts configuration; bail if already sent. */
3869 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3872 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3873 l2cap_build_conf_req(chan, buf), buf);
3874 chan->num_conf_req++;
/* Extract the RFC (and EWS) options from a successful Configuration
 * Response and commit timeouts, MPS and ack window to the channel.
 * Sane defaults are pre-loaded in case the remote omitted the options.
 */
3877 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3881 /* Use sane default values in case a misbehaving remote device
3882 * did not send an RFC or extended window size option.
3884 u16 txwin_ext = chan->ack_win;
3885 struct l2cap_conf_rfc rfc = {
3887 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3888 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3889 .max_pdu_size = cpu_to_le16(chan->imtu),
3890 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3893 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming modes carry RFC parameters. */
3895 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3898 while (len >= L2CAP_CONF_OPT_SIZE) {
3899 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3902 case L2CAP_CONF_RFC:
3903 if (olen == sizeof(rfc))
3904 memcpy(&rfc, (void *)val, olen);
3906 case L2CAP_CONF_EWS:
3913 case L2CAP_MODE_ERTM:
3914 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3915 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3916 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Extended control: window comes from EWS; otherwise from RFC. */
3917 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3918 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3920 chan->ack_win = min_t(u16, chan->ack_win,
3923 case L2CAP_MODE_STREAMING:
3924 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it rejects our pending
 * Information Request (matching ident), treat feature discovery as
 * done and proceed with connection startup.
 */
3928 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3929 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3932 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Reject malformed (too-short) packets. */
3934 if (cmd_len < sizeof(*rej))
3937 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3940 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3941 cmd->ident == conn->info_ident) {
3942 cancel_delayed_work(&conn->info_timer);
3944 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3945 conn->info_ident = 0;
3947 l2cap_conn_start(conn);
/* Handle an incoming Connection Request (or AMP Create Channel
 * Request): look up a listening channel for the PSM, run security
 * checks, create and register the new channel, choose the result/
 * status (success, pending, refused), send the response, and if
 * appropriate start feature discovery and configuration. Returns the
 * new channel or NULL on rejection.
 */
3953 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3954 struct l2cap_cmd_hdr *cmd,
3955 u8 *data, u8 rsp_code, u8 amp_id)
3957 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3958 struct l2cap_conn_rsp rsp;
3959 struct l2cap_chan *chan = NULL, *pchan;
3960 int result, status = L2CAP_CS_NO_INFO;
3962 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3963 __le16 psm = req->psm;
3965 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3967 /* Check if we have socket listening on psm */
3968 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3969 &conn->hcon->dst, ACL_LINK);
3971 result = L2CAP_CR_BAD_PSM;
3975 mutex_lock(&conn->chan_lock);
3976 l2cap_chan_lock(pchan);
3978 /* Check if the ACL is secure enough (if not SDP) */
3979 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3980 !hci_conn_check_link_mode(conn->hcon)) {
3981 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3982 result = L2CAP_CR_SEC_BLOCK;
3986 result = L2CAP_CR_NO_MEM;
3988 /* Check if we already have channel with that dcid */
3989 if (__l2cap_get_chan_by_dcid(conn, scid))
3992 chan = pchan->ops->new_connection(pchan);
3996 /* For certain devices (ex: HID mouse), support for authentication,
3997 * pairing and bonding is optional. For such devices, inorder to avoid
3998 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3999 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4001 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4003 bacpy(&chan->src, &conn->hcon->src);
4004 bacpy(&chan->dst, &conn->hcon->dst);
4005 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
4006 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
4009 chan->local_amp_id = amp_id;
4011 __l2cap_chan_add(conn, chan);
4015 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* Remember the request ident so a deferred response can reuse it. */
4017 chan->ident = cmd->ident;
4019 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4020 if (l2cap_chan_check_security(chan)) {
4021 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4022 l2cap_state_change(chan, BT_CONNECT2);
4023 result = L2CAP_CR_PEND;
4024 status = L2CAP_CS_AUTHOR_PEND;
4025 chan->ops->defer(chan);
4027 /* Force pending result for AMP controllers.
4028 * The connection will succeed after the
4029 * physical link is up.
4031 if (amp_id == AMP_ID_BREDR) {
4032 l2cap_state_change(chan, BT_CONFIG);
4033 result = L2CAP_CR_SUCCESS;
4035 l2cap_state_change(chan, BT_CONNECT2);
4036 result = L2CAP_CR_PEND;
4038 status = L2CAP_CS_NO_INFO;
4041 l2cap_state_change(chan, BT_CONNECT2);
4042 result = L2CAP_CR_PEND;
4043 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask not yet known: answer pending, ask for it below. */
4046 l2cap_state_change(chan, BT_CONNECT2);
4047 result = L2CAP_CR_PEND;
4048 status = L2CAP_CS_NO_INFO;
4052 l2cap_chan_unlock(pchan);
4053 mutex_unlock(&conn->chan_lock);
4056 rsp.scid = cpu_to_le16(scid);
4057 rsp.dcid = cpu_to_le16(dcid);
4058 rsp.result = cpu_to_le16(result);
4059 rsp.status = cpu_to_le16(status);
4060 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4062 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4063 struct l2cap_info_req info;
4064 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4066 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4067 conn->info_ident = l2cap_get_ident(conn);
4069 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4071 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4072 sizeof(info), &info);
/* Connection accepted immediately: start configuration. */
4075 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4076 result == L2CAP_CR_SUCCESS) {
4078 set_bit(CONF_REQ_SENT, &chan->conf_state);
4079 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4080 l2cap_build_conf_req(chan, buf), buf);
4081 chan->num_conf_req++;
/* Handle an incoming Connection Request on a BR/EDR link: validate the
 * length, notify the management layer of the device connection (once),
 * and delegate to l2cap_connect() with the standard response code.
 */
4087 static int l2cap_connect_req(struct l2cap_conn *conn,
4088 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4090 struct hci_dev *hdev = conn->hcon->hdev;
4091 struct hci_conn *hcon = conn->hcon;
4093 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report the connection to mgmt exactly once per ACL. */
4097 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
4098 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4099 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
4100 hcon->dst_type, 0, NULL, 0,
4102 hci_dev_unlock(hdev);
4104 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response (or AMP Create Channel Response): find
 * the channel by scid (or by ident while still unassigned), and on
 * success move to BT_CONFIG and send our first Configuration Request;
 * on refusal, delete the channel.
 */
4108 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4109 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4112 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4113 u16 scid, dcid, result, status;
4114 struct l2cap_chan *chan;
4118 if (cmd_len < sizeof(*rsp))
4121 scid = __le16_to_cpu(rsp->scid);
4122 dcid = __le16_to_cpu(rsp->dcid);
4123 result = __le16_to_cpu(rsp->result);
4124 status = __le16_to_cpu(rsp->status);
4126 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4127 dcid, scid, result, status);
4129 mutex_lock(&conn->chan_lock);
4132 chan = __l2cap_get_chan_by_scid(conn, scid);
/* Pending responses carry no scid; match on the command ident. */
4138 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4147 l2cap_chan_lock(chan);
4150 case L2CAP_CR_SUCCESS:
4151 l2cap_state_change(chan, BT_CONFIG);
4154 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4156 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4159 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4160 l2cap_build_conf_req(chan, req), req);
4161 chan->num_conf_req++;
4165 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4169 l2cap_chan_del(chan, ECONNREFUSED);
4173 l2cap_chan_unlock(chan);
4176 mutex_unlock(&conn->chan_lock);
4181 static inline void set_default_fcs(struct l2cap_chan *chan)
4183 /* FCS is enabled only in ERTM or streaming mode, if one or both
/* Default to CRC16 unless both sides agreed to drop the FCS. */
4186 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4187 chan->fcs = L2CAP_FCS_NONE;
4188 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4189 chan->fcs = L2CAP_FCS_CRC16;
/* Send a successful Configuration Response after EFS negotiation
 * completes, clearing the local-pending flag and marking the output
 * side of configuration as done.
 */
4192 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4193 u8 ident, u16 flags)
4195 struct l2cap_conn *conn = chan->conn;
4197 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4200 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4201 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4203 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4204 l2cap_build_conf_rsp(chan, data,
4205 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the
 * offending source/destination CIDs back to the peer.
 */
4208 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4211 struct l2cap_cmd_rej_cid rej;
4213 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4214 rej.scid = __cpu_to_le16(scid);
4215 rej.dcid = __cpu_to_le16(dcid);
4217 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request: locate the channel by
 * dcid, accumulate (possibly fragmented) option data into
 * chan->conf_req, and once complete parse it, respond, and — when both
 * directions of configuration are done — initialise ERTM and mark the
 * channel ready.
 */
4220 static inline int l2cap_config_req(struct l2cap_conn *conn,
4221 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4224 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4227 struct l2cap_chan *chan;
4230 if (cmd_len < sizeof(*req))
4233 dcid = __le16_to_cpu(req->dcid);
4234 flags = __le16_to_cpu(req->flags);
4236 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4238 chan = l2cap_get_chan_by_scid(conn, dcid);
4240 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config is only legal in BT_CONFIG/BT_CONNECT2 states. */
4244 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4245 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4250 /* Reject if config buffer is too small. */
4251 len = cmd_len - sizeof(*req);
4252 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4253 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4254 l2cap_build_conf_rsp(chan, rsp,
4255 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the option data. */
4260 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4261 chan->conf_len += len;
4263 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4264 /* Incomplete config. Send empty response. */
4265 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4266 l2cap_build_conf_rsp(chan, rsp,
4267 L2CAP_CONF_SUCCESS, flags), rsp);
4271 /* Complete config. */
4272 len = l2cap_parse_conf_req(chan, rsp);
4274 l2cap_send_disconn_req(chan, ECONNRESET);
4278 chan->ident = cmd->ident;
4279 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4280 chan->num_conf_rsp++;
4282 /* Reset config buffer. */
4285 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finish channel bring-up. */
4288 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4289 set_default_fcs(chan);
4291 if (chan->mode == L2CAP_MODE_ERTM ||
4292 chan->mode == L2CAP_MODE_STREAMING)
4293 err = l2cap_ertm_init(chan);
4296 l2cap_send_disconn_req(chan, -err);
4298 l2cap_chan_ready(chan);
4303 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4305 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4306 l2cap_build_conf_req(chan, buf), buf);
4307 chan->num_conf_req++;
4310 /* Got Conf Rsp PENDING from remote side and asume we sent
4311 Conf Rsp PENDING in the code above */
4312 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4313 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4315 /* check compatibility */
4317 /* Send rsp for BR/EDR channel */
4319 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4321 chan->ident = cmd->ident;
4325 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response: dispatch on the result
 * (success, pending, unaccept), possibly renegotiate by sending a new
 * Configuration Request, and when both directions are done initialise
 * ERTM and mark the channel ready. Unrecoverable results disconnect
 * the channel.
 */
4329 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4330 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4333 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4334 u16 scid, flags, result;
4335 struct l2cap_chan *chan;
4336 int len = cmd_len - sizeof(*rsp);
4339 if (cmd_len < sizeof(*rsp))
4342 scid = __le16_to_cpu(rsp->scid);
4343 flags = __le16_to_cpu(rsp->flags);
4344 result = __le16_to_cpu(rsp->result);
4346 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4349 chan = l2cap_get_chan_by_scid(conn, scid);
4354 case L2CAP_CONF_SUCCESS:
4355 l2cap_conf_rfc_get(chan, rsp->data, len);
4356 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4359 case L2CAP_CONF_PENDING:
4360 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4362 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4365 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4368 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR can respond now; AMP waits for the logical link. */
4372 if (!chan->hs_hcon) {
4373 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4376 if (l2cap_check_efs(chan)) {
4377 amp_create_logical_link(chan);
4378 chan->ident = cmd->ident;
4384 case L2CAP_CONF_UNACCEPT:
/* Renegotiate, but only up to the maximum number of rounds. */
4385 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4388 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4389 l2cap_send_disconn_req(chan, ECONNRESET);
4393 /* throw out any old stored conf requests */
4394 result = L2CAP_CONF_SUCCESS;
4395 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4398 l2cap_send_disconn_req(chan, ECONNRESET);
4402 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4403 L2CAP_CONF_REQ, len, req);
4404 chan->num_conf_req++;
4405 if (result != L2CAP_CONF_SUCCESS)
/* Any other result is fatal for the channel. */
4411 l2cap_chan_set_err(chan, ECONNRESET);
4413 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4414 l2cap_send_disconn_req(chan, ECONNRESET);
4418 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4421 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4423 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4424 set_default_fcs(chan);
4426 if (chan->mode == L2CAP_MODE_ERTM ||
4427 chan->mode == L2CAP_MODE_STREAMING)
4428 err = l2cap_ertm_init(chan);
4431 l2cap_send_disconn_req(chan, -err);
4433 l2cap_chan_ready(chan);
4437 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Disconnection Request.
 * Echoes the cid pair back in a Disconnection Response, then tears the
 * channel down (shutdown callback, delete with ECONNRESET, close).
 * An unknown dcid is answered with a Command Reject (invalid CID).
 * NOTE(review): elided listing; some lines are missing in between.
 */
4441 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4442 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4445 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4446 struct l2cap_disconn_rsp rsp;
4448 struct l2cap_chan *chan;
/* Fixed-size PDU: exact length required. */
4450 if (cmd_len != sizeof(*req))
4453 scid = __le16_to_cpu(req->scid);
4454 dcid = __le16_to_cpu(req->dcid);
4456 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4458 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our local cid. */
4460 chan = __l2cap_get_chan_by_scid(conn, dcid);
4462 mutex_unlock(&conn->chan_lock);
4463 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4467 l2cap_chan_lock(chan);
4469 rsp.dcid = cpu_to_le16(chan->scid);
4470 rsp.scid = cpu_to_le16(chan->dcid);
4471 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4473 chan->ops->set_shutdown(chan);
/* Hold a ref so chan survives until ops->close() below. */
4475 l2cap_chan_hold(chan);
4476 l2cap_chan_del(chan, ECONNRESET);
4478 l2cap_chan_unlock(chan);
4480 chan->ops->close(chan);
4481 l2cap_chan_put(chan);
4483 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Disconnection Response: the peer confirmed
 * our disconnect, so delete and close the channel (error 0 — this is
 * an orderly shutdown, unlike the request path above).
 * NOTE(review): elided listing; some lines are missing in between.
 */
4488 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4489 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4492 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4494 struct l2cap_chan *chan;
4496 if (cmd_len != sizeof(*rsp))
4499 scid = __le16_to_cpu(rsp->scid);
4500 dcid = __le16_to_cpu(rsp->dcid);
4502 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4504 mutex_lock(&conn->chan_lock);
4506 chan = __l2cap_get_chan_by_scid(conn, scid);
4508 mutex_unlock(&conn->chan_lock);
4512 l2cap_chan_lock(chan);
/* Hold a ref across chan_del so ops->close() sees a live object. */
4514 l2cap_chan_hold(chan);
4515 l2cap_chan_del(chan, 0);
4517 l2cap_chan_unlock(chan);
4519 chan->ops->close(chan);
4520 l2cap_chan_put(chan);
4522 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Information Request. Three request types
 * are served: the extended feature mask, the fixed-channel bitmap, and
 * anything else gets an L2CAP_IR_NOTSUPP response.
 * NOTE(review): elided listing; some lines are missing in between.
 */
4527 static inline int l2cap_information_req(struct l2cap_conn *conn,
4528 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4531 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4534 if (cmd_len != sizeof(*req))
4537 type = __le16_to_cpu(req->type);
4539 BT_DBG("type 0x%4.4x", type);
4541 if (type == L2CAP_IT_FEAT_MASK) {
/* Start from the static feature mask and add mode/HS features. */
4543 u32 feat_mask = l2cap_feat_mask;
4544 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4545 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4546 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4548 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow spec / window only advertised with high speed. */
4550 if (conn->hs_enabled)
4551 feat_mask |= L2CAP_FEAT_EXT_FLOW
4552 | L2CAP_FEAT_EXT_WINDOW;
4554 put_unaligned_le32(feat_mask, rsp->data);
4555 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4557 } else if (type == L2CAP_IT_FIXED_CHAN) {
4559 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* A2MP fixed channel is toggled in the global bitmap based on
 * the per-connection hs_enabled flag. */
4561 if (conn->hs_enabled)
4562 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4564 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4566 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4567 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4568 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4569 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4572 struct l2cap_info_rsp rsp;
4573 rsp.type = cpu_to_le16(type);
4574 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4575 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming L2CAP Information Response. Matches it against
 * the outstanding conn->info_ident, records the peer's feature mask /
 * fixed-channel bitmap, chains a FIXED_CHAN query after FEAT_MASK when
 * supported, then resumes pending connection setup.
 * NOTE(review): elided listing; some lines are missing in between.
 */
4582 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4583 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4586 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4589 if (cmd_len < sizeof(*rsp))
4592 type = __le16_to_cpu(rsp->type);
4593 result = __le16_to_cpu(rsp->result);
4595 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4597 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4598 if (cmd->ident != conn->info_ident ||
4599 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4602 cancel_delayed_work(&conn->info_timer);
/* Peer refused the query: mark the exchange done and proceed
 * without the optional feature information. */
4604 if (result != L2CAP_IR_SUCCESS) {
4605 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4606 conn->info_ident = 0;
4608 l2cap_conn_start(conn);
4614 case L2CAP_IT_FEAT_MASK:
4615 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If fixed channels are supported, follow up with a
 * FIXED_CHAN information request before starting. */
4617 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4618 struct l2cap_info_req req;
4619 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4621 conn->info_ident = l2cap_get_ident(conn);
4623 l2cap_send_cmd(conn, conn->info_ident,
4624 L2CAP_INFO_REQ, sizeof(req), &req);
4626 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4627 conn->info_ident = 0;
4629 l2cap_conn_start(conn);
4633 case L2CAP_IT_FIXED_CHAN:
4634 conn->fixed_chan_mask = rsp->data[0];
4635 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4636 conn->info_ident = 0;
4638 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Create Channel Request (AMP).
 * amp_id 0 (AMP_ID_BREDR) degenerates to a normal BR/EDR connect;
 * otherwise the AMP controller id is validated and the new channel is
 * linked to the AMP manager / logical-link state. Invalid controllers
 * are answered with L2CAP_CR_BAD_AMP.
 * NOTE(review): elided listing; some lines (error paths, hci_dev_put)
 * are missing in between.
 */
4645 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4646 struct l2cap_cmd_hdr *cmd,
4647 u16 cmd_len, void *data)
4649 struct l2cap_create_chan_req *req = data;
4650 struct l2cap_create_chan_rsp rsp;
4651 struct l2cap_chan *chan;
4652 struct hci_dev *hdev;
4655 if (cmd_len != sizeof(*req))
/* Create Channel is only legal when high speed is enabled. */
4658 if (!conn->hs_enabled)
4661 psm = le16_to_cpu(req->psm);
4662 scid = le16_to_cpu(req->scid);
4664 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4666 /* For controller id 0 make BR/EDR connection */
4667 if (req->amp_id == AMP_ID_BREDR) {
4668 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4673 /* Validate AMP controller id */
4674 hdev = hci_dev_get(req->amp_id);
4678 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4683 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4686 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4687 struct hci_conn *hs_hcon;
4689 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No AMP link to the peer: reject with invalid CID. */
4693 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4698 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4700 mgr->bredr_chan = chan;
4701 chan->hs_hcon = hs_hcon;
/* FCS is not used on AMP links; MTU follows the AMP block MTU. */
4702 chan->fcs = L2CAP_FCS_NONE;
4703 conn->mtu = hdev->block_mtu;
4712 rsp.scid = cpu_to_le16(scid);
4713 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4714 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4716 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send an L2CAP Move Channel Request for chan to the given destination
 * AMP controller, remembering the signaling ident for matching the
 * response and arming the move timeout.
 */
4722 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4724 struct l2cap_move_chan_req req;
4727 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4729 ident = l2cap_get_ident(chan->conn);
4730 chan->ident = ident;
4732 req.icid = cpu_to_le16(chan->scid);
4733 req.dest_amp_id = dest_amp_id;
4735 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4738 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an L2CAP Move Channel Response with the given result code,
 * reusing the ident saved from the peer's Move Channel Request.
 */
4741 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4743 struct l2cap_move_chan_rsp rsp;
4745 BT_DBG("chan %p, result 0x%4.4x", chan, result);
/* icid in the response is the initiator's cid, i.e. our dcid. */
4747 rsp.icid = cpu_to_le16(chan->dcid);
4748 rsp.result = cpu_to_le16(result);
4750 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send an L2CAP Move Channel Confirmation (initiator side), allocate a
 * fresh signaling ident for it and arm the move timeout to bound the
 * wait for the Confirmation Response.
 */
4754 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4756 struct l2cap_move_chan_cfm cfm;
4758 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4760 chan->ident = l2cap_get_ident(chan->conn);
4762 cfm.icid = cpu_to_le16(chan->scid);
4763 cfm.result = cpu_to_le16(result);
4765 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4768 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirmation for a bare icid.
 * Used when no channel object could be located but the spec still
 * requires a confirmation on the wire.
 */
4771 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4773 struct l2cap_move_chan_cfm cfm;
4775 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4777 cfm.icid = cpu_to_le16(icid);
4778 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4780 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirmation Response for icid using the ident
 * of the confirmation being acknowledged.
 */
4784 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4787 struct l2cap_move_chan_cfm_rsp rsp;
4789 BT_DBG("icid 0x%4.4x", icid);
4791 rsp.icid = cpu_to_le16(icid);
4792 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Detach the channel from its high-speed logical link by clearing the
 * hs_hchan/hs_hcon pointers. Actual link teardown is a placeholder.
 */
4795 static void __release_logical_link(struct l2cap_chan *chan)
4797 chan->hs_hchan = NULL;
4798 chan->hs_hcon = NULL;
4800 /* Placeholder - release the logical link */
/* React to a failed logical-link setup. If the channel never reached
 * BT_CONNECTED the creation itself failed and the channel is torn down;
 * otherwise a channel *move* failed and the outcome depends on whether
 * we were move responder or initiator.
 * NOTE(review): elided listing; break statements between cases are
 * among the missing lines.
 */
4803 static void l2cap_logical_fail(struct l2cap_chan *chan)
4805 /* Logical link setup failed */
4806 if (chan->state != BT_CONNECTED) {
4807 /* Create channel failure, disconnect */
4808 l2cap_send_disconn_req(chan, ECONNRESET);
4812 switch (chan->move_role) {
4813 case L2CAP_MOVE_ROLE_RESPONDER:
4814 l2cap_move_done(chan);
/* Tell the initiator we cannot host the move. */
4815 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4817 case L2CAP_MOVE_ROLE_INITIATOR:
4818 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4819 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4820 /* Remote has only sent pending or
4821 * success responses, clean up
4823 l2cap_move_done(chan);
4826 /* Other amp move states imply that the move
4827 * has already aborted
4829 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up: bind the
 * hci_chan, send the deferred EFS Configure Response, and if inbound
 * configuration already finished, initialise ERTM and mark the channel
 * ready.
 * NOTE(review): elided listing; some lines are missing in between.
 */
4834 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4835 struct hci_chan *hchan)
4837 struct l2cap_conf_rsp rsp;
4839 chan->hs_hchan = hchan;
4840 chan->hs_hcon->l2cap_data = chan->conn;
/* Response was deferred until the logical link completed. */
4842 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4844 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4847 set_default_fcs(chan);
4849 err = l2cap_ertm_init(chan);
4851 l2cap_send_disconn_req(chan, -err);
4853 l2cap_chan_ready(chan);
/* Complete a channel move once the logical link is up, advancing the
 * move state machine. Depending on the current wait state this either
 * defers to the move response, sends the confirmation (initiator), or
 * sends the success response (responder). Unexpected states release
 * the logical link and settle the move.
 * NOTE(review): elided listing; break statements between cases are
 * among the missing lines.
 */
4857 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4858 struct hci_chan *hchan)
4860 chan->hs_hcon = hchan->conn;
4861 chan->hs_hcon->l2cap_data = chan->conn;
4863 BT_DBG("move_state %d", chan->move_state);
4865 switch (chan->move_state) {
4866 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4867 /* Move confirm will be sent after a success
4868 * response is received
4870 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4872 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Local receive path busy: postpone until it drains. */
4873 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4874 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4875 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4876 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4877 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4878 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4879 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4880 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4884 /* Move was not in expected state, free the channel */
4885 __release_logical_link(chan);
4887 chan->move_state = L2CAP_MOVE_STABLE;
4891 /* Call with chan locked */
/* Logical-link completion callback: on failure clean up via
 * l2cap_logical_fail() and drop the link; on success finish either
 * channel creation (chan not yet BT_CONNECTED) or a channel move.
 * NOTE(review): the status check preceding the failure branch is
 * among the elided lines.
 */
4892 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4895 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4898 l2cap_logical_fail(chan);
4899 __release_logical_link(chan);
4903 if (chan->state != BT_CONNECTED) {
4904 /* Ignore logical link if channel is on BR/EDR */
4905 if (chan->local_amp_id != AMP_ID_BREDR)
4906 l2cap_logical_finish_create(chan, hchan);
4908 l2cap_logical_finish_move(chan, hchan);
/* Initiate a channel move. On BR/EDR, moving only proceeds when the
 * channel policy prefers AMP, and physical link setup is started
 * (placeholder); when already on an AMP controller the move back to
 * BR/EDR is signaled immediately with dest_amp_id 0.
 * NOTE(review): elided listing; some lines are missing in between.
 */
4912 void l2cap_move_start(struct l2cap_chan *chan)
4914 BT_DBG("chan %p", chan);
4916 if (chan->local_amp_id == AMP_ID_BREDR) {
4917 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4919 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4920 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4921 /* Placeholder - start physical link setup */
4923 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4924 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4926 l2cap_move_setup(chan);
/* Destination 0 == AMP_ID_BREDR: move back to BR/EDR. */
4927 l2cap_send_move_chan_req(chan, 0);
/* Finish AMP channel creation after the physical link attempt.
 * Outgoing (BT_CONNECT): either send the Create Channel Request on the
 * AMP or fall back to a plain BR/EDR Connect Request. Incoming: send
 * the success/no-memory Create Channel Response and, on success, move
 * to BT_CONFIG and start configuration.
 * NOTE(review): elided listing; some lines are missing in between.
 */
4931 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4932 u8 local_amp_id, u8 remote_amp_id)
4934 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4935 local_amp_id, remote_amp_id);
/* No FCS on AMP-created channels. */
4937 chan->fcs = L2CAP_FCS_NONE;
4939 /* Outgoing channel on AMP */
4940 if (chan->state == BT_CONNECT) {
4941 if (result == L2CAP_CR_SUCCESS) {
4942 chan->local_amp_id = local_amp_id;
4943 l2cap_send_create_chan_req(chan, remote_amp_id);
4945 /* Revert to BR/EDR connect */
4946 l2cap_send_conn_req(chan);
4952 /* Incoming channel on AMP */
4953 if (__l2cap_no_conn_pending(chan)) {
4954 struct l2cap_conn_rsp rsp;
4956 rsp.scid = cpu_to_le16(chan->dcid);
4957 rsp.dcid = cpu_to_le16(chan->scid);
4959 if (result == L2CAP_CR_SUCCESS) {
4960 /* Send successful response */
4961 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4962 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4964 /* Send negative response */
4965 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4966 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4969 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4972 if (result == L2CAP_CR_SUCCESS) {
4973 l2cap_state_change(chan, BT_CONFIG);
4974 set_bit(CONF_REQ_SENT, &chan->conf_state);
4975 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4977 l2cap_build_conf_req(chan, buf), buf);
4978 chan->num_conf_req++;
/* Initiator-side move start once the physical link is ready: record
 * the target controller, wait for the Move Channel Response.
 */
4983 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4986 l2cap_move_setup(chan);
4987 chan->move_id = local_amp_id;
4988 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4990 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder-side move continuation: if the logical link already exists
 * and is connected, bind it and answer SUCCESS immediately; otherwise
 * wait for logical-link confirmation, or reject with NOT_ALLOWED when
 * no link is available.
 * NOTE(review): hchan acquisition is a placeholder, so in this listing
 * hchan stays NULL; the NULL-check branch is among the elided lines.
 */
4993 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4995 struct hci_chan *hchan = NULL;
4997 /* Placeholder - get hci_chan for logical link */
5000 if (hchan->state == BT_CONNECTED) {
5001 /* Logical link is ready to go */
5002 chan->hs_hcon = hchan->conn;
5003 chan->hs_hcon->l2cap_data = chan->conn;
5004 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5005 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5007 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5009 /* Wait for logical link to be ready */
5010 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5013 /* Logical link not available */
5014 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort a channel move: as responder, answer with BAD_ID (for -EINVAL)
 * or NOT_ALLOWED; then reset the move role/state to stable and restart
 * ERTM transmission that was paused for the move.
 */
5018 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5020 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5022 if (result == -EINVAL)
5023 rsp_result = L2CAP_MR_BAD_ID;
5025 rsp_result = L2CAP_MR_NOT_ALLOWED;
5027 l2cap_send_move_chan_rsp(chan, rsp_result);
5030 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5031 chan->move_state = L2CAP_MOVE_STABLE;
5033 /* Restart data transmission */
5034 l2cap_ertm_send(chan);
5037 /* Invoke with locked chan */
/* Physical-link completion callback. Disconnecting/closed channels are
 * ignored; pre-connected channels finish creation; failures cancel the
 * move; otherwise dispatch on the channel's move role.
 * NOTE(review): the early-return after the unlock at 5047 is among the
 * elided lines.
 */
5038 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5040 u8 local_amp_id = chan->local_amp_id;
5041 u8 remote_amp_id = chan->remote_amp_id;
5043 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5044 chan, result, local_amp_id, remote_amp_id);
5046 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
5047 l2cap_chan_unlock(chan);
5051 if (chan->state != BT_CONNECTED) {
5052 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5053 } else if (result != L2CAP_MR_SUCCESS) {
5054 l2cap_do_move_cancel(chan, result);
5056 switch (chan->move_role) {
5057 case L2CAP_MOVE_ROLE_INITIATOR:
5058 l2cap_do_move_initiate(chan, local_amp_id,
5061 case L2CAP_MOVE_ROLE_RESPONDER:
5062 l2cap_do_move_respond(chan, result);
/* Unexpected role: treat as a cancelled move. */
5065 l2cap_do_move_cancel(chan, result);
/* Handle an incoming L2CAP Move Channel Request (responder side).
 * Validates high-speed support, locates the channel by icid, checks
 * mode/policy eligibility, validates the destination AMP controller,
 * resolves move collisions by bd_addr comparison, then either accepts
 * toward BR/EDR, or starts AMP physical-link acceptance (PENDING).
 * NOTE(review): elided listing; some lines (error returns, hci_dev_put)
 * are missing in between.
 */
5071 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5072 struct l2cap_cmd_hdr *cmd,
5073 u16 cmd_len, void *data)
5075 struct l2cap_move_chan_req *req = data;
5076 struct l2cap_move_chan_rsp rsp;
5077 struct l2cap_chan *chan;
5079 u16 result = L2CAP_MR_NOT_ALLOWED;
5081 if (cmd_len != sizeof(*req))
5084 icid = le16_to_cpu(req->icid);
5086 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
/* Channel moves require high speed support on this connection. */
5088 if (!conn->hs_enabled)
5091 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown icid: answer NOT_ALLOWED without a channel object. */
5093 rsp.icid = cpu_to_le16(icid);
5094 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5095 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5100 chan->ident = cmd->ident;
/* Only dynamic-cid ERTM/streaming channels that are not pinned to
 * BR/EDR may be moved. */
5102 if (chan->scid < L2CAP_CID_DYN_START ||
5103 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5104 (chan->mode != L2CAP_MODE_ERTM &&
5105 chan->mode != L2CAP_MODE_STREAMING)) {
5106 result = L2CAP_MR_NOT_ALLOWED;
5107 goto send_move_response;
5110 if (chan->local_amp_id == req->dest_amp_id) {
5111 result = L2CAP_MR_SAME_ID;
5112 goto send_move_response;
5115 if (req->dest_amp_id != AMP_ID_BREDR) {
5116 struct hci_dev *hdev;
5117 hdev = hci_dev_get(req->dest_amp_id);
5118 if (!hdev || hdev->dev_type != HCI_AMP ||
5119 !test_bit(HCI_UP, &hdev->flags)) {
5123 result = L2CAP_MR_BAD_ID;
5124 goto send_move_response;
5129 /* Detect a move collision. Only send a collision response
5130 * if this side has "lost", otherwise proceed with the move.
5131 * The winner has the larger bd_addr.
5133 if ((__chan_is_moving(chan) ||
5134 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5135 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5136 result = L2CAP_MR_COLLISION;
5137 goto send_move_response;
5140 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5141 l2cap_move_setup(chan);
5142 chan->move_id = req->dest_amp_id;
5145 if (req->dest_amp_id == AMP_ID_BREDR) {
5146 /* Moving to BR/EDR */
5147 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5148 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5149 result = L2CAP_MR_PEND;
5151 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5152 result = L2CAP_MR_SUCCESS;
5155 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5156 /* Placeholder - uncomment when amp functions are available */
5157 /*amp_accept_physical(chan, req->dest_amp_id);*/
5158 result = L2CAP_MR_PEND;
5162 l2cap_send_move_chan_rsp(chan, result);
5164 l2cap_chan_unlock(chan);
/* Advance the initiator-side move state machine on a SUCCESS/PEND Move
 * Channel Response. Re-arms the ERTX timer for PEND, then acts per
 * move_state: wait for logical-link completion, send confirmation, or
 * bind an already-connected logical link. Any unexpected state aborts
 * the move with an UNCONFIRMED confirmation.
 * NOTE(review): elided listing; break statements and the hchan
 * acquisition placeholder result are among the missing lines.
 */
5169 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5171 struct l2cap_chan *chan;
5172 struct hci_chan *hchan = NULL;
5174 chan = l2cap_get_chan_by_scid(conn, icid);
/* No channel for this icid: spec still wants a confirmation. */
5176 l2cap_send_move_chan_cfm_icid(conn, icid);
5180 __clear_chan_timer(chan);
5181 if (result == L2CAP_MR_PEND)
5182 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5184 switch (chan->move_state) {
5185 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5186 /* Move confirm will be sent when logical link
5189 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5191 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5192 if (result == L2CAP_MR_PEND) {
5194 } else if (test_bit(CONN_LOCAL_BUSY,
5195 &chan->conn_state)) {
5196 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5198 /* Logical link is up or moving to BR/EDR,
5201 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5202 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5205 case L2CAP_MOVE_WAIT_RSP:
5207 if (result == L2CAP_MR_SUCCESS) {
5208 /* Remote is ready, send confirm immediately
5209 * after logical link is ready
5211 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5213 /* Both logical link and move success
5214 * are required to confirm
5216 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5219 /* Placeholder - get hci_chan for logical link */
5221 /* Logical link not available */
5222 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5226 /* If the logical link is not yet connected, do not
5227 * send confirmation.
5229 if (hchan->state != BT_CONNECTED)
5232 /* Logical link is already ready to go */
5234 chan->hs_hcon = hchan->conn;
5235 chan->hs_hcon->l2cap_data = chan->conn;
5237 if (result == L2CAP_MR_SUCCESS) {
5238 /* Can confirm now */
5239 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5241 /* Now only need move success
5244 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5247 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5250 /* Any other amp move state means the move failed. */
5251 chan->move_id = chan->local_amp_id;
5252 l2cap_move_done(chan);
5253 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5256 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response. On COLLISION the initiator
 * demotes itself to responder (the peer "won"); otherwise the move is
 * cancelled locally. Either way an UNCONFIRMED confirmation is sent.
 * NOTE(review): elided listing; some lines are missing in between.
 */
5259 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5262 struct l2cap_chan *chan;
5264 chan = l2cap_get_chan_by_ident(conn, ident);
5266 /* Could not locate channel, icid is best guess */
5267 l2cap_send_move_chan_cfm_icid(conn, icid);
5271 __clear_chan_timer(chan);
5273 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5274 if (result == L2CAP_MR_COLLISION) {
5275 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5277 /* Cleanup - cancel move */
5278 chan->move_id = chan->local_amp_id;
5279 l2cap_move_done(chan);
5283 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5285 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Move Channel Response: SUCCESS/PEND results
 * continue the move, anything else routes to the failure path.
 */
5288 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5289 struct l2cap_cmd_hdr *cmd,
5290 u16 cmd_len, void *data)
5292 struct l2cap_move_chan_rsp *rsp = data;
5295 if (cmd_len != sizeof(*rsp))
5298 icid = le16_to_cpu(rsp->icid);
5299 result = le16_to_cpu(rsp->result);
5301 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5303 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5304 l2cap_move_continue(conn, icid, result);
5306 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming L2CAP Move Channel Confirmation (responder side).
 * When waiting for confirmation: CONFIRMED commits the move (and
 * releases the logical link if landing back on BR/EDR), anything else
 * reverts move_id; the move is then marked done. A Confirmation
 * Response is always sent, even for an unknown icid (spec requirement).
 * NOTE(review): elided listing; some lines are missing in between.
 */
5311 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5312 struct l2cap_cmd_hdr *cmd,
5313 u16 cmd_len, void *data)
5315 struct l2cap_move_chan_cfm *cfm = data;
5316 struct l2cap_chan *chan;
5319 if (cmd_len != sizeof(*cfm))
5322 icid = le16_to_cpu(cfm->icid);
5323 result = le16_to_cpu(cfm->result);
5325 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5327 chan = l2cap_get_chan_by_dcid(conn, icid);
5329 /* Spec requires a response even if the icid was not found */
5330 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5334 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5335 if (result == L2CAP_MC_CONFIRMED) {
/* Commit: the move target becomes the local controller. */
5336 chan->local_amp_id = chan->move_id;
5337 if (chan->local_amp_id == AMP_ID_BREDR)
5338 __release_logical_link(chan);
5340 chan->move_id = chan->local_amp_id;
5343 l2cap_move_done(chan);
5346 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5348 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Confirmation Response (initiator
 * side): stop the move timer and, if we were waiting for this ack,
 * commit the move (releasing the logical link when moving back to
 * BR/EDR) and mark it done.
 * NOTE(review): elided listing; some lines are missing in between.
 */
5353 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5354 struct l2cap_cmd_hdr *cmd,
5355 u16 cmd_len, void *data)
5357 struct l2cap_move_chan_cfm_rsp *rsp = data;
5358 struct l2cap_chan *chan;
5361 if (cmd_len != sizeof(*rsp))
5364 icid = le16_to_cpu(rsp->icid);
5366 BT_DBG("icid 0x%4.4x", icid);
5368 chan = l2cap_get_chan_by_scid(conn, icid);
5372 __clear_chan_timer(chan);
5374 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5375 chan->local_amp_id = chan->move_id;
5377 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5378 __release_logical_link(chan);
5380 l2cap_move_done(chan);
5383 l2cap_chan_unlock(chan);
/* Validate LE connection parameters per the allowed ranges: interval
 * min/max in 6..3200 with min <= max, supervision timeout multiplier
 * 10..3200, timeout strictly greater than max * 8, and slave latency
 * capped at 499 and at (timeout*8/max)-1.
 * NOTE(review): the signature continuation (to_multiplier parameter)
 * and the return statements are among the elided lines.
 */
5388 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5393 if (min > max || min < 6 || max > 3200)
5396 if (to_multiplier < 10 || to_multiplier > 3200)
5399 if (max >= to_multiplier * 8)
5402 max_latency = (to_multiplier * 8 / max) - 1;
5403 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request. Only legal when we
 * are the master; parameters are validated with l2cap_check_conn_param
 * and the accept/reject result is returned, with an HCI connection
 * update issued on accept.
 * NOTE(review): elided listing; some lines are missing in between.
 */
5409 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5410 struct l2cap_cmd_hdr *cmd,
5411 u16 cmd_len, u8 *data)
5413 struct hci_conn *hcon = conn->hcon;
5414 struct l2cap_conn_param_update_req *req;
5415 struct l2cap_conn_param_update_rsp rsp;
5416 u16 min, max, latency, to_multiplier;
/* Only the master may be asked to update connection parameters. */
5419 if (!(hcon->link_mode & HCI_LM_MASTER))
5422 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5425 req = (struct l2cap_conn_param_update_req *) data;
5426 min = __le16_to_cpu(req->min);
5427 max = __le16_to_cpu(req->max);
5428 latency = __le16_to_cpu(req->latency);
5429 to_multiplier = __le16_to_cpu(req->to_multiplier);
5431 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5432 min, max, latency, to_multiplier);
5434 memset(&rsp, 0, sizeof(rsp));
5436 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5438 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5440 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5442 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Only push the new parameters to the controller on accept. */
5446 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Handle an LE Credit Based Connection Response. The channel is found
 * by the pending signaling ident; on SUCCESS the peer's mtu/mps/credit
 * values are recorded and the channel becomes ready, otherwise it is
 * deleted with ECONNREFUSED. mtu/mps below 23 on success are invalid.
 * NOTE(review): elided listing; some assignments (dcid, imtu) are
 * among the missing lines.
 */
5451 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5452 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5455 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5456 u16 dcid, mtu, mps, credits, result;
5457 struct l2cap_chan *chan;
5460 if (cmd_len < sizeof(*rsp))
5463 dcid = __le16_to_cpu(rsp->dcid);
5464 mtu = __le16_to_cpu(rsp->mtu);
5465 mps = __le16_to_cpu(rsp->mps);
5466 credits = __le16_to_cpu(rsp->credits);
5467 result = __le16_to_cpu(rsp->result);
/* 23 is the minimum legal LE MTU/MPS. */
5469 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5472 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5473 dcid, mtu, mps, credits, result);
5475 mutex_lock(&conn->chan_lock);
5477 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5485 l2cap_chan_lock(chan);
5488 case L2CAP_CR_SUCCESS:
5492 chan->remote_mps = mps;
5493 chan->tx_credits = credits;
5494 l2cap_chan_ready(chan);
5498 l2cap_chan_del(chan, ECONNREFUSED);
5502 l2cap_chan_unlock(chan);
5505 mutex_unlock(&conn->chan_lock);
/* Dispatch a single BR/EDR signaling command to its handler based on
 * the command code. Echo requests are answered inline; unknown codes
 * are logged as errors.
 * NOTE(review): elided listing; break statements between cases are
 * among the missing lines.
 */
5510 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5511 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5516 switch (cmd->code) {
5517 case L2CAP_COMMAND_REJ:
5518 l2cap_command_rej(conn, cmd, cmd_len, data);
5521 case L2CAP_CONN_REQ:
5522 err = l2cap_connect_req(conn, cmd, cmd_len, data);
/* Connect and Create Channel responses share one handler. */
5525 case L2CAP_CONN_RSP:
5526 case L2CAP_CREATE_CHAN_RSP:
5527 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5530 case L2CAP_CONF_REQ:
5531 err = l2cap_config_req(conn, cmd, cmd_len, data);
5534 case L2CAP_CONF_RSP:
5535 l2cap_config_rsp(conn, cmd, cmd_len, data);
5538 case L2CAP_DISCONN_REQ:
5539 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5542 case L2CAP_DISCONN_RSP:
5543 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
/* Echo requests are reflected straight back with the same payload. */
5546 case L2CAP_ECHO_REQ:
5547 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5550 case L2CAP_ECHO_RSP:
5553 case L2CAP_INFO_REQ:
5554 err = l2cap_information_req(conn, cmd, cmd_len, data);
5557 case L2CAP_INFO_RSP:
5558 l2cap_information_rsp(conn, cmd, cmd_len, data);
5561 case L2CAP_CREATE_CHAN_REQ:
5562 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5565 case L2CAP_MOVE_CHAN_REQ:
5566 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5569 case L2CAP_MOVE_CHAN_RSP:
5570 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5573 case L2CAP_MOVE_CHAN_CFM:
5574 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5577 case L2CAP_MOVE_CHAN_CFM_RSP:
5578 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5582 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request. Validates the PDU and
 * minimum mtu/mps (23), finds a listener for the psm, enforces
 * security and duplicate-dcid checks, creates the child channel,
 * initialises LE flow control and either defers (CR_PEND) or completes
 * the connection. A response is always sent at the end.
 * NOTE(review): elided listing; the psm read, error labels, and some
 * response-field assignments are among the missing lines.
 */
5590 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5591 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5594 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5595 struct l2cap_le_conn_rsp rsp;
5596 struct l2cap_chan *chan, *pchan;
5597 u16 dcid, scid, credits, mtu, mps;
5601 if (cmd_len != sizeof(*req))
5604 scid = __le16_to_cpu(req->scid);
5605 mtu = __le16_to_cpu(req->mtu);
5606 mps = __le16_to_cpu(req->mps);
/* 23 is the minimum legal LE MTU/MPS. */
5611 if (mtu < 23 || mps < 23)
5614 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5617 /* Check if we have socket listening on psm */
5618 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5619 &conn->hcon->dst, LE_LINK);
5621 result = L2CAP_CR_BAD_PSM;
5626 mutex_lock(&conn->chan_lock);
5627 l2cap_chan_lock(pchan);
5629 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5630 result = L2CAP_CR_AUTHENTICATION;
5632 goto response_unlock;
5635 /* Check if we already have channel with that dcid */
5636 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5637 result = L2CAP_CR_NO_MEM;
5639 goto response_unlock;
5642 chan = pchan->ops->new_connection(pchan);
5644 result = L2CAP_CR_NO_MEM;
5645 goto response_unlock;
5648 l2cap_le_flowctl_init(chan);
5650 bacpy(&chan->src, &conn->hcon->src);
5651 bacpy(&chan->dst, &conn->hcon->dst);
5652 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5653 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5657 chan->remote_mps = mps;
5658 chan->tx_credits = __le16_to_cpu(req->credits);
5660 __l2cap_chan_add(conn, chan);
5662 credits = chan->rx_credits;
5664 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5666 chan->ident = cmd->ident;
/* Deferred setup: user space must accept before we complete. */
5668 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5669 l2cap_state_change(chan, BT_CONNECT2);
5670 result = L2CAP_CR_PEND;
5671 chan->ops->defer(chan);
5673 l2cap_chan_ready(chan);
5674 result = L2CAP_CR_SUCCESS;
5678 l2cap_chan_unlock(pchan);
5679 mutex_unlock(&conn->chan_lock);
/* For PEND the response is sent later, when the user accepts. */
5681 if (result == L2CAP_CR_PEND)
5686 rsp.mtu = cpu_to_le16(chan->imtu);
5687 rsp.mps = cpu_to_le16(chan->mps);
5693 rsp.dcid = cpu_to_le16(dcid);
5694 rsp.credits = cpu_to_le16(credits);
5695 rsp.result = cpu_to_le16(result);
5697 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the peer's new credits
 * to the channel and flush as many queued outbound SDUs as the credit
 * balance allows, resuming the channel if credits remain.
 * NOTE(review): elided listing; the channel-not-found check and credit
 * overflow handling are among the missing lines.
 */
5702 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5703 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5706 struct l2cap_le_credits *pkt;
5707 struct l2cap_chan *chan;
5710 if (cmd_len != sizeof(*pkt))
5713 pkt = (struct l2cap_le_credits *) data;
5714 cid = __le16_to_cpu(pkt->cid);
5715 credits = __le16_to_cpu(pkt->credits);
5717 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5719 chan = l2cap_get_chan_by_dcid(conn, cid);
5723 chan->tx_credits += credits;
/* Drain the tx queue while credits last. */
5725 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5726 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5730 if (chan->tx_credits)
5731 chan->ops->resume(chan);
5733 l2cap_chan_unlock(chan);
/* Handle an LE Command Reject: a pending request identified by the
 * ident was refused by the peer, so the matching channel (if any) is
 * deleted with ECONNREFUSED.
 * NOTE(review): elided listing; some lines are missing in between.
 */
5738 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5739 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5742 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5743 struct l2cap_chan *chan;
5745 if (cmd_len < sizeof(*rej))
5748 mutex_lock(&conn->chan_lock);
5750 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5754 l2cap_chan_lock(chan);
5755 l2cap_chan_del(chan, ECONNREFUSED);
5756 l2cap_chan_unlock(chan);
5759 mutex_unlock(&conn->chan_lock);
/* Dispatch a single LE signaling command to its handler. When LE
 * connection-oriented channels are disabled (enable_lecoc), the LE CoC
 * command codes are filtered out first. Unknown codes log an error.
 * NOTE(review): elided listing; break statements and the filtered-case
 * rejection path are among the missing lines.
 */
5763 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5764 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
/* LE CoC disabled: these commands are treated as unsupported. */
5769 if (!enable_lecoc) {
5770 switch (cmd->code) {
5771 case L2CAP_LE_CONN_REQ:
5772 case L2CAP_LE_CONN_RSP:
5773 case L2CAP_LE_CREDITS:
5774 case L2CAP_DISCONN_REQ:
5775 case L2CAP_DISCONN_RSP:
5780 switch (cmd->code) {
5781 case L2CAP_COMMAND_REJ:
5782 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5785 case L2CAP_CONN_PARAM_UPDATE_REQ:
5786 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5789 case L2CAP_CONN_PARAM_UPDATE_RSP:
5792 case L2CAP_LE_CONN_RSP:
5793 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5796 case L2CAP_LE_CONN_REQ:
5797 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5800 case L2CAP_LE_CREDITS:
5801 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5804 case L2CAP_DISCONN_REQ:
5805 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5808 case L2CAP_DISCONN_RSP:
5809 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5813 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Entry point for PDUs arriving on the LE signaling channel (CID 0x0005).
 * Validates the command header, dispatches to l2cap_le_sig_cmd(), and on
 * failure replies with a Command Reject (Not Understood).
 *
 * NOTE(review): drop paths, braces and the reject-send tail are elided
 * in this copy of the file.
 */
5821 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5822 struct sk_buff *skb)
5824 struct hci_conn *hcon = conn->hcon;
5825 struct l2cap_cmd_hdr *cmd;
/* LE signaling is only valid on an LE link. */
5829 if (hcon->type != LE_LINK)
/* Need at least a full command header. */
5832 if (skb->len < L2CAP_CMD_HDR_SIZE)
5835 cmd = (void *) skb->data;
5836 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5838 len = le16_to_cpu(cmd->len);
5840 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* LE signaling carries exactly one command per PDU, and ident 0 is
 * reserved, so both mismatched length and zero ident are corruption. */
5842 if (len != skb->len || !cmd->ident) {
5843 BT_DBG("corrupted command");
5847 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5849 struct l2cap_cmd_rej_unk rej;
5851 BT_ERR("Wrong link type (%d)", err);
5853 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5854 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Entry point for PDUs on the BR/EDR signaling channel (CID 0x0001).
 * Unlike LE, a single PDU may contain several concatenated commands, so
 * this loops while a full command header remains.
 *
 * NOTE(review): loop braces, the "len" initialisation from skb->len,
 * and per-iteration data/len advancement past cmd_len are elided in
 * this copy.
 */
5862 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5863 struct sk_buff *skb)
5865 struct hci_conn *hcon = conn->hcon;
5866 u8 *data = skb->data;
5868 struct l2cap_cmd_hdr cmd;
/* Mirror raw signaling traffic to any raw sockets first. */
5871 l2cap_raw_recv(conn, skb);
5873 if (hcon->type != ACL_LINK)
5876 while (len >= L2CAP_CMD_HDR_SIZE) {
5878 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5879 data += L2CAP_CMD_HDR_SIZE;
5880 len -= L2CAP_CMD_HDR_SIZE;
5882 cmd_len = le16_to_cpu(cmd.len);
5884 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* A command that claims more payload than remains, or an ident of 0
 * (reserved), means a corrupt PDU: stop processing. */
5887 if (cmd_len > len || !cmd.ident) {
5888 BT_DBG("corrupted command");
5892 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5894 struct l2cap_cmd_rej_unk rej;
5896 BT_ERR("Wrong link type (%d)", err);
/* Unhandled/failed command: tell the peer we did not understand it. */
5898 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5899 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5911 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5913 u16 our_fcs, rcv_fcs;
5916 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5917 hdr_size = L2CAP_EXT_HDR_SIZE;
5919 hdr_size = L2CAP_ENH_HDR_SIZE;
5921 if (chan->fcs == L2CAP_FCS_CRC16) {
5922 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5923 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5924 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5926 if (our_fcs != rcv_fcs)
/* Answer a poll (P=1) from the peer: send pending I-frames if possible,
 * otherwise an RR or RNR S-frame, always with the F-bit set.
 *
 * NOTE(review): braces and sframe-bit setup lines are elided in this
 * copy of the file.
 */
5932 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5934 struct l2cap_ctrl control;
5936 BT_DBG("chan %p", chan);
5938 memset(&control, 0, sizeof(control));
5941 control.reqseq = chan->buffer_seq;
/* Mark that the next frame we send must carry the F-bit. */
5942 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Locally busy: respond with RNR so the peer stops sending I-frames. */
5944 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5945 control.super = L2CAP_SUPER_RNR;
5946 l2cap_send_sframe(chan, &control);
/* Peer just left busy state and we have unacked frames: restart the
 * retransmission timer. */
5949 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5950 chan->unacked_frames > 0)
5951 __set_retrans_timer(chan);
5953 /* Send pending iframes */
5954 l2cap_ertm_send(chan);
/* If no I-frame or RNR carried the F-bit, send an explicit RR with it. */
5956 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5957 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5958 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5961 control.super = L2CAP_SUPER_RR;
5962 l2cap_send_sframe(chan, &control);
/* Append an skb to the frag_list of a partially reassembled SDU and
 * keep the head skb's accounting (len/data_len/truesize) consistent.
 * *last_frag tracks the tail of the frag list to avoid walking it.
 *
 * NOTE(review): braces and the else keyword between the two linking
 * cases are elided in this copy.
 */
5966 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5967 struct sk_buff **last_frag)
5969 /* skb->len reflects data in skb as well as all fragments
5970 * skb->data_len reflects only data in fragments
/* First fragment: start the frag list. */
5972 if (!skb_has_frag_list(skb))
5973 skb_shinfo(skb)->frag_list = new_frag;
5975 new_frag->next = NULL;
/* Otherwise link after the current tail. */
5977 (*last_frag)->next = new_frag;
5978 *last_frag = new_frag;
/* Account the fragment's bytes in the head skb. */
5980 skb->len += new_frag->len;
5981 skb->data_len += new_frag->len;
5982 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM/streaming I-frames according to the SAR
 * (segmentation and reassembly) bits, delivering complete SDUs to the
 * channel's recv callback.
 *
 * NOTE(review): many lines are elided in this copy (error checks,
 * break statements, chan->sdu assignments, and the error-cleanup path
 * labels) — comments describe only what is visible.
 */
5985 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5986 struct l2cap_ctrl *control)
5990 switch (control->sar) {
/* Whole SDU in one frame: hand it straight up. */
5991 case L2CAP_SAR_UNSEGMENTED:
5995 err = chan->ops->recv(chan, skb);
/* First fragment: payload starts with a 2-byte total SDU length. */
5998 case L2CAP_SAR_START:
6002 chan->sdu_len = get_unaligned_le16(skb->data);
6003 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Peer announced an SDU bigger than our MTU: protocol error. */
6005 if (chan->sdu_len > chan->imtu) {
/* A START fragment must be strictly smaller than the full SDU. */
6010 if (skb->len >= chan->sdu_len)
6014 chan->sdu_last_frag = skb;
/* Middle fragment: append and check we have not overrun sdu_len. */
6020 case L2CAP_SAR_CONTINUE:
6024 append_skb_frag(chan->sdu, skb,
6025 &chan->sdu_last_frag);
6028 if (chan->sdu->len >= chan->sdu_len)
/* (END fragment handling) append, then the total must match exactly. */
6038 append_skb_frag(chan->sdu, skb,
6039 &chan->sdu_last_frag);
6042 if (chan->sdu->len != chan->sdu_len)
6045 err = chan->ops->recv(chan, chan->sdu);
6048 /* Reassembly complete */
6050 chan->sdu_last_frag = NULL;
/* Error path: discard the partial SDU. */
6058 kfree_skb(chan->sdu);
6060 chan->sdu_last_frag = NULL;
/* Resegment outstanding I-frames after a channel move changes the
 * usable MTU.  Resegmentation is not implemented, so this placeholder
 * always reports success.
 *
 * NOTE(review): the body was elided in the extracted copy; restored to
 * the canonical upstream placeholder.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
/* Inform the ERTM state machine that the local receive side became busy
 * or cleared busy (e.g. the socket receive buffer filled/drained).
 * No-op for non-ERTM channels.
 */
6073 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6077 if (chan->mode != L2CAP_MODE_ERTM)
/* Translate the boolean into the corresponding TX state-machine event. */
6080 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6081 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver frames stored out-of-order while their
 * predecessors were being retransmitted, stopping at the first gap or
 * when the local side goes busy.
 *
 * NOTE(review): loop braces, the break on a missing frame and error
 * returns are elided in this copy.
 */
6084 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6087 /* Pass sequential frames to l2cap_reassemble_sdu()
6088 * until a gap is encountered.
6091 BT_DBG("chan %p", chan);
6093 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6094 struct sk_buff *skb;
6095 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6096 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6098 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
/* Found the next in-sequence frame: remove, advance, reassemble. */
6103 skb_unlink(skb, &chan->srej_q);
6104 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6105 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* Queue fully drained: back to normal receive state and ack. */
6110 if (skb_queue_empty(&chan->srej_q)) {
6111 chan->rx_state = L2CAP_RX_STATE_RECV;
6112 l2cap_send_ack(chan);
/* Handle a received SREJ (selective reject) S-frame: retransmit the
 * single requested I-frame, honoring the P/F bits per the ERTM state
 * tables.
 *
 * NOTE(review): early returns after disconnect, braces and else
 * keywords are elided in this copy of the file.
 */
6118 static void l2cap_handle_srej(struct l2cap_chan *chan,
6119 struct l2cap_ctrl *control)
6121 struct sk_buff *skb;
6123 BT_DBG("chan %p, control %p", chan, control);
/* Peer asked for a frame we have not sent yet: protocol violation. */
6125 if (control->reqseq == chan->next_tx_seq) {
6126 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6127 l2cap_send_disconn_req(chan, ECONNRESET);
6131 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6134 BT_DBG("Seq %d not available for retransmission",
/* Give up after max_tx retransmissions of the same frame. */
6139 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
6140 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6141 l2cap_send_disconn_req(chan, ECONNRESET);
6145 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* P=1: the retransmission must carry the F-bit in response. */
6147 if (control->poll) {
6148 l2cap_pass_to_tx(chan, control);
6150 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6151 l2cap_retransmit(chan, control);
6152 l2cap_ertm_send(chan);
6154 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6155 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6156 chan->srej_save_reqseq = control->reqseq;
6159 l2cap_pass_to_tx_fbit(chan, control);
/* F=1: only retransmit if this is not the SREJ we already acted on. */
6161 if (control->final) {
6162 if (chan->srej_save_reqseq != control->reqseq ||
6163 !test_and_clear_bit(CONN_SREJ_ACT,
6165 l2cap_retransmit(chan, control);
6167 l2cap_retransmit(chan, control);
6168 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6169 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6170 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: the peer requests retransmission of
 * all I-frames from reqseq onward.
 *
 * NOTE(review): early returns and braces are elided in this copy.
 */
6176 static void l2cap_handle_rej(struct l2cap_chan *chan,
6177 struct l2cap_ctrl *control)
6179 struct sk_buff *skb;
6181 BT_DBG("chan %p, control %p", chan, control);
/* REJ for a frame never sent: protocol violation, disconnect. */
6183 if (control->reqseq == chan->next_tx_seq) {
6184 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6185 l2cap_send_disconn_req(chan, ECONNRESET);
6189 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
/* Retry limit: tear the channel down rather than loop forever. */
6191 if (chan->max_tx && skb &&
6192 bt_cb(skb)->control.retries >= chan->max_tx) {
6193 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6194 l2cap_send_disconn_req(chan, ECONNRESET);
6198 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6200 l2cap_pass_to_tx(chan, control);
/* F=1: retransmit only when this REJ was not already acted upon. */
6202 if (control->final) {
6203 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6204 l2cap_retransmit_all(chan, control);
6206 l2cap_retransmit_all(chan, control);
6207 l2cap_ertm_send(chan);
/* Remember we honored this REJ while waiting for the F-bit. */
6208 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6209 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify the txseq of a received I-frame relative to the receive
 * window: expected, duplicate, unexpected (gap), or invalid.  Separate
 * classifications exist while an SREJ is outstanding.
 *
 * NOTE(review): several braces and else keywords are elided in this
 * copy; the classification returns are all visible.
 */
6213 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6215 BT_DBG("chan %p, txseq %d", chan, txseq);
6217 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6218 chan->expected_tx_seq);
/* While an SREJ is outstanding, frames beyond the tx window need
 * special treatment (see the "double poll" discussion below). */
6220 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6221 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6223 /* See notes below regarding "double poll" and
6226 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6227 BT_DBG("Invalid/Ignore - after SREJ");
6228 return L2CAP_TXSEQ_INVALID_IGNORE;
6230 BT_DBG("Invalid - in window after SREJ sent");
6231 return L2CAP_TXSEQ_INVALID;
/* The retransmission we asked for first. */
6235 if (chan->srej_list.head == txseq) {
6236 BT_DBG("Expected SREJ");
6237 return L2CAP_TXSEQ_EXPECTED_SREJ;
/* Already buffered from a previous (re)transmission. */
6240 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6241 BT_DBG("Duplicate SREJ - txseq already stored");
6242 return L2CAP_TXSEQ_DUPLICATE_SREJ;
/* Requested via SREJ but arriving out of the order we asked. */
6245 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6246 BT_DBG("Unexpected SREJ - not requested");
6247 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
/* Normal (non-SREJ) classification follows. */
6251 if (chan->expected_tx_seq == txseq) {
6252 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6254 BT_DBG("Invalid - txseq outside tx window");
6255 return L2CAP_TXSEQ_INVALID;
6258 return L2CAP_TXSEQ_EXPECTED;
/* txseq earlier than expected means a frame we already have. */
6262 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6263 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6264 BT_DBG("Duplicate - expected_tx_seq later than txseq")
6265 return L2CAP_TXSEQ_DUPLICATE;
6268 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6269 /* A source of invalid packets is a "double poll" condition,
6270 * where delays cause us to send multiple poll packets. If
6271 * the remote stack receives and processes both polls,
6272 * sequence numbers can wrap around in such a way that a
6273 * resent frame has a sequence number that looks like new data
6274 * with a sequence gap. This would trigger an erroneous SREJ
6277 * Fortunately, this is impossible with a tx window that's
6278 * less than half of the maximum sequence number, which allows
6279 * invalid frames to be safely ignored.
6281 * With tx window sizes greater than half of the tx window
6282 * maximum, the frame is invalid and cannot be ignored. This
6283 * causes a disconnect.
6286 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6287 BT_DBG("Invalid/Ignore - txseq outside tx window");
6288 return L2CAP_TXSEQ_INVALID_IGNORE;
6290 BT_DBG("Invalid - txseq outside tx window");
6291 return L2CAP_TXSEQ_INVALID;
/* In-window but ahead of expected: there is a sequence gap. */
6294 BT_DBG("Unexpected - txseq indicates missing frames");
6295 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine: normal RECV state.  Processes incoming
 * I-frames and S-frames (RR/REJ/RNR/SREJ events), reassembling SDUs
 * and issuing SREJs when a sequence gap is detected.
 *
 * NOTE(review): many break statements, braces, skb_in_use assignments
 * and goto labels are elided in this copy of the file.
 */
6299 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6300 struct l2cap_ctrl *control,
6301 struct sk_buff *skb, u8 event)
6304 bool skb_in_use = false;
6306 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6310 case L2CAP_EV_RECV_IFRAME:
6311 switch (l2cap_classify_txseq(chan, control->txseq)) {
6312 case L2CAP_TXSEQ_EXPECTED:
6313 l2cap_pass_to_tx(chan, control);
/* While locally busy we must not accept new I-frames. */
6315 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6316 BT_DBG("Busy, discarding expected seq %d",
6321 chan->expected_tx_seq = __next_seq(chan,
6324 chan->buffer_seq = chan->expected_tx_seq;
6327 err = l2cap_reassemble_sdu(chan, skb, control);
/* F=1 acknowledges an outstanding REJ; resend if not yet acted on. */
6331 if (control->final) {
6332 if (!test_and_clear_bit(CONN_REJ_ACT,
6333 &chan->conn_state)) {
6335 l2cap_retransmit_all(chan, control);
6336 l2cap_ertm_send(chan);
6340 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6341 l2cap_send_ack(chan);
6343 case L2CAP_TXSEQ_UNEXPECTED:
6344 l2cap_pass_to_tx(chan, control);
6346 /* Can't issue SREJ frames in the local busy state.
6347 * Drop this frame, it will be seen as missing
6348 * when local busy is exited.
6350 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6351 BT_DBG("Busy, discarding unexpected seq %d",
6356 /* There was a gap in the sequence, so an SREJ
6357 * must be sent for each missing frame. The
6358 * current frame is stored for later use.
6360 skb_queue_tail(&chan->srej_q, skb);
6362 BT_DBG("Queued %p (queue len %d)", skb,
6363 skb_queue_len(&chan->srej_q));
6365 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6366 l2cap_seq_list_clear(&chan->srej_list);
6367 l2cap_send_srej(chan, control->txseq);
6369 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
/* Duplicates still carry a usable reqseq acknowledgement. */
6371 case L2CAP_TXSEQ_DUPLICATE:
6372 l2cap_pass_to_tx(chan, control);
6374 case L2CAP_TXSEQ_INVALID_IGNORE:
6376 case L2CAP_TXSEQ_INVALID:
6378 l2cap_send_disconn_req(chan, ECONNRESET);
6382 case L2CAP_EV_RECV_RR:
6383 l2cap_pass_to_tx(chan, control);
6384 if (control->final) {
6385 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6387 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6388 !__chan_is_moving(chan)) {
6390 l2cap_retransmit_all(chan, control);
6393 l2cap_ertm_send(chan);
/* P=1 RR: must answer with the F-bit set. */
6394 } else if (control->poll) {
6395 l2cap_send_i_or_rr_or_rnr(chan);
6397 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6398 &chan->conn_state) &&
6399 chan->unacked_frames)
6400 __set_retrans_timer(chan);
6402 l2cap_ertm_send(chan);
/* Peer went busy: stop transmitting and clear the retrans list. */
6405 case L2CAP_EV_RECV_RNR:
6406 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6407 l2cap_pass_to_tx(chan, control);
6408 if (control && control->poll) {
6409 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6410 l2cap_send_rr_or_rnr(chan, 0);
6412 __clear_retrans_timer(chan);
6413 l2cap_seq_list_clear(&chan->retrans_list);
6415 case L2CAP_EV_RECV_REJ:
6416 l2cap_handle_rej(chan, control);
6418 case L2CAP_EV_RECV_SREJ:
6419 l2cap_handle_srej(chan, control);
/* Any skb not queued for later use is freed here. */
6425 if (skb && !skb_in_use) {
6426 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine: SREJ_SENT state, entered after a sequence
 * gap.  Incoming frames are queued in srej_q until the gap is filled;
 * filled-in retransmissions trigger draining via
 * l2cap_rx_queued_iframes().
 *
 * NOTE(review): break statements, braces, skb_in_use updates and error
 * propagation lines are elided in this copy of the file.
 */
6433 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6434 struct l2cap_ctrl *control,
6435 struct sk_buff *skb, u8 event)
6438 u16 txseq = control->txseq;
6439 bool skb_in_use = false;
6441 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6445 case L2CAP_EV_RECV_IFRAME:
6446 switch (l2cap_classify_txseq(chan, txseq)) {
6447 case L2CAP_TXSEQ_EXPECTED:
6448 /* Keep frame for reassembly later */
6449 l2cap_pass_to_tx(chan, control);
6450 skb_queue_tail(&chan->srej_q, skb);
6452 BT_DBG("Queued %p (queue len %d)", skb,
6453 skb_queue_len(&chan->srej_q));
6455 chan->expected_tx_seq = __next_seq(chan, txseq);
/* The head of our SREJ list arrived: pop it and try to drain. */
6457 case L2CAP_TXSEQ_EXPECTED_SREJ:
6458 l2cap_seq_list_pop(&chan->srej_list);
6460 l2cap_pass_to_tx(chan, control);
6461 skb_queue_tail(&chan->srej_q, skb);
6463 BT_DBG("Queued %p (queue len %d)", skb,
6464 skb_queue_len(&chan->srej_q));
6466 err = l2cap_rx_queued_iframes(chan);
6471 case L2CAP_TXSEQ_UNEXPECTED:
6472 /* Got a frame that can't be reassembled yet.
6473 * Save it for later, and send SREJs to cover
6474 * the missing frames.
6476 skb_queue_tail(&chan->srej_q, skb);
6478 BT_DBG("Queued %p (queue len %d)", skb,
6479 skb_queue_len(&chan->srej_q));
6481 l2cap_pass_to_tx(chan, control);
6482 l2cap_send_srej(chan, control->txseq);
6484 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6485 /* This frame was requested with an SREJ, but
6486 * some expected retransmitted frames are
6487 * missing. Request retransmission of missing
6490 skb_queue_tail(&chan->srej_q, skb);
6492 BT_DBG("Queued %p (queue len %d)", skb,
6493 skb_queue_len(&chan->srej_q));
6495 l2cap_pass_to_tx(chan, control);
6496 l2cap_send_srej_list(chan, control->txseq);
6498 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6499 /* We've already queued this frame. Drop this copy. */
6500 l2cap_pass_to_tx(chan, control);
6502 case L2CAP_TXSEQ_DUPLICATE:
6503 /* Expecting a later sequence number, so this frame
6504 * was already received. Ignore it completely.
6507 case L2CAP_TXSEQ_INVALID_IGNORE:
6509 case L2CAP_TXSEQ_INVALID:
6511 l2cap_send_disconn_req(chan, ECONNRESET);
6515 case L2CAP_EV_RECV_RR:
6516 l2cap_pass_to_tx(chan, control);
6517 if (control->final) {
6518 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6520 if (!test_and_clear_bit(CONN_REJ_ACT,
6521 &chan->conn_state)) {
6523 l2cap_retransmit_all(chan, control);
6526 l2cap_ertm_send(chan);
/* P=1 RR while SREJs outstanding: re-request the tail of the list
 * with the F-bit. */
6527 } else if (control->poll) {
6528 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6529 &chan->conn_state) &&
6530 chan->unacked_frames) {
6531 __set_retrans_timer(chan);
6534 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6535 l2cap_send_srej_tail(chan);
6537 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6538 &chan->conn_state) &&
6539 chan->unacked_frames)
6540 __set_retrans_timer(chan);
6542 l2cap_send_ack(chan);
6545 case L2CAP_EV_RECV_RNR:
6546 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6547 l2cap_pass_to_tx(chan, control);
6548 if (control->poll) {
6549 l2cap_send_srej_tail(chan);
/* Non-poll RNR: acknowledge with a plain RR S-frame. */
6551 struct l2cap_ctrl rr_control;
6552 memset(&rr_control, 0, sizeof(rr_control));
6553 rr_control.sframe = 1;
6554 rr_control.super = L2CAP_SUPER_RR;
6555 rr_control.reqseq = chan->buffer_seq;
6556 l2cap_send_sframe(chan, &rr_control);
6560 case L2CAP_EV_RECV_REJ:
6561 l2cap_handle_rej(chan, control);
6563 case L2CAP_EV_RECV_SREJ:
6564 l2cap_handle_srej(chan, control);
/* Free any skb that was not queued for later reassembly. */
6568 if (skb && !skb_in_use) {
6569 BT_DBG("Freeing %p", skb);
/* Finish an AMP channel move: return to the RECV state, switch the
 * connection MTU to the new controller's MTU (block MTU for a
 * high-speed link, ACL MTU otherwise), and resegment pending frames.
 *
 * NOTE(review): the if/else around the two MTU assignments is elided
 * in this copy.
 */
6576 static int l2cap_finish_move(struct l2cap_chan *chan)
6578 BT_DBG("chan %p", chan);
6580 chan->rx_state = L2CAP_RX_STATE_RECV;
6583 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6585 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6587 return l2cap_resegment(chan);
/* ERTM receive state machine: WAIT_P state during a channel move —
 * waiting for a frame with the P-bit before resuming.  On poll,
 * resynchronize the transmit queue to the peer's reqseq, finish the
 * move, and respond with the F-bit.
 *
 * NOTE(review): the poll check, error handling and braces are elided
 * in this copy.
 */
6590 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6591 struct l2cap_ctrl *control,
6592 struct sk_buff *skb, u8 event)
6596 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6602 l2cap_process_reqseq(chan, control->reqseq);
/* Re-point the send head at whatever remains queued. */
6604 if (!skb_queue_empty(&chan->tx_q))
6605 chan->tx_send_head = skb_peek(&chan->tx_q);
6607 chan->tx_send_head = NULL;
6609 /* Rewind next_tx_seq to the point expected
6612 chan->next_tx_seq = control->reqseq;
6613 chan->unacked_frames = 0;
6615 err = l2cap_finish_move(chan);
6619 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6620 l2cap_send_i_or_rr_or_rnr(chan);
/* I-frames received while waiting are dropped; S-frame events are
 * re-processed by the normal RECV handler. */
6622 if (event == L2CAP_EV_RECV_IFRAME)
6625 return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM receive state machine: WAIT_F state during a channel move —
 * waiting for the frame carrying the F-bit.  When it arrives,
 * resynchronize the transmit side, adjust the MTU for the new link,
 * resegment, and re-process the frame in RECV state.
 *
 * NOTE(review): the "else" between MTU assignments, early-return and
 * braces are elided in this copy.
 */
6628 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6629 struct l2cap_ctrl *control,
6630 struct sk_buff *skb, u8 event)
/* Keep waiting until a frame with F=1 arrives. */
6634 if (!control->final)
6637 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6639 chan->rx_state = L2CAP_RX_STATE_RECV;
6640 l2cap_process_reqseq(chan, control->reqseq);
6642 if (!skb_queue_empty(&chan->tx_q))
6643 chan->tx_send_head = skb_peek(&chan->tx_q);
6645 chan->tx_send_head = NULL;
6647 /* Rewind next_tx_seq to the point expected
6650 chan->next_tx_seq = control->reqseq;
6651 chan->unacked_frames = 0;
/* Pick the MTU of whichever controller now carries the channel. */
6654 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6656 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6658 err = l2cap_resegment(chan);
6661 err = l2cap_rx_state_recv(chan, control, skb, event);
6666 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6668 /* Make sure reqseq is for a packet that has been sent but not acked */
6671 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6672 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatcher: validates reqseq, then routes the
 * event to the handler for the channel's current rx_state.  An invalid
 * reqseq is a protocol violation and disconnects the channel.
 *
 * NOTE(review): break statements, the else keyword and the final
 * return are elided in this copy.
 */
6675 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6676 struct sk_buff *skb, u8 event)
6680 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6681 control, skb, event, chan->rx_state);
6683 if (__valid_reqseq(chan, control->reqseq)) {
6684 switch (chan->rx_state) {
6685 case L2CAP_RX_STATE_RECV:
6686 err = l2cap_rx_state_recv(chan, control, skb, event);
6688 case L2CAP_RX_STATE_SREJ_SENT:
6689 err = l2cap_rx_state_srej_sent(chan, control, skb,
6692 case L2CAP_RX_STATE_WAIT_P:
6693 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6695 case L2CAP_RX_STATE_WAIT_F:
6696 err = l2cap_rx_state_wait_f(chan, control, skb, event);
/* reqseq acknowledges a frame never sent: disconnect. */
6703 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6704 control->reqseq, chan->next_tx_seq,
6705 chan->expected_ack_seq);
6706 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: only in-sequence frames are reassembled;
 * anything else is dropped (streaming mode has no retransmission), and
 * the expected sequence is resynchronized to the received txseq.
 *
 * NOTE(review): the else branch, partial-SDU cleanup braces and
 * kfree_skb/return lines are elided in this copy.
 */
6712 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6713 struct sk_buff *skb)
6717 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6720 if (l2cap_classify_txseq(chan, control->txseq) ==
6721 L2CAP_TXSEQ_EXPECTED) {
6722 l2cap_pass_to_tx(chan, control);
6724 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6725 __next_seq(chan, chan->buffer_seq));
6727 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6729 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence: discard any partially assembled SDU ... */
6732 kfree_skb(chan->sdu);
6735 chan->sdu_last_frag = NULL;
/* ... and drop the frame itself. */
6739 BT_DBG("Freeing %p", skb);
/* Resynchronize: treat the peer's txseq as the new reference. */
6744 chan->last_acked_seq = control->txseq;
6745 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Receive path for ERTM and streaming channels: unpack the control
 * field, verify FCS, validate lengths and F/P bits, then feed the frame
 * to l2cap_rx() (ERTM) or l2cap_stream_rx() (streaming) as the right
 * event type.
 *
 * NOTE(review): goto drop labels, the "len" initialisation from
 * skb->len, braces and returns are elided in this copy.
 */
6750 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6752 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6756 __unpack_control(chan, skb);
6761 * We can just drop the corrupted I-frame here.
6762 * Receiver will miss it and start proper recovery
6763 * procedures and ask for retransmission.
6765 if (l2cap_check_fcs(chan, skb))
/* Compute the payload length net of SDU-length prefix and FCS. */
6768 if (!control->sframe && control->sar == L2CAP_SAR_START)
6769 len -= L2CAP_SDULEN_SIZE;
6771 if (chan->fcs == L2CAP_FCS_CRC16)
6772 len -= L2CAP_FCS_SIZE;
/* Payload exceeding the negotiated MPS is a protocol violation. */
6774 if (len > chan->mps) {
6775 l2cap_send_disconn_req(chan, ECONNRESET);
6779 if (!control->sframe) {
6782 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6783 control->sar, control->reqseq, control->final,
6786 /* Validate F-bit - F=0 always valid, F=1 only
6787 * valid in TX WAIT_F
6789 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6792 if (chan->mode != L2CAP_MODE_STREAMING) {
6793 event = L2CAP_EV_RECV_IFRAME;
6794 err = l2cap_rx(chan, control, skb, event);
6796 err = l2cap_stream_rx(chan, control, skb);
6800 l2cap_send_disconn_req(chan, ECONNRESET);
/* S-frame path: map the supervisory function to an rx event. */
6802 const u8 rx_func_to_event[4] = {
6803 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6804 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6807 /* Only I-frames are expected in streaming mode */
6808 if (chan->mode == L2CAP_MODE_STREAMING)
6811 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6812 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are corruption. */
6816 BT_ERR("Trailing bytes: %d in sframe", len);
6817 l2cap_send_disconn_req(chan, ECONNRESET);
6821 /* Validate F and P bits */
6822 if (control->final && (control->poll ||
6823 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6826 event = rx_func_to_event[control->super];
6827 if (l2cap_rx(chan, control, skb, event))
6828 l2cap_send_disconn_req(chan, ECONNRESET);
/* Return flow-control credits to the sender of an LE CoC channel once
 * our remaining rx credits drop below half of the initial allotment,
 * topping back up to le_max_credits.
 *
 * NOTE(review): the local return_credits declaration and early return
 * are elided in this copy.
 */
6838 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6840 struct l2cap_conn *conn = chan->conn;
6841 struct l2cap_le_credits pkt;
6844 /* We return more credits to the sender only after the amount of
6845 * credits falls below half of the initial amount.
6847 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6850 return_credits = le_max_credits - chan->rx_credits;
6852 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6854 chan->rx_credits += return_credits;
6856 pkt.cid = cpu_to_le16(chan->scid);
6857 pkt.credits = cpu_to_le16(return_credits);
6859 chan->ident = l2cap_get_ident(conn);
6861 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive path for LE credit-based flow control channels: consume one
 * rx credit per PDU, possibly return credits, and reassemble SDUs from
 * the 2-byte SDU-length-prefixed first fragment plus continuations.
 *
 * NOTE(review): several goto labels, error-code assignments, the
 * rx_credits decrement and the chan->sdu handling branches are elided
 * in this copy.
 */
6864 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* Peer sent without credit: protocol violation. */
6868 if (!chan->rx_credits) {
6869 BT_ERR("No credits to receive LE L2CAP data");
6873 if (chan->imtu < skb->len) {
6874 BT_ERR("Too big LE L2CAP PDU");
6879 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6881 l2cap_chan_le_send_credits(chan);
/* First fragment of a new SDU: starts with the total SDU length. */
6888 sdu_len = get_unaligned_le16(skb->data);
6889 skb_pull(skb, L2CAP_SDULEN_SIZE);
6891 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6892 sdu_len, skb->len, chan->imtu);
6894 if (sdu_len > chan->imtu) {
6895 BT_ERR("Too big LE L2CAP SDU length received");
6900 if (skb->len > sdu_len) {
6901 BT_ERR("Too much LE L2CAP data received");
/* Complete SDU in one PDU: deliver directly. */
6906 if (skb->len == sdu_len)
6907 return chan->ops->recv(chan, skb);
6910 chan->sdu_len = sdu_len;
6911 chan->sdu_last_frag = skb;
/* Continuation fragment of an SDU already in progress. */
6916 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6917 chan->sdu->len, skb->len, chan->sdu_len);
6919 if (chan->sdu->len + skb->len > chan->sdu_len) {
6920 BT_ERR("Too much LE L2CAP data received");
6925 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6928 if (chan->sdu->len == chan->sdu_len) {
6929 err = chan->ops->recv(chan, chan->sdu);
6932 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU. */
6940 kfree_skb(chan->sdu);
6942 chan->sdu_last_frag = NULL;
6946 /* We can't return an error here since we took care of the skb
6947 * freeing internally. An error return would cause the caller to
6948 * do a double-free of the skb.
/* Deliver a data PDU to the channel identified by its source CID,
 * dispatching on the channel mode (LE flow control, basic, ERTM or
 * streaming).  Unknown CIDs are dropped (or, for the A2MP CID, a
 * channel is created on demand).
 *
 * NOTE(review): goto drop/done labels, braces and kfree_skb calls are
 * elided in this copy of the file.
 */
6953 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6954 struct sk_buff *skb)
6956 struct l2cap_chan *chan;
6958 chan = l2cap_get_chan_by_scid(conn, cid);
/* No channel yet: A2MP traffic may create one on the fly. */
6960 if (cid == L2CAP_CID_A2MP) {
6961 chan = a2mp_channel_create(conn, skb);
6967 l2cap_chan_lock(chan);
6969 BT_DBG("unknown cid 0x%4.4x", cid);
6970 /* Drop packet and return */
6976 BT_DBG("chan %p, len %d", chan, skb->len);
6978 if (chan->state != BT_CONNECTED)
6981 switch (chan->mode) {
6982 case L2CAP_MODE_LE_FLOWCTL:
6983 if (l2cap_le_data_rcv(chan, skb) < 0)
6988 case L2CAP_MODE_BASIC:
6989 /* If socket recv buffers overflows we drop data here
6990 * which is *bad* because L2CAP has to be reliable.
6991 * But we don't have any other choice. L2CAP doesn't
6992 * provide flow control mechanism. */
6994 if (chan->imtu < skb->len)
6997 if (!chan->ops->recv(chan, skb))
7001 case L2CAP_MODE_ERTM:
7002 case L2CAP_MODE_STREAMING:
7003 l2cap_data_rcv(chan, skb);
7007 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7015 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) PDU to a matching PSM listener.
 * Only valid on BR/EDR links; the remote address and PSM are stashed in
 * the skb control block for recvmsg()'s msg_name.
 *
 * NOTE(review): goto drop/done labels, kfree_skb and the chan NULL
 * check are elided in this copy.
 */
7018 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7019 struct sk_buff *skb)
7021 struct hci_conn *hcon = conn->hcon;
7022 struct l2cap_chan *chan;
7024 if (hcon->type != ACL_LINK)
/* Look up a global channel bound to this PSM and address pair. */
7027 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7032 BT_DBG("chan %p, len %d", chan, skb->len);
7034 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7037 if (chan->imtu < skb->len)
7040 /* Store remote BD_ADDR and PSM for msg_name */
7041 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
7042 bt_cb(skb)->psm = psm;
7044 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT fixed-channel (CID 0x0004) PDU to the global ATT
 * channel for this address pair, dropping traffic from blacklisted
 * remotes.
 *
 * NOTE(review): drop labels, kfree_skb and the chan NULL check are
 * elided in this copy.
 */
7051 static void l2cap_att_channel(struct l2cap_conn *conn,
7052 struct sk_buff *skb)
7054 struct hci_conn *hcon = conn->hcon;
7055 struct l2cap_chan *chan;
/* ATT over L2CAP fixed channel only exists on LE links. */
7057 if (hcon->type != LE_LINK)
7060 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
7061 &hcon->src, &hcon->dst);
7065 BT_DBG("chan %p, len %d", chan, skb->len);
/* Ignore devices the user has blacklisted. */
7067 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
7070 if (chan->imtu < skb->len)
7073 if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete L2CAP frame by destination CID to the
 * signaling, connectionless, ATT, LE-signaling, SMP, 6LoWPAN or
 * data-channel handlers.
 *
 * NOTE(review): the length-mismatch drop branch, breaks and braces are
 * elided in this copy.
 */
7080 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7082 struct l2cap_hdr *lh = (void *) skb->data;
7086 skb_pull(skb, L2CAP_HDR_SIZE);
7087 cid = __le16_to_cpu(lh->cid);
7088 len = __le16_to_cpu(lh->len);
/* Header length must match the remaining payload exactly. */
7090 if (len != skb->len) {
7095 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7098 case L2CAP_CID_SIGNALING:
7099 l2cap_sig_channel(conn, skb);
7102 case L2CAP_CID_CONN_LESS:
/* Connectionless payload starts with the destination PSM. */
7103 psm = get_unaligned((__le16 *) skb->data);
7104 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7105 l2cap_conless_channel(conn, psm, skb);
7109 l2cap_att_channel(conn, skb);
7112 case L2CAP_CID_LE_SIGNALING:
7113 l2cap_le_sig_channel(conn, skb);
/* SMP: a rejected/invalid security PDU kills the connection. */
7117 if (smp_sig_channel(conn, skb))
7118 l2cap_conn_del(conn->hcon, EACCES);
7121 case L2CAP_FC_6LOWPAN:
7122 bt_6lowpan_recv(conn, skb);
/* Everything else is a dynamically allocated data channel. */
7126 l2cap_data_channel(conn, cid, skb);
7131 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming BR/EDR connection request.  Scan listening
 * channels and report the accept/role-switch link-mode bits, preferring
 * a socket bound to this adapter's own address (exact match) over a
 * wildcard (BDADDR_ANY) binding.
 *
 * NOTE(review): the "exact++" line for the exact-match branch appears
 * to be elided in this copy — TODO confirm against the original tree.
 */
7133 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7135 int exact = 0, lm1 = 0, lm2 = 0;
7136 struct l2cap_chan *c;
7138 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7140 /* Find listening sockets and check their link_mode */
7141 read_lock(&chan_list_lock);
7142 list_for_each_entry(c, &chan_list, global_l) {
7143 if (c->state != BT_LISTEN)
/* Exact match: socket bound to this adapter's address. */
7146 if (!bacmp(&c->src, &hdev->bdaddr)) {
7147 lm1 |= HCI_LM_ACCEPT;
7148 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7149 lm1 |= HCI_LM_MASTER;
/* Wildcard match: socket bound to any address. */
7151 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7152 lm2 |= HCI_LM_ACCEPT;
7153 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7154 lm2 |= HCI_LM_MASTER;
7157 read_unlock(&chan_list_lock);
7159 return exact ? lm1 : lm2;
7162 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7164 struct l2cap_conn *conn;
7166 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7169 conn = l2cap_conn_add(hcon);
7171 l2cap_conn_ready(conn);
7173 l2cap_conn_del(hcon, bt_to_errno(status));
7177 int l2cap_disconn_ind(struct hci_conn *hcon)
7179 struct l2cap_conn *conn = hcon->l2cap_data;
7181 BT_DBG("hcon %p", hcon);
7184 return HCI_ERROR_REMOTE_USER_TERM;
7185 return conn->disc_reason;
7188 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7190 BT_DBG("hcon %p reason %d", hcon, reason);
7192 bt_6lowpan_del_conn(hcon->l2cap_data);
7194 l2cap_conn_del(hcon, bt_to_errno(reason));
7197 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7199 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7202 if (encrypt == 0x00) {
7203 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7204 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7205 } else if (chan->sec_level == BT_SECURITY_HIGH)
7206 l2cap_chan_close(chan, ECONNREFUSED);
7208 if (chan->sec_level == BT_SECURITY_MEDIUM)
7209 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption state changed on a link.
 * Walk every channel on the connection and advance its state machine:
 * LE links distribute SMP keys, ATT channels become ready, BR/EDR
 * channels proceed with connect/connect-response signaling or are timed
 * out, depending on status and their current state.
 *
 * NOTE(review): several lines are elided in this copy (the NULL-conn
 * guard, "continue" statements after unlock, the LE early return, the
 * rsp buffer declaration and the conf-req buffer) — comments describe
 * only the visible flow.
 */
7213 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7215 struct l2cap_conn *conn = hcon->l2cap_data;
7216 struct l2cap_chan *chan;
7221 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
/* LE link: successful encryption triggers SMP key distribution. */
7223 if (hcon->type == LE_LINK) {
7224 if (!status && encrypt)
7225 smp_distribute_keys(conn, 0);
7226 cancel_delayed_work(&conn->security_timer);
7229 mutex_lock(&conn->chan_lock);
7231 list_for_each_entry(chan, &conn->chan_l, list) {
7232 l2cap_chan_lock(chan);
7234 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7235 state_to_string(chan->state));
/* A2MP fixed channels are not subject to link security. */
7237 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
7238 l2cap_chan_unlock(chan);
/* ATT channel becomes ready once the link is encrypted. */
7242 if (chan->scid == L2CAP_CID_ATT) {
7243 if (!status && encrypt) {
7244 chan->sec_level = hcon->sec_level;
7245 l2cap_chan_ready(chan);
7248 l2cap_chan_unlock(chan);
/* Skip channels with no security operation pending. */
7252 if (!__l2cap_no_conn_pending(chan)) {
7253 l2cap_chan_unlock(chan);
/* Established channels: resume traffic and re-check security. */
7257 if (!status && (chan->state == BT_CONNECTED ||
7258 chan->state == BT_CONFIG)) {
7259 chan->ops->resume(chan);
7260 l2cap_check_encryption(chan, encrypt);
7261 l2cap_chan_unlock(chan);
/* Outgoing connect waiting on security: proceed or time out. */
7265 if (chan->state == BT_CONNECT) {
7267 l2cap_start_connection(chan);
7269 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Incoming connect waiting on security: build the response. */
7270 } else if (chan->state == BT_CONNECT2) {
7271 struct l2cap_conn_rsp rsp;
7275 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7276 res = L2CAP_CR_PEND;
7277 stat = L2CAP_CS_AUTHOR_PEND;
7278 chan->ops->defer(chan);
7280 l2cap_state_change(chan, BT_CONFIG);
7281 res = L2CAP_CR_SUCCESS;
7282 stat = L2CAP_CS_NO_INFO;
/* Security failed: reject the pending connection. */
7285 l2cap_state_change(chan, BT_DISCONN);
7286 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7287 res = L2CAP_CR_SEC_BLOCK;
7288 stat = L2CAP_CS_NO_INFO;
7291 rsp.scid = cpu_to_le16(chan->dcid);
7292 rsp.dcid = cpu_to_le16(chan->scid);
7293 rsp.result = cpu_to_le16(res);
7294 rsp.status = cpu_to_le16(stat);
7295 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Follow a successful response with our configuration request. */
7298 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7299 res == L2CAP_CR_SUCCESS) {
7301 set_bit(CONF_REQ_SENT, &chan->conf_state);
7302 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7304 l2cap_build_conf_req(chan, buf),
7306 chan->num_conf_req++;
7310 l2cap_chan_unlock(chan);
7313 mutex_unlock(&conn->chan_lock);
/* HCI entry point for ACL data: reassemble L2CAP frames that may span
 * several ACL fragments.  Start fragments allocate conn->rx_skb sized
 * to the full frame; continuation fragments are appended until rx_len
 * reaches zero, then the complete frame goes to l2cap_recv_frame().
 *
 * NOTE(review): the switch on "flags", drop labels, kfree_skb calls,
 * the allocation-failure branch and the final return are elided in
 * this copy.
 */
7318 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7320 struct l2cap_conn *conn = hcon->l2cap_data;
7321 struct l2cap_hdr *hdr;
7324 /* For AMP controller do not create l2cap conn */
7325 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7329 conn = l2cap_conn_add(hcon);
7334 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7338 case ACL_START_NO_FLUSH:
/* A new start fragment while one is pending means we lost data. */
7341 BT_ERR("Unexpected start frame (len %d)", skb->len);
7342 kfree_skb(conn->rx_skb);
7343 conn->rx_skb = NULL;
7345 l2cap_conn_unreliable(conn, ECOMM);
7348 /* Start fragment always begin with Basic L2CAP header */
7349 if (skb->len < L2CAP_HDR_SIZE) {
7350 BT_ERR("Frame is too short (len %d)", skb->len);
7351 l2cap_conn_unreliable(conn, ECOMM);
7355 hdr = (struct l2cap_hdr *) skb->data;
7356 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7358 if (len == skb->len) {
7359 /* Complete frame received */
7360 l2cap_recv_frame(conn, skb);
7364 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7366 if (skb->len > len) {
7367 BT_ERR("Frame is too long (len %d, expected len %d)",
7369 l2cap_conn_unreliable(conn, ECOMM);
7373 /* Allocate skb for the complete frame (with header) */
7374 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7378 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Remember how many bytes are still outstanding. */
7380 conn->rx_len = len - skb->len;
/* (Continuation fragment path.) */
7384 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7386 if (!conn->rx_len) {
7387 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7388 l2cap_conn_unreliable(conn, ECOMM);
7392 if (skb->len > conn->rx_len) {
7393 BT_ERR("Fragment is too long (len %d, expected %d)",
7394 skb->len, conn->rx_len);
7395 kfree_skb(conn->rx_skb);
7396 conn->rx_skb = NULL;
7398 l2cap_conn_unreliable(conn, ECOMM);
7402 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7404 conn->rx_len -= skb->len;
7406 if (!conn->rx_len) {
7407 /* Complete frame received. l2cap_recv_frame
7408 * takes ownership of the skb so set the global
7409 * rx_skb pointer to NULL first.
7411 struct sk_buff *rx_skb = conn->rx_skb;
7412 conn->rx_skb = NULL;
7413 l2cap_recv_frame(conn, rx_skb);
/* l2cap_debugfs_show() - seq_file show callback for the "l2cap" debugfs file.
 *
 * Walks the global channel list and prints one line per registered channel
 * (state, PSM, source/dest CIDs, MTUs, security level, mode), holding the
 * chan_list_lock read lock for the duration of the walk.
 *
 * NOTE(review): the listing is gapped — the first seq_printf argument line
 * (presumably the two %pMR address arguments) and the trailing "return 0;"
 * are not visible here.
 */
7423 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7425 struct l2cap_chan *c;
/* Readers may run concurrently; writers (channel add/remove) are excluded. */
7427 read_lock(&chan_list_lock);
7429 list_for_each_entry(c, &chan_list, global_l) {
7430 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7432 c->state, __le16_to_cpu(c->psm),
7433 c->scid, c->dcid, c->imtu, c->omtu,
7434 c->sec_level, c->mode);
7437 read_unlock(&chan_list_lock);
/* l2cap_debugfs_open() - open callback for the "l2cap" debugfs file.
 * Standard single_open() wrapper binding l2cap_debugfs_show as the
 * single-record seq_file show routine.
 */
7442 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7444 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the read-only "l2cap" debugfs entry; plumbed through
 * the seq_file single_* helpers.
 * NOTE(review): the ".read = seq_read," member (original line 7449) is not
 * visible in this gapped listing.
 */
7447 static const struct file_operations l2cap_debugfs_fops = {
7448 .open = l2cap_debugfs_open,
7450 .llseek = seq_lseek,
7451 .release = single_release,
/* Dentry of the "l2cap" debugfs file; kept so l2cap_exit() can remove it. */
7454 static struct dentry *l2cap_debugfs;
/* l2cap_init() - module init: register the L2CAP socket layer and create
 * the debugfs entries ("l2cap" dump plus the two LE credit/MPS tunables).
 *
 * Returns 0 on success or the error from l2cap_init_sockets().
 *
 * NOTE(review): gapped listing — the error check on @err, the early return
 * when bt_debugfs is unavailable, the u16 variable arguments to the
 * debugfs_create_u16() calls (presumably &le_max_credits / &le_default_mps)
 * and the final "return 0;" are not visible here.
 */
7456 int __init l2cap_init(void)
7460 err = l2cap_init_sockets();
/* Debugfs is best-effort: skip the entries if bt_debugfs was not created. */
7464 if (IS_ERR_OR_NULL(bt_debugfs))
7467 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7468 NULL, &l2cap_debugfs_fops);
7470 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7472 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
/* l2cap_exit() - module teardown: mirror of l2cap_init().
 * Cleans up the 6LoWPAN-over-BT glue, removes the "l2cap" debugfs file
 * and unregisters the L2CAP socket layer.
 */
7480 void l2cap_exit(void)
7482 bt_6lowpan_cleanup();
7483 debugfs_remove(l2cap_debugfs);
7484 l2cap_cleanup_sockets();
/* Runtime-tunable module parameter to disable Enhanced Retransmission
 * Mode; "disable_ertm" is defined elsewhere in this file (not visible in
 * this chunk).
 */
7487 module_param(disable_ertm, bool, 0644);
7488 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");