2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 struct sk_buff_head *skbs, u8 event);
64 static void l2cap_retrans_timeout(struct work_struct *work);
65 static void l2cap_monitor_timeout(struct work_struct *work);
66 static void l2cap_ack_timeout(struct work_struct *work);
/* Map an HCI link type / address type pair onto the exported BDADDR_*
 * address-type constants used by the L2CAP socket API.
 * NOTE(review): this extract is line-sampled; the non-LE fallthrough
 * (presumably returning BDADDR_BREDR) is not visible here.
 */
68 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
70 if (link_type == LE_LINK) {
71 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
72 return BDADDR_LE_PUBLIC;
74 return BDADDR_LE_RANDOM;
/* Convenience wrapper: address type of the local (source) side of hcon. */
80 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
82 return bdaddr_type(hcon->type, hcon->src_type);
/* Convenience wrapper: address type of the remote (destination) side. */
85 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
87 return bdaddr_type(hcon->type, hcon->dst_type);
90 /* ---- L2CAP channels ---- */
/* Lockless walk of conn->chan_l matching on the destination CID.
 * Caller must hold conn->chan_lock. Match/return lines are not visible
 * in this sampled extract.
 */
92 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
97 list_for_each_entry(c, &conn->chan_l, list) {
/* As above, but matching on the source CID. Caller holds chan_lock. */
104 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
107 struct l2cap_chan *c;
109 list_for_each_entry(c, &conn->chan_l, list) {
116 /* Find channel with given SCID.
117 * Returns a reference locked channel.
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_scid(conn, cid);
127 /* Only lock if chan reference is not 0 */
/* hold_unless_zero avoids resurrecting a channel whose last ref is
 * being dropped concurrently; returns NULL in that case.
 */
128 c = l2cap_chan_hold_unless_zero(c);
132 mutex_unlock(&conn->chan_lock);
137 /* Find channel with given DCID.
138 * Returns a reference locked channel.
140 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
143 struct l2cap_chan *c;
145 mutex_lock(&conn->chan_lock);
146 c = __l2cap_get_chan_by_dcid(conn, cid);
148 /* Only lock if chan reference is not 0 */
149 c = l2cap_chan_hold_unless_zero(c);
153 mutex_unlock(&conn->chan_lock);
/* Lockless walk matching on the signalling command identifier assigned
 * by l2cap_get_ident(). Caller holds chan_lock.
 */
158 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &conn->chan_l, list) {
164 if (c->ident == ident)
/* Locked + referenced variant of the ident lookup, mirroring the
 * scid/dcid wrappers above.
 */
170 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
173 struct l2cap_chan *c;
175 mutex_lock(&conn->chan_lock);
176 c = __l2cap_get_chan_by_ident(conn, ident);
178 /* Only lock if chan reference is not 0 */
179 c = l2cap_chan_hold_unless_zero(c);
183 mutex_unlock(&conn->chan_lock);
/* Search the global channel list for a channel bound to (psm, src addr)
 * with a compatible transport. Caller must hold chan_list_lock.
 * BR/EDR and LE PSM namespaces are disjoint, hence the src_type checks.
 */
188 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
191 struct l2cap_chan *c;
193 list_for_each_entry(c, &chan_list, global_l) {
194 if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
197 if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
200 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a channel to a PSM. If psm is non-zero, fail when the (psm, src)
 * pair is already taken; if zero, auto-allocate from the dynamic range
 * appropriate to the transport. Error-return lines are not visible in
 * this sampled extract.
 */
206 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
210 write_lock(&chan_list_lock);
212 if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
222 u16 p, start, end, incr;
224 if (chan->src_type == BDADDR_BREDR) {
225 start = L2CAP_PSM_DYN_START;
226 end = L2CAP_PSM_AUTO_END;
/* LE uses its own, smaller dynamic PSM range. */
229 start = L2CAP_PSM_LE_DYN_START;
230 end = L2CAP_PSM_LE_DYN_END;
/* Scan the dynamic range for a free PSM; 'incr' is set in lines not
 * visible here (BR/EDR dynamic PSMs advance by 2, odd values only).
 */
235 for (p = start; p <= end; p += incr)
236 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
238 chan->psm = cpu_to_le16(p);
239 chan->sport = cpu_to_le16(p);
246 write_unlock(&chan_list_lock);
249 EXPORT_SYMBOL_GPL(l2cap_add_psm);
/* Bind a channel directly to a fixed source CID instead of a PSM. */
251 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
253 write_lock(&chan_list_lock);
255 /* Override the defaults (which are for conn-oriented) */
256 chan->omtu = L2CAP_DEFAULT_MTU;
257 chan->chan_type = L2CAP_CHAN_FIXED;
261 write_unlock(&chan_list_lock);
/* Allocate the first unused dynamic CID on this connection; the dynamic
 * range end differs between LE and BR/EDR links.
 */
266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
270 if (conn->hcon->type == LE_LINK)
271 dyn_end = L2CAP_CID_LE_DYN_END;
273 dyn_end = L2CAP_CID_DYN_END;
275 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move the channel to a new state via the ops->state_change callback
 * (err == 0 means a plain state transition).
 */
283 static void l2cap_state_change(struct l2cap_chan *chan, int state)
285 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
286 state_to_string(state));
289 chan->ops->state_change(chan, state, 0);
/* Combined state transition + error report through the same callback. */
292 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
296 chan->ops->state_change(chan, chan->state, err);
/* Report an error without changing state (state passed unchanged). */
299 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
301 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer, unless the monitor timer is already
 * pending (monitor supersedes retransmission per the ERTM state machine).
 */
304 static void __set_retrans_timer(struct l2cap_chan *chan)
306 if (!delayed_work_pending(&chan->monitor_timer) &&
307 chan->retrans_timeout) {
308 l2cap_set_timer(chan, &chan->retrans_timer,
309 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the monitor timer; it replaces any pending retransmission timer. */
313 static void __set_monitor_timer(struct l2cap_chan *chan)
315 __clear_retrans_timer(chan);
316 if (chan->monitor_timeout) {
317 l2cap_set_timer(chan, &chan->monitor_timer,
318 msecs_to_jiffies(chan->monitor_timeout));
/* Linear search of an skb queue for the frame carrying txseq == seq. */
322 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
327 skb_queue_walk(head, skb) {
328 if (bt_cb(skb)->l2cap.txseq == seq)
335 /* ---- L2CAP sequence number lists ---- */
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338 * SREJ requests that are received and for frames that are to be
339 * retransmitted. These seq_list functions implement a singly-linked
340 * list in an array, where membership in the list can also be checked
341 * in constant time. Items can also be added to the tail of the list
342 * and removed from the head in constant time, without further memory
/* Allocate and clear a seq_list sized for 'size' in-flight sequence
 * numbers. Returns 0 on success; the -ENOMEM path is not visible in
 * this sampled extract.
 */
346 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
348 size_t alloc_size, i;
350 /* Allocated size is a power of 2 to map sequence numbers
351 * (which may be up to 14 bits) in to a smaller array that is
352 * sized for the negotiated ERTM transmit windows.
354 alloc_size = roundup_pow_of_two(size);
356 seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
/* Power-of-two size makes 'seq & mask' a cheap perfect slot index. */
360 seq_list->mask = alloc_size - 1;
361 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 for (i = 0; i < alloc_size; i++)
364 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
369 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
371 kfree(seq_list->list);
374 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
377 /* Constant-time check for list membership */
378 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove and return the sequence number at the head of the list in
 * constant time; resets head/tail when the list becomes empty.
 */
381 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
383 u16 seq = seq_list->head;
384 u16 mask = seq_list->mask;
386 seq_list->head = seq_list->list[seq & mask];
387 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
389 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
390 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
391 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Reset the whole list to empty; O(mask) but only when non-empty. */
397 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
401 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
404 for (i = 0; i <= seq_list->mask; i++)
405 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
407 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
408 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append a sequence number at the tail; duplicates are ignored since a
 * slot already in use means 'seq' is already a list member.
 */
411 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
413 u16 mask = seq_list->mask;
415 /* All appends happen in constant time */
417 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
420 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
421 seq_list->head = seq;
423 seq_list->list[seq_list->tail & mask] = seq;
425 seq_list->tail = seq;
426 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Workqueue handler for chan->chan_timer expiry: close the channel with
 * a reason derived from its current state, then drop the reference that
 * __set_chan_timer() took when scheduling this work.
 */
429 static void l2cap_chan_timeout(struct work_struct *work)
431 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
433 struct l2cap_conn *conn = chan->conn;
436 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
438 mutex_lock(&conn->chan_lock);
439 /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
440 * this work. No need to call l2cap_chan_hold(chan) here again.
442 l2cap_chan_lock(chan);
444 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
445 reason = ECONNREFUSED;
446 else if (chan->state == BT_CONNECT &&
447 chan->sec_level != BT_SECURITY_SDP)
448 reason = ECONNREFUSED;
/* Default reason for the remaining states is set in lines not visible
 * in this sampled extract (ETIMEDOUT in the upstream source - verify).
 */
452 l2cap_chan_close(chan, reason);
454 chan->ops->close(chan);
456 l2cap_chan_unlock(chan);
/* Balances the hold taken by __set_chan_timer(). */
457 l2cap_chan_put(chan);
459 mutex_unlock(&conn->chan_lock);
/* Allocate and initialize a new channel object: queues, lock, timers,
 * initial BT_OPEN state and a refcount of one. The channel is linked on
 * the global chan_list; callers own the initial reference.
 */
462 struct l2cap_chan *l2cap_chan_create(void)
464 struct l2cap_chan *chan;
466 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
470 skb_queue_head_init(&chan->tx_q);
471 skb_queue_head_init(&chan->srej_q);
472 mutex_init(&chan->lock);
474 /* Set default lock nesting level */
475 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
477 write_lock(&chan_list_lock);
478 list_add(&chan->global_l, &chan_list);
479 write_unlock(&chan_list_lock);
481 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
482 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
483 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
484 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
486 chan->state = BT_OPEN;
488 kref_init(&chan->kref);
490 /* This flag is cleared in l2cap_chan_ready() */
491 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
493 BT_DBG("chan %p", chan);
497 EXPORT_SYMBOL_GPL(l2cap_chan_create);
/* kref release callback: unlink from the global list and free.
 * The kfree itself falls in lines not visible in this sampled extract.
 */
499 static void l2cap_chan_destroy(struct kref *kref)
501 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
503 BT_DBG("chan %p", chan);
505 write_lock(&chan_list_lock);
506 list_del(&chan->global_l);
507 write_unlock(&chan_list_lock);
/* Take an additional reference on the channel. */
512 void l2cap_chan_hold(struct l2cap_chan *c)
514 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
/* Take a reference only if the refcount has not already hit zero;
 * returns NULL when the channel is already being destroyed.
 */
519 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
521 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
523 if (!kref_get_unless_zero(&c->kref))
/* Drop a reference; frees via l2cap_chan_destroy on the last put. */
529 void l2cap_chan_put(struct l2cap_chan *c)
531 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
533 kref_put(&c->kref, l2cap_chan_destroy);
535 EXPORT_SYMBOL_GPL(l2cap_chan_put);
/* Reset a channel to the protocol default parameters (ERTM windows,
 * timeouts, security level) before (re)configuration.
 */
537 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
539 chan->fcs = L2CAP_FCS_CRC16;
540 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
541 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
542 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
543 chan->remote_max_tx = chan->max_tx;
544 chan->remote_tx_win = chan->tx_win;
545 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
546 chan->sec_level = BT_SECURITY_LOW;
547 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
548 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
549 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
551 chan->conf_state = 0;
/* Must survive the conf_state reset above; cleared in l2cap_chan_ready(). */
552 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
554 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
556 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
/* Initialize LE credit-based flow control state: MPS derived from the
 * link MTU and enough rx credits for one full SDU.
 */
558 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
561 chan->sdu_last_frag = NULL;
563 chan->tx_credits = tx_credits;
564 /* Derive MPS from connection MTU to stop HCI fragmentation */
565 chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
566 /* Give enough credits for a full packet */
567 chan->rx_credits = (chan->imtu / chan->mps) + 1;
569 skb_queue_head_init(&chan->tx_q);
/* Enhanced-credit variant: same init, but enforce the spec-mandated
 * minimum MPS and recompute credits if it was raised.
 */
572 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
574 l2cap_le_flowctl_init(chan, tx_credits);
576 /* L2CAP implementations shall support a minimum MPS of 64 octets */
577 if (chan->mps < L2CAP_ECRED_MIN_MPS) {
578 chan->mps = L2CAP_ECRED_MIN_MPS;
579 chan->rx_credits = (chan->imtu / chan->mps) + 1;
/* Attach a channel to a connection: assign CIDs/MTU according to the
 * channel type, take a channel reference, optionally pin the hci_conn,
 * and link the channel on conn->chan_l. Caller holds conn->chan_lock.
 */
583 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
585 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
586 __le16_to_cpu(chan->psm), chan->dcid);
588 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
592 switch (chan->chan_type) {
593 case L2CAP_CHAN_CONN_ORIENTED:
594 /* Alloc CID for connection-oriented socket */
595 chan->scid = l2cap_alloc_cid(conn);
596 if (conn->hcon->type == ACL_LINK)
597 chan->omtu = L2CAP_DEFAULT_MTU;
600 case L2CAP_CHAN_CONN_LESS:
601 /* Connectionless socket */
602 chan->scid = L2CAP_CID_CONN_LESS;
603 chan->dcid = L2CAP_CID_CONN_LESS;
604 chan->omtu = L2CAP_DEFAULT_MTU;
607 case L2CAP_CHAN_FIXED:
608 /* Caller will set CID and CID specific MTU values */
612 /* Raw socket can send/recv signalling messages only */
613 chan->scid = L2CAP_CID_SIGNALING;
614 chan->dcid = L2CAP_CID_SIGNALING;
615 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort defaults for the extended flow specification (EFS). */
618 chan->local_id = L2CAP_BESTEFFORT_ID;
619 chan->local_stype = L2CAP_SERV_BESTEFFORT;
620 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
621 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
622 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
623 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
625 l2cap_chan_hold(chan);
627 /* Only keep a reference for fixed channels if they requested it */
628 if (chan->chan_type != L2CAP_CHAN_FIXED ||
629 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
630 hci_conn_hold(conn->hcon);
632 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
635 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
637 mutex_lock(&conn->chan_lock);
638 __l2cap_chan_add(conn, chan);
639 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection and tear down its per-mode state:
 * stop timers, notify the owner via ops->teardown, unlink, drop the
 * hci_conn pin if one was taken in __l2cap_chan_add(), disconnect any
 * AMP logical link, and purge mode-specific queues/lists.
 */
642 void l2cap_chan_del(struct l2cap_chan *chan, int err)
644 struct l2cap_conn *conn = chan->conn;
646 __clear_chan_timer(chan);
648 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
649 state_to_string(chan->state));
651 chan->ops->teardown(chan, err);
654 struct amp_mgr *mgr = conn->hcon->amp_mgr;
655 /* Delete from channel list */
656 list_del(&chan->list);
/* Balances the l2cap_chan_hold() taken in __l2cap_chan_add(). */
658 l2cap_chan_put(chan);
662 /* Reference was only held for non-fixed channels or
663 * fixed channels that explicitly requested it using the
664 * FLAG_HOLD_HCI_CONN flag.
666 if (chan->chan_type != L2CAP_CHAN_FIXED ||
667 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
668 hci_conn_drop(conn->hcon);
670 if (mgr && mgr->bredr_chan == chan)
671 mgr->bredr_chan = NULL;
674 if (chan->hs_hchan) {
675 struct hci_chan *hs_hchan = chan->hs_hchan;
677 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
678 amp_disconnect_logical_link(hs_hchan);
/* Skip mode-specific cleanup while configuration never completed. */
681 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
684 switch (chan->mode) {
685 case L2CAP_MODE_BASIC:
688 case L2CAP_MODE_LE_FLOWCTL:
689 case L2CAP_MODE_EXT_FLOWCTL:
690 skb_queue_purge(&chan->tx_q);
693 case L2CAP_MODE_ERTM:
694 __clear_retrans_timer(chan);
695 __clear_monitor_timer(chan);
696 __clear_ack_timer(chan);
698 skb_queue_purge(&chan->srej_q);
700 l2cap_seq_list_free(&chan->srej_list);
701 l2cap_seq_list_free(&chan->retrans_list);
704 case L2CAP_MODE_STREAMING:
705 skb_queue_purge(&chan->tx_q);
709 EXPORT_SYMBOL_GPL(l2cap_chan_del);
/* Invoke func(chan, data) for every channel on the connection; caller
 * holds conn->chan_lock.
 */
711 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
714 struct l2cap_chan *chan;
716 list_for_each_entry(chan, &conn->chan_l, list) {
/* Locked wrapper around __l2cap_chan_list(). */
721 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
727 mutex_lock(&conn->chan_lock);
728 __l2cap_chan_list(conn, func, data);
729 mutex_unlock(&conn->chan_lock);
732 EXPORT_SYMBOL_GPL(l2cap_chan_list);
/* Workqueue handler run after an LE identity-address resolution: copy
 * the (possibly updated) destination address and type from the hci_conn
 * into every channel on the connection.
 */
734 static void l2cap_conn_update_id_addr(struct work_struct *work)
736 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
737 id_addr_update_work);
738 struct hci_conn *hcon = conn->hcon;
739 struct l2cap_chan *chan;
741 mutex_lock(&conn->chan_lock);
743 list_for_each_entry(chan, &conn->chan_l, list) {
744 l2cap_chan_lock(chan);
745 bacpy(&chan->dst, &hcon->dst);
746 chan->dst_type = bdaddr_dst_type(hcon);
747 l2cap_chan_unlock(chan);
750 mutex_unlock(&conn->chan_lock);
/* Reject a pending LE credit-based connection request: pick the result
 * code (authorization-pending if setup was deferred, otherwise bad PSM),
 * move to BT_DISCONN and answer with an LE Connection Response.
 */
753 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
755 struct l2cap_conn *conn = chan->conn;
756 struct l2cap_le_conn_rsp rsp;
759 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
760 result = L2CAP_CR_LE_AUTHORIZATION;
762 result = L2CAP_CR_LE_BAD_PSM;
764 l2cap_state_change(chan, BT_DISCONN);
766 rsp.dcid = cpu_to_le16(chan->scid);
767 rsp.mtu = cpu_to_le16(chan->imtu);
768 rsp.mps = cpu_to_le16(chan->mps);
769 rsp.credits = cpu_to_le16(chan->rx_credits);
770 rsp.result = cpu_to_le16(result);
772 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Enhanced-credit variant of the reject above; only the result field of
 * the zeroed response is populated.
 */
776 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
778 struct l2cap_conn *conn = chan->conn;
779 struct l2cap_ecred_conn_rsp rsp;
782 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
783 result = L2CAP_CR_LE_AUTHORIZATION;
785 result = L2CAP_CR_LE_BAD_PSM;
787 l2cap_state_change(chan, BT_DISCONN);
789 memset(&rsp, 0, sizeof(rsp));
791 rsp.result = cpu_to_le16(result);
793 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* BR/EDR variant: reply to a classic Connection Request with a
 * security-block or bad-PSM result and no further status info.
 */
797 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
799 struct l2cap_conn *conn = chan->conn;
800 struct l2cap_conn_rsp rsp;
803 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
804 result = L2CAP_CR_SEC_BLOCK;
806 result = L2CAP_CR_BAD_PSM;
808 l2cap_state_change(chan, BT_DISCONN);
810 rsp.scid = cpu_to_le16(chan->dcid);
811 rsp.dcid = cpu_to_le16(chan->scid);
812 rsp.result = cpu_to_le16(result);
813 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
815 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close a channel, choosing the shutdown path from its current state:
 * send a Disconnect Request when fully connected, a transport-specific
 * reject when a connect is still pending, or just tear down otherwise.
 * Case labels for the states are in lines not visible in this sampled
 * extract.
 */
818 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
820 struct l2cap_conn *conn = chan->conn;
822 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
824 switch (chan->state) {
826 chan->ops->teardown(chan, 0);
831 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
832 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
833 l2cap_send_disconn_req(chan, reason);
835 l2cap_chan_del(chan, reason);
839 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
840 if (conn->hcon->type == ACL_LINK)
841 l2cap_chan_connect_reject(chan);
842 else if (conn->hcon->type == LE_LINK) {
843 switch (chan->mode) {
844 case L2CAP_MODE_LE_FLOWCTL:
845 l2cap_chan_le_connect_reject(chan);
847 case L2CAP_MODE_EXT_FLOWCTL:
848 l2cap_chan_ecred_connect_reject(chan);
854 l2cap_chan_del(chan, reason);
859 l2cap_chan_del(chan, reason);
863 chan->ops->teardown(chan, 0);
867 EXPORT_SYMBOL(l2cap_chan_close);
/* Translate the channel type + requested security level into an HCI
 * authentication requirement. SDP (and 3DSP multicast) connections are
 * special-cased so plain SDP traffic never forces bonding.
 */
869 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
871 switch (chan->chan_type) {
873 switch (chan->sec_level) {
874 case BT_SECURITY_HIGH:
875 case BT_SECURITY_FIPS:
876 return HCI_AT_DEDICATED_BONDING_MITM;
877 case BT_SECURITY_MEDIUM:
878 return HCI_AT_DEDICATED_BONDING;
880 return HCI_AT_NO_BONDING;
883 case L2CAP_CHAN_CONN_LESS:
884 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
885 if (chan->sec_level == BT_SECURITY_LOW)
886 chan->sec_level = BT_SECURITY_SDP;
888 if (chan->sec_level == BT_SECURITY_HIGH ||
889 chan->sec_level == BT_SECURITY_FIPS)
890 return HCI_AT_NO_BONDING_MITM;
892 return HCI_AT_NO_BONDING;
894 case L2CAP_CHAN_CONN_ORIENTED:
895 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
896 if (chan->sec_level == BT_SECURITY_LOW)
897 chan->sec_level = BT_SECURITY_SDP;
899 if (chan->sec_level == BT_SECURITY_HIGH ||
900 chan->sec_level == BT_SECURITY_FIPS)
901 return HCI_AT_NO_BONDING_MITM;
903 return HCI_AT_NO_BONDING;
/* Default channel types map to general bonding by security level. */
908 switch (chan->sec_level) {
909 case BT_SECURITY_HIGH:
910 case BT_SECURITY_FIPS:
911 return HCI_AT_GENERAL_BONDING_MITM;
912 case BT_SECURITY_MEDIUM:
913 return HCI_AT_GENERAL_BONDING;
915 return HCI_AT_NO_BONDING;
921 /* Service level security */
922 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
924 struct l2cap_conn *conn = chan->conn;
/* LE security is enforced by SMP, not the BR/EDR auth machinery. */
927 if (conn->hcon->type == LE_LINK)
928 return smp_conn_security(conn->hcon, chan->sec_level);
930 auth_type = l2cap_get_auth_type(chan);
932 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved 1-128 range.
 */
936 static u8 l2cap_get_ident(struct l2cap_conn *conn)
940 /* Get next available identificator.
941 * 1 - 128 are used by kernel.
942 * 129 - 199 are reserved.
943 * 200 - 254 are used by utilities like l2ping, etc.
946 mutex_lock(&conn->ident_lock);
/* Wrap-around assignment back to 1 sits in lines not visible here. */
948 if (++conn->tx_ident > 128)
953 mutex_unlock(&conn->ident_lock);
/* Build a signalling command PDU and transmit it on the connection's
 * HCI channel at maximum priority, flagged force-active.
 */
958 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
961 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
964 BT_DBG("code 0x%2.2x", code);
969 /* Use NO_FLUSH if supported or we have an LE link (which does
970 * not support auto-flushing packets) */
971 if (lmp_no_flush_capable(conn->hcon->hdev) ||
972 conn->hcon->type == LE_LINK)
973 flags = ACL_START_NO_FLUSH;
977 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
978 skb->priority = HCI_PRIO_MAX;
980 hci_send_acl(conn->hchan, skb, flags);
/* A channel is "moving" (AMP channel move in progress) in every move
 * state except STABLE and WAIT_PREPARE.
 */
983 static bool __chan_is_moving(struct l2cap_chan *chan)
985 return chan->move_state != L2CAP_MOVE_STABLE &&
986 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data skb for the channel: route via the high-speed (AMP)
 * HCI channel when one is attached and no move is in flight, otherwise
 * over the ACL link with flush flags chosen per link capabilities.
 */
989 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
991 struct hci_conn *hcon = chan->conn->hcon;
994 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
997 if (chan->hs_hcon && !__chan_is_moving(chan)) {
999 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
1006 /* Use NO_FLUSH for LE links (where this is the only option) or
1007 * if the BR/EDR link supports it and flushing has not been
1008 * explicitly requested (through FLAG_FLUSHABLE).
1010 if (hcon->type == LE_LINK ||
1011 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1012 lmp_no_flush_capable(hcon->hdev)))
1013 flags = ACL_START_NO_FLUSH;
1017 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1018 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced ERTM control field into the l2cap_ctrl
 * struct; S-frames carry poll/supervise bits, I-frames carry SAR/txseq.
 */
1021 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1023 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1024 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1026 if (enh & L2CAP_CTRL_FRAME_TYPE) {
1028 control->sframe = 1;
1029 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1030 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1036 control->sframe = 0;
1037 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1038 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Same decoding for the 32-bit extended control field (extended window). */
1045 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1047 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1048 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1050 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1052 control->sframe = 1;
1053 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1054 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1060 control->sframe = 0;
1061 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1062 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Strip and decode the control field from the front of an incoming
 * frame, choosing the 16- or 32-bit layout from FLAG_EXT_CTRL.
 */
1069 static inline void __unpack_control(struct l2cap_chan *chan,
1070 struct sk_buff *skb)
1072 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1073 __unpack_extended_control(get_unaligned_le32(skb->data),
1074 &bt_cb(skb)->l2cap);
1075 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1077 __unpack_enhanced_control(get_unaligned_le16(skb->data),
1078 &bt_cb(skb)->l2cap);
1079 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Inverse of __unpack_extended_control: encode into a 32-bit field. */
1083 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1087 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1088 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1090 if (control->sframe) {
1091 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1092 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1093 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1095 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1096 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Inverse of __unpack_enhanced_control: encode into a 16-bit field. */
1102 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1106 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1107 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1109 if (control->sframe) {
1110 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1111 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1112 packed |= L2CAP_CTRL_FRAME_TYPE;
1114 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1115 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the encoded control field into an outgoing frame, just past the
 * basic L2CAP header.
 */
1121 static inline void __pack_control(struct l2cap_chan *chan,
1122 struct l2cap_ctrl *control,
1123 struct sk_buff *skb)
1125 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1126 put_unaligned_le32(__pack_extended_control(control),
1127 skb->data + L2CAP_HDR_SIZE);
1129 put_unaligned_le16(__pack_enhanced_control(control),
1130 skb->data + L2CAP_HDR_SIZE);
/* Header size for ERTM frames: extended vs. enhanced control layout. */
1134 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1136 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1137 return L2CAP_EXT_HDR_SIZE;
1139 return L2CAP_ENH_HDR_SIZE;
/* Build a supervisory (S-frame) PDU: basic header, pre-packed control
 * field, and an optional CRC16 FCS over the whole frame.
 */
1142 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1145 struct sk_buff *skb;
1146 struct l2cap_hdr *lh;
1147 int hlen = __ertm_hdr_size(chan);
1149 if (chan->fcs == L2CAP_FCS_CRC16)
1150 hlen += L2CAP_FCS_SIZE;
1152 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1155 return ERR_PTR(-ENOMEM);
1157 lh = skb_put(skb, L2CAP_HDR_SIZE);
/* L2CAP length excludes the basic header itself. */
1158 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1159 lh->cid = cpu_to_le16(chan->dcid);
1161 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1162 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1164 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1166 if (chan->fcs == L2CAP_FCS_CRC16) {
1167 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1168 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1171 skb->priority = HCI_PRIO_MAX;
/* Send an S-frame for the given control state: maintains the F-bit,
 * RNR-sent bookkeeping and last-acked sequence before building and
 * transmitting the PDU. No-op while an AMP move is in progress.
 */
1175 static void l2cap_send_sframe(struct l2cap_chan *chan,
1176 struct l2cap_ctrl *control)
1178 struct sk_buff *skb;
1181 BT_DBG("chan %p, control %p", chan, control);
1183 if (!control->sframe)
1186 if (__chan_is_moving(chan))
/* Pending F-bit handling; the condition's tail is in lines not visible
 * in this sampled extract.
 */
1189 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1193 if (control->super == L2CAP_SUPER_RR)
1194 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1195 else if (control->super == L2CAP_SUPER_RNR)
1196 set_bit(CONN_RNR_SENT, &chan->conn_state);
1198 if (control->super != L2CAP_SUPER_SREJ) {
1199 chan->last_acked_seq = control->reqseq;
1200 __clear_ack_timer(chan);
1203 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1204 control->final, control->poll, control->super);
1206 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1207 control_field = __pack_extended_control(control);
1209 control_field = __pack_enhanced_control(control);
1211 skb = l2cap_create_sframe_pdu(chan, control_field);
1213 l2cap_do_send(chan, skb);
/* Send an RR (Receiver Ready) or RNR (Receiver Not Ready) S-frame,
 * choosing RNR when the local side is busy, with the given poll bit.
 */
1216 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1218 struct l2cap_ctrl control;
1220 BT_DBG("chan %p, poll %d", chan, poll);
1222 memset(&control, 0, sizeof(control));
1224 control.poll = poll;
1226 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1227 control.super = L2CAP_SUPER_RNR;
1229 control.super = L2CAP_SUPER_RR;
1231 control.reqseq = chan->buffer_seq;
1232 l2cap_send_sframe(chan, &control);
/* True when no Connection Request is outstanding (always true for
 * non-connection-oriented channels).
 */
1235 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1237 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1240 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may be created on an AMP controller:
 * both sides must advertise A2MP, a powered non-BR/EDR AMP controller
 * must exist, and the channel policy must allow/prefer AMP.
 */
1243 static bool __amp_capable(struct l2cap_chan *chan)
1245 struct l2cap_conn *conn = chan->conn;
1246 struct hci_dev *hdev;
1247 bool amp_available = false;
1249 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1252 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1255 read_lock(&hci_dev_list_lock);
1256 list_for_each_entry(hdev, &hci_dev_list, list) {
1257 if (hdev->amp_type != AMP_TYPE_BREDR &&
1258 test_bit(HCI_UP, &hdev->flags)) {
1259 amp_available = true;
1263 read_unlock(&hci_dev_list_lock);
1265 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1266 return amp_available;
/* Validate the channel's extended flow spec; body is in lines not
 * visible in this sampled extract.
 */
1271 static bool l2cap_check_efs(struct l2cap_chan *chan)
1273 /* Check EFS parameters */
/* Send a classic L2CAP Connection Request for this channel, recording
 * the allocated command ident so the response can be matched.
 */
1277 void l2cap_send_conn_req(struct l2cap_chan *chan)
1279 struct l2cap_conn *conn = chan->conn;
1280 struct l2cap_conn_req req;
1282 req.scid = cpu_to_le16(chan->scid);
1283 req.psm = chan->psm;
1285 chan->ident = l2cap_get_ident(conn);
1287 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1289 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* AMP variant: Create Channel Request carrying the target AMP
 * controller id in addition to scid/psm.
 */
1292 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1294 struct l2cap_create_chan_req req;
1295 req.scid = cpu_to_le16(chan->scid);
1296 req.psm = chan->psm;
1297 req.amp_id = amp_id;
1299 chan->ident = l2cap_get_ident(chan->conn);
1301 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP channel move: stop timers, mark
 * queued frames for (re)transmission, reset SREJ/retransmit tracking
 * and park the state machines in their move states.
 */
1305 static void l2cap_move_setup(struct l2cap_chan *chan)
1307 struct sk_buff *skb;
1309 BT_DBG("chan %p", chan);
1311 if (chan->mode != L2CAP_MODE_ERTM)
1314 __clear_retrans_timer(chan);
1315 __clear_monitor_timer(chan);
1316 __clear_ack_timer(chan);
1318 chan->retry_count = 0;
1319 skb_queue_walk(&chan->tx_q, skb) {
1320 if (bt_cb(skb)->l2cap.retries)
1321 bt_cb(skb)->l2cap.retries = 1;
1326 chan->expected_tx_seq = chan->buffer_seq;
1328 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1329 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1330 l2cap_seq_list_clear(&chan->retrans_list);
1331 l2cap_seq_list_clear(&chan->srej_list);
1332 skb_queue_purge(&chan->srej_q);
1334 chan->tx_state = L2CAP_TX_STATE_XMIT;
1335 chan->rx_state = L2CAP_RX_STATE_MOVE;
1337 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP channel move: return to the stable move state and, for
 * ERTM, resynchronize with the peer - the initiator polls the remote
 * (WAIT_F), the responder waits for the poll (WAIT_P).
 */
1340 static void l2cap_move_done(struct l2cap_chan *chan)
1342 u8 move_role = chan->move_role;
1343 BT_DBG("chan %p", chan);
1345 chan->move_state = L2CAP_MOVE_STABLE;
1346 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1348 if (chan->mode != L2CAP_MODE_ERTM)
1351 switch (move_role) {
1352 case L2CAP_MOVE_ROLE_INITIATOR:
1353 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1354 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1356 case L2CAP_MOVE_ROLE_RESPONDER:
1357 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Transition a fully-configured channel to BT_CONNECTED and notify the
 * owner; clears CONF_NOT_COMPLETE (set at creation) and the channel
 * timer. Credit-based channels with no tx credits start suspended.
 */
1362 static void l2cap_chan_ready(struct l2cap_chan *chan)
1364 /* The channel may have already been flagged as connected in
1365 * case of receiving data before the L2CAP info req/rsp
1366 * procedure is complete.
1369 if (chan->state == BT_CONNECTED)
1372 if (chan->state == BT_CONNECTED) {
1373 if (chan->psm == L2CAP_PSM_IPSP) {
1374 struct l2cap_conn *conn = chan->conn;
1376 if (conn->hcon->out)
1378 else if (conn->hcon->type != LE_LINK)
1386 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1387 chan->conf_state = 0;
1388 __clear_chan_timer(chan);
1390 switch (chan->mode) {
1391 case L2CAP_MODE_LE_FLOWCTL:
1392 case L2CAP_MODE_EXT_FLOWCTL:
1393 if (!chan->tx_credits)
1394 chan->ops->suspend(chan);
1398 chan->state = BT_CONNECTED;
1400 chan->ops->ready(chan);
/* Send an LE credit-based connection request for @chan. Guarded by
 * FLAG_LE_CONN_REQ_SENT so the request goes out at most once; the channel
 * ident is stored so the response can be matched later.
 */
1403 static void l2cap_le_connect(struct l2cap_chan *chan)
1405 struct l2cap_conn *conn = chan->conn;
1406 struct l2cap_le_conn_req req;
1408 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
/* Advertise an incoming MTU bounded by the link MTU. */
1412 chan->imtu = chan->conn->mtu;
1414 l2cap_le_flowctl_init(chan, 0);
1416 req.psm = chan->psm;
1417 req.scid = cpu_to_le16(chan->scid);
1418 req.mtu = cpu_to_le16(chan->imtu);
1419 req.mps = cpu_to_le16(chan->mps);
1420 req.credits = cpu_to_le16(chan->rx_credits);
1422 chan->ident = l2cap_get_ident(conn);
1424 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Scratch data used while building an enhanced-credit (ECRED) connection
 * request that batches several deferred channels into one PDU.
 * NOTE(review): lossy extraction — additional members (pdu/scid array,
 * count, pid) are referenced by the builders below but not visible here.
 */
1428 struct l2cap_ecred_conn_data {
1430 struct l2cap_ecred_conn_req req;
1433 struct l2cap_chan *chan;
/* Channel-list iterator callback: fold a deferred EXT_FLOWCTL channel into
 * the ECRED connection request being built in @data, provided it matches
 * the triggering channel's PSM and peer PID and has not been sent yet.
 * Matching channels share the same ident so one response resolves them all.
 */
1438 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1440 struct l2cap_ecred_conn_data *conn = data;
/* Skip the channel that triggered the batch; it is already in the PDU. */
1443 if (chan == conn->chan)
1446 if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1449 pid = chan->ops->get_peer_pid(chan);
1451 /* Only add deferred channels with the same PID/PSM */
1452 if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1453 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1456 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1459 l2cap_ecred_init(chan, 0);
1461 /* Set the same ident so we can match on the rsp */
1462 chan->ident = conn->chan->ident;
1464 /* Include all channels deferred */
1465 conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
/* Build and send an enhanced-credit (ECRED) connection request for @chan,
 * pulling in any compatible deferred channels via
 * l2cap_ecred_defer_connect() so they share one request PDU.
 * Skipped entirely while the channel still has FLAG_DEFER_SETUP, and
 * guarded against duplicates by FLAG_ECRED_CONN_REQ_SENT.
 */
1470 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1472 struct l2cap_conn *conn = chan->conn;
1473 struct l2cap_ecred_conn_data data;
1475 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1478 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1481 l2cap_ecred_init(chan, 0);
1483 memset(&data, 0, sizeof(data));
1484 data.pdu.req.psm = chan->psm;
1485 data.pdu.req.mtu = cpu_to_le16(chan->imtu);
1486 data.pdu.req.mps = cpu_to_le16(chan->mps);
1487 data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1488 data.pdu.scid[0] = cpu_to_le16(chan->scid);
1490 chan->ident = l2cap_get_ident(conn);
1491 data.pid = chan->ops->get_peer_pid(chan);
/* NOTE(review): data.pid is assigned twice in this extraction (1491/1495);
 * likely an artifact of missing intervening lines — confirm upstream.
 */
1495 data.pid = chan->ops->get_peer_pid(chan);
1497 __l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
/* PDU length = fixed request header plus one __le16 scid per channel. */
1499 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1500 sizeof(data.pdu.req) + data.count * sizeof(__le16),
/* Drive an LE channel forward: enforce the channel's security level via
 * SMP first, then either mark the channel ready or, for channels in
 * BT_CONNECT, issue the appropriate (ECRED or LE credit-based) request.
 * NOTE(review): lossy extraction — the branch conditions between security
 * check and l2cap_chan_ready() are not fully visible here.
 */
1504 static void l2cap_le_start(struct l2cap_chan *chan)
1506 struct l2cap_conn *conn = chan->conn;
1508 if (!smp_conn_security(conn->hcon, chan->sec_level))
1512 l2cap_chan_ready(chan);
1516 if (chan->state == BT_CONNECT) {
1517 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1518 l2cap_ecred_connect(chan);
1520 l2cap_le_connect(chan);
/* Kick off channel establishment by transport: AMP-capable channels start
 * AMP discovery, LE links go through l2cap_le_start(), everything else
 * sends a classic L2CAP connection request.
 */
1524 static void l2cap_start_connection(struct l2cap_chan *chan)
1526 if (__amp_capable(chan)) {
1527 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1528 a2mp_discover_amp(chan);
1529 } else if (chan->conn->hcon->type == LE_LINK) {
1530 l2cap_le_start(chan);
1532 l2cap_send_conn_req(chan);
/* Query the remote's feature mask with an L2CAP_INFO_REQ, at most once per
 * connection (tracked via L2CAP_INFO_FEAT_MASK_REQ_SENT). Arms the info
 * timer so a silent peer doesn't stall connection setup forever.
 */
1536 static void l2cap_request_info(struct l2cap_conn *conn)
1538 struct l2cap_info_req req;
1540 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1543 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1545 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1546 conn->info_ident = l2cap_get_ident(conn);
1548 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1550 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Return true if @hcon's encryption key is acceptable for L2CAP use: links
 * that are not encrypted pass (no key to judge), encrypted links must meet
 * the hdev-configured minimum key size.
 * NOTE(review): the FIPS branch body (forcing a 16-byte minimum) is not
 * visible in this extraction — confirm upstream.
 */
1554 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1556 /* The minimum encryption key size needs to be enforced by the
1557 * host stack before establishing any L2CAP connections. The
1558 * specification in theory allows a minimum of 1, but to align
1559 * BR/EDR and LE transports, a minimum of 7 is chosen.
1561 * This check might also be called for unencrypted connections
1562 * that have no key size requirements. Ensure that the link is
1563 * actually encrypted before enforcing a key size.
1565 int min_key_size = hcon->hdev->min_enc_key_size;
1567 /* On FIPS security level, key size must be 16 bytes */
1568 if (hcon->sec_level == BT_SECURITY_FIPS)
1571 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1572 hcon->enc_key_size >= min_key_size);
/* Start connection setup for @chan. LE links are dispatched immediately;
 * BR/EDR first completes the feature-mask info exchange, checks channel
 * security and a sufficient encryption key size, and only then starts the
 * connection — otherwise the disconnect timer is armed instead.
 */
1575 static void l2cap_do_start(struct l2cap_chan *chan)
1577 struct l2cap_conn *conn = chan->conn;
1579 if (conn->hcon->type == LE_LINK) {
1580 l2cap_le_start(chan);
/* BR/EDR: feature-mask exchange must run (and finish) before connecting. */
1584 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1585 l2cap_request_info(conn);
1589 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1592 if (!l2cap_chan_check_security(chan, true) ||
1593 !__l2cap_no_conn_pending(chan))
1596 if (l2cap_check_enc_key_size(conn->hcon))
1597 l2cap_start_connection(chan);
/* Key too small: let the disconnect timer tear the channel down. */
1599 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Non-zero iff @mode (ERTM or streaming) is supported both locally and in
 * the remote's advertised @feat_mask. Other modes fall through.
 * NOTE(review): the condition guarding the local_feat_mask |= line is not
 * visible in this extraction (likely a disable_ertm check) — confirm.
 */
1602 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1604 u32 local_feat_mask = l2cap_feat_mask;
1606 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1609 case L2CAP_MODE_ERTM:
1610 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1611 case L2CAP_MODE_STREAMING:
1612 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Tear a channel down with a disconnection request: stop ERTM timers on a
 * connected ERTM channel, short-circuit A2MP channels (state change only),
 * otherwise send L2CAP_DISCONN_REQ and move to BT_DISCONN recording @err.
 */
1618 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1620 struct l2cap_conn *conn = chan->conn;
1621 struct l2cap_disconn_req req;
1626 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1627 __clear_retrans_timer(chan);
1628 __clear_monitor_timer(chan);
1629 __clear_ack_timer(chan);
/* A2MP has no disconnect PDU exchange; just flip the state. */
1632 if (chan->scid == L2CAP_CID_A2MP) {
1633 l2cap_state_change(chan, BT_DISCONN);
1637 req.dcid = cpu_to_le16(chan->dcid);
1638 req.scid = cpu_to_le16(chan->scid);
1639 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1642 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1645 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn (under chan_lock) and push each one forward:
 * non-connection-oriented channels become ready immediately; BT_CONNECT
 * channels are security/key-size checked and then started or closed;
 * BT_CONNECT2 channels get a connection response (success, pending-auth,
 * or deferred via ->defer) followed by the first configuration request.
 */
1646 static void l2cap_conn_start(struct l2cap_conn *conn)
1648 struct l2cap_chan *chan, *tmp;
1650 BT_DBG("conn %p", conn);
1652 mutex_lock(&conn->chan_lock);
1654 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1655 l2cap_chan_lock(chan);
1657 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1658 l2cap_chan_ready(chan);
1659 l2cap_chan_unlock(chan);
1663 if (chan->state == BT_CONNECT) {
1664 if (!l2cap_chan_check_security(chan, true) ||
1665 !__l2cap_no_conn_pending(chan)) {
1666 l2cap_chan_unlock(chan);
/* Mode not supported by either side and the device insists on it: abort. */
1670 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1671 && test_bit(CONF_STATE2_DEVICE,
1672 &chan->conf_state)) {
1673 l2cap_chan_close(chan, ECONNRESET);
1674 l2cap_chan_unlock(chan);
1678 if (l2cap_check_enc_key_size(conn->hcon))
1679 l2cap_start_connection(chan);
1681 l2cap_chan_close(chan, ECONNREFUSED);
1683 } else if (chan->state == BT_CONNECT2) {
1684 struct l2cap_conn_rsp rsp;
1686 rsp.scid = cpu_to_le16(chan->dcid);
1687 rsp.dcid = cpu_to_le16(chan->scid);
1689 if (l2cap_chan_check_security(chan, false)) {
1690 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
/* Owner wants to accept/reject explicitly: answer "pending" and defer. */
1691 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1692 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1693 chan->ops->defer(chan);
1696 l2cap_state_change(chan, BT_CONFIG);
1697 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1698 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1701 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1702 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1705 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only send the first config request once, and only on success. */
1708 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1709 rsp.result != L2CAP_CR_SUCCESS) {
1710 l2cap_chan_unlock(chan);
1714 set_bit(CONF_REQ_SENT, &chan->conf_state);
1715 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1716 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1717 chan->num_conf_req++;
1720 l2cap_chan_unlock(chan);
1723 mutex_unlock(&conn->chan_lock);
/* LE link became ready: trigger SMP security for outgoing pairing, and if
 * we are peripheral with a connection interval outside the configured
 * [min, max] range, request a connection parameter update from the central.
 */
1726 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1728 struct hci_conn *hcon = conn->hcon;
1729 struct hci_dev *hdev = hcon->hdev;
1731 BT_DBG("%s conn %p", hdev->name, conn);
1733 /* For outgoing pairing which doesn't necessarily have an
1734 * associated socket (e.g. mgmt_pair_device).
1737 smp_conn_security(hcon, hcon->pending_sec_level);
1739 /* For LE peripheral connections, make sure the connection interval
1740 * is in the range of the minimum and maximum interval that has
1741 * been configured for this connection. If not, then trigger
1742 * the connection update procedure.
1744 if (hcon->role == HCI_ROLE_SLAVE &&
1745 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1746 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1747 struct l2cap_conn_param_update_req req;
1749 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1750 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1751 req.latency = cpu_to_le16(hcon->le_conn_latency);
1752 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1754 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1755 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* Link-level connection is up: start the BR/EDR info exchange, then walk
 * all channels (skipping A2MP) and advance each per transport/state.
 * Afterwards run LE-specific setup and release any queued inbound frames
 * by scheduling pending_rx_work.
 */
1759 static void l2cap_conn_ready(struct l2cap_conn *conn)
1761 struct l2cap_chan *chan;
1762 struct hci_conn *hcon = conn->hcon;
1764 BT_DBG("conn %p", conn);
1766 if (hcon->type == ACL_LINK)
1767 l2cap_request_info(conn);
1769 mutex_lock(&conn->chan_lock);
1771 list_for_each_entry(chan, &conn->chan_l, list) {
1773 l2cap_chan_lock(chan);
/* A2MP fixed channel manages itself; leave it alone. */
1775 if (chan->scid == L2CAP_CID_A2MP) {
1776 l2cap_chan_unlock(chan);
1780 if (hcon->type == LE_LINK) {
1781 l2cap_le_start(chan);
1782 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1783 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1784 l2cap_chan_ready(chan);
1785 } else if (chan->state == BT_CONNECT) {
1786 l2cap_do_start(chan);
1789 l2cap_chan_unlock(chan);
1792 mutex_unlock(&conn->chan_lock);
1794 if (hcon->type == LE_LINK)
1795 l2cap_le_conn_ready(conn);
1797 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1800 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate @err to every channel on @conn that demanded reliability
 * (FLAG_FORCE_RELIABLE); other channels are left untouched.
 */
1801 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1803 struct l2cap_chan *chan;
1805 BT_DBG("conn %p", conn);
1807 mutex_lock(&conn->chan_lock);
1809 list_for_each_entry(chan, &conn->chan_l, list) {
1810 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1811 l2cap_chan_set_err(chan, err);
1814 mutex_unlock(&conn->chan_lock);
/* Info-request timer expired: the peer never answered the feature-mask
 * query. Mark the exchange done so setup can proceed with defaults and
 * retry starting the pending channels.
 */
1817 static void l2cap_info_timeout(struct work_struct *work)
1819 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1822 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1823 conn->info_ident = 0;
1825 l2cap_conn_start(conn);
1830 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1831 * callback is called during registration. The ->remove callback is called
1832 * during unregistration.
1833 * An l2cap_user object can either be explicitly unregistered, or it is
1834 * implicitly unregistered when the underlying l2cap_conn object is deleted.
1834 * This guarantees that l2cap->hcon,
1835 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1836 * External modules must own a reference to the l2cap_conn object if they intend
1837 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1838 * any time if they don't.
/* Register an external @user on @conn, calling user->probe() under the
 * hci_dev lock. Fails if the user is already registered or the connection
 * has already been unregistered (conn->hchan == NULL).
 * Returns 0 on success; error paths are partially elided in this extract.
 */
1841 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1843 struct hci_dev *hdev = conn->hcon->hdev;
1846 /* We need to check whether l2cap_conn is registered. If it is not, we
1847 * must not register the l2cap_user. l2cap_conn_del() unregisters
1848 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1849 * relies on the parent hci_conn object to be locked. This itself relies
1850 * on the hci_dev object to be locked. So we must lock the hci device
/* A non-empty list node means this user is already registered somewhere. */
1855 if (!list_empty(&user->list)) {
1860 /* conn->hchan is NULL after l2cap_conn_del() was called */
1866 ret = user->probe(conn, user);
1870 list_add(&user->list, &conn->users);
1874 hci_dev_unlock(hdev);
1877 EXPORT_SYMBOL(l2cap_register_user);
/* Explicitly unregister @user from @conn under the hci_dev lock: unlink it
 * and invoke its ->remove() callback. No-op if it was never registered.
 */
1879 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1881 struct hci_dev *hdev = conn->hcon->hdev;
1885 if (list_empty(&user->list))
1888 list_del_init(&user->list);
1889 user->remove(conn, user);
1892 hci_dev_unlock(hdev);
1894 EXPORT_SYMBOL(l2cap_unregister_user);
/* Drain conn->users on connection teardown, unlinking each user and
 * calling its ->remove() callback. Caller provides the required locking.
 */
1896 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1898 struct l2cap_user *user;
1900 while (!list_empty(&conn->users)) {
1901 user = list_first_entry(&conn->users, struct l2cap_user, list);
1902 list_del_init(&user->list);
1903 user->remove(conn, user);
/* Destroy the L2CAP connection attached to @hcon: free buffered rx data,
 * cancel pending work items (without flushing, to avoid deadlocking on our
 * own workqueue), remove registered users, close every channel with @err,
 * drop the HCI channel, and release our reference to the l2cap_conn.
 */
1907 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1909 struct l2cap_conn *conn = hcon->l2cap_data;
1910 struct l2cap_chan *chan, *l;
1915 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1917 kfree_skb(conn->rx_skb);
1919 skb_queue_purge(&conn->pending_rx);
1921 /* We can not call flush_work(&conn->pending_rx_work) here since we
1922 * might block if we are running on a worker from the same workqueue
1923 * pending_rx_work is waiting on.
1925 if (work_pending(&conn->pending_rx_work))
1926 cancel_work_sync(&conn->pending_rx_work);
1928 if (work_pending(&conn->id_addr_update_work))
1929 cancel_work_sync(&conn->id_addr_update_work);
1931 l2cap_unregister_all_users(conn);
1933 /* Force the connection to be immediately dropped */
1934 hcon->disc_timeout = 0;
1936 mutex_lock(&conn->chan_lock);
/* Hold each channel across del/close so the callbacks can't free it. */
1939 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1940 l2cap_chan_hold(chan);
1941 l2cap_chan_lock(chan);
1943 l2cap_chan_del(chan, err);
1945 chan->ops->close(chan);
1947 l2cap_chan_unlock(chan);
1948 l2cap_chan_put(chan);
1951 mutex_unlock(&conn->chan_lock);
1953 hci_chan_del(conn->hchan);
1955 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1956 cancel_delayed_work_sync(&conn->info_timer);
1958 hcon->l2cap_data = NULL;
1960 l2cap_conn_put(conn);
/* kref release callback: drop the hci_conn reference held by the l2cap_conn.
 * NOTE(review): the kfree(conn) line is not visible in this extraction.
 */
1963 static void l2cap_conn_free(struct kref *ref)
1965 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1967 hci_conn_put(conn->hcon);
/* Take a reference on @conn; paired with l2cap_conn_put(). */
1971 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1973 kref_get(&conn->ref);
1976 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; frees it via l2cap_conn_free() on last put. */
1978 void l2cap_conn_put(struct l2cap_conn *conn)
1980 kref_put(&conn->ref, l2cap_conn_free);
1982 EXPORT_SYMBOL(l2cap_conn_put);
1984 /* ---- Socket interface ---- */
1986 /* Find socket with psm and source / destination bdaddr.
1987 * Returns closest match.
/* Look up a channel in the global list by PSM and source/destination
 * addresses, honouring the link type. An exact src+dst match wins and is
 * returned immediately; otherwise the closest wildcard (BDADDR_ANY) match
 * found is returned. The returned channel carries a reference taken with
 * l2cap_chan_hold_unless_zero() — caller must put it.
 */
1989 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1994 struct l2cap_chan *c, *tmp, *c1 = NULL;
1996 read_lock(&chan_list_lock);
1998 list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
1999 if (state && c->state != state)
/* Transport must match: BR/EDR sources only on ACL, non-BR/EDR on LE. */
2002 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
2005 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
2008 if (c->psm == psm) {
2009 int src_match, dst_match;
2010 int src_any, dst_any;
2013 src_match = !bacmp(&c->src, src);
2014 dst_match = !bacmp(&c->dst, dst);
2015 if (src_match && dst_match) {
/* Skip channels already being freed (refcount hit zero). */
2016 if (!l2cap_chan_hold_unless_zero(c))
2019 read_unlock(&chan_list_lock);
2024 src_any = !bacmp(&c->src, BDADDR_ANY);
2025 dst_any = !bacmp(&c->dst, BDADDR_ANY);
2026 if ((src_match && dst_any) || (src_any && dst_match) ||
2027 (src_any && dst_any))
2033 c1 = l2cap_chan_hold_unless_zero(c1);
2035 read_unlock(&chan_list_lock);
/* ERTM monitor timer fired: feed L2CAP_EV_MONITOR_TO into the tx state
 * machine under the channel lock, then drop the reference the timer held.
 * NOTE(review): the early-exit condition before line 2050 (likely a
 * BT_CONNECTED check) is not visible in this extraction.
 */
2040 static void l2cap_monitor_timeout(struct work_struct *work)
2042 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2043 monitor_timer.work);
2045 BT_DBG("chan %p", chan);
2047 l2cap_chan_lock(chan);
2050 l2cap_chan_unlock(chan);
2051 l2cap_chan_put(chan);
2055 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2057 l2cap_chan_unlock(chan);
2058 l2cap_chan_put(chan);
/* ERTM retransmission timer fired: feed L2CAP_EV_RETRANS_TO into the tx
 * state machine under the channel lock, then drop the timer's reference.
 * NOTE(review): the early-exit condition before line 2071 is not visible
 * in this extraction.
 */
2061 static void l2cap_retrans_timeout(struct work_struct *work)
2063 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2064 retrans_timer.work);
2066 BT_DBG("chan %p", chan);
2068 l2cap_chan_lock(chan);
2071 l2cap_chan_unlock(chan);
2072 l2cap_chan_put(chan);
2076 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2077 l2cap_chan_unlock(chan);
2078 l2cap_chan_put(chan);
/* Streaming-mode transmit: append @skbs to tx_q and immediately send
 * every queued frame, stamping each with the next tx sequence number and,
 * if enabled, a trailing CRC16 FCS. Streaming mode never retransmits, so
 * frames are sent exactly once. Skipped while a channel move is active.
 */
2081 static void l2cap_streaming_send(struct l2cap_chan *chan,
2082 struct sk_buff_head *skbs)
2084 struct sk_buff *skb;
2085 struct l2cap_ctrl *control;
2087 BT_DBG("chan %p, skbs %p", chan, skbs);
2089 if (__chan_is_moving(chan))
2092 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2094 while (!skb_queue_empty(&chan->tx_q)) {
2096 skb = skb_dequeue(&chan->tx_q);
2098 bt_cb(skb)->l2cap.retries = 1;
2099 control = &bt_cb(skb)->l2cap;
2101 control->reqseq = 0;
2102 control->txseq = chan->next_tx_seq;
2104 __pack_control(chan, control, skb);
2106 if (chan->fcs == L2CAP_FCS_CRC16) {
2107 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2108 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2111 l2cap_do_send(chan, skb);
2113 BT_DBG("Sent txseq %u", control->txseq);
2115 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2116 chan->frames_sent++;
/* ERTM transmit pump: send I-frames from tx_send_head while the remote tx
 * window has room and the state machine is in XMIT. Each frame gets the
 * current reqseq/txseq, optional FCS, and is transmitted as a clone so the
 * original stays queued for possible retransmission. Returns the number
 * of frames sent (0 or an error when not connected / peer busy / moving).
 */
2120 static int l2cap_ertm_send(struct l2cap_chan *chan)
2122 struct sk_buff *skb, *tx_skb;
2123 struct l2cap_ctrl *control;
2126 BT_DBG("chan %p", chan);
2128 if (chan->state != BT_CONNECTED)
2131 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2134 if (__chan_is_moving(chan))
2137 while (chan->tx_send_head &&
2138 chan->unacked_frames < chan->remote_tx_win &&
2139 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2141 skb = chan->tx_send_head;
2143 bt_cb(skb)->l2cap.retries = 1;
2144 control = &bt_cb(skb)->l2cap;
2146 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Piggyback an acknowledgement of everything received so far. */
2149 control->reqseq = chan->buffer_seq;
2150 chan->last_acked_seq = chan->buffer_seq;
2151 control->txseq = chan->next_tx_seq;
2153 __pack_control(chan, control, skb);
2155 if (chan->fcs == L2CAP_FCS_CRC16) {
2156 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2157 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2160 /* Clone after data has been modified. Data is assumed to be
2161 read-only (for locking purposes) on cloned sk_buffs.
2163 tx_skb = skb_clone(skb, GFP_KERNEL);
2168 __set_retrans_timer(chan);
2170 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2171 chan->unacked_frames++;
2172 chan->frames_sent++;
2175 if (skb_queue_is_last(&chan->tx_q, skb))
2176 chan->tx_send_head = NULL;
2178 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2180 l2cap_do_send(chan, tx_skb);
2181 BT_DBG("Sent txseq %u", control->txseq);
2184 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2185 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on retrans_list: look the frame
 * up in tx_q, enforce max_tx (disconnecting when exceeded), rebuild its
 * control field/FCS in place, and resend. Cloned skbs are deep-copied
 * first because clones share read-only data. Skipped when the peer is
 * busy or a channel move is in progress.
 */
2190 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2192 struct l2cap_ctrl control;
2193 struct sk_buff *skb;
2194 struct sk_buff *tx_skb;
2197 BT_DBG("chan %p", chan);
2199 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2202 if (__chan_is_moving(chan))
2205 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2206 seq = l2cap_seq_list_pop(&chan->retrans_list);
2208 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2210 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2215 bt_cb(skb)->l2cap.retries++;
2216 control = bt_cb(skb)->l2cap;
/* Retry budget exhausted: give up on the whole channel. */
2218 if (chan->max_tx != 0 &&
2219 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2220 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2221 l2cap_send_disconn_req(chan, ECONNRESET);
2222 l2cap_seq_list_clear(&chan->retrans_list);
2226 control.reqseq = chan->buffer_seq;
2227 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2232 if (skb_cloned(skb)) {
2233 /* Cloned sk_buffs are read-only, so we need a
2236 tx_skb = skb_copy(skb, GFP_KERNEL);
2238 tx_skb = skb_clone(skb, GFP_KERNEL);
2242 l2cap_seq_list_clear(&chan->retrans_list);
2246 /* Update skb contents */
2247 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2248 put_unaligned_le32(__pack_extended_control(&control),
2249 tx_skb->data + L2CAP_HDR_SIZE);
2251 put_unaligned_le16(__pack_enhanced_control(&control),
2252 tx_skb->data + L2CAP_HDR_SIZE);
/* Control field changed, so the FCS must be recomputed too. */
2256 if (chan->fcs == L2CAP_FCS_CRC16) {
2257 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2258 tx_skb->len - L2CAP_FCS_SIZE);
2259 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2263 l2cap_do_send(chan, tx_skb);
2265 BT_DBG("Resent txseq %d", control.txseq);
2267 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq: queue it on
 * retrans_list and run the resend pump.
 */
2271 static void l2cap_retransmit(struct l2cap_chan *chan,
2272 struct l2cap_ctrl *control)
2274 BT_DBG("chan %p, control %p", chan, control);
2276 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2277 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame starting from control->reqseq: rebuild
 * retrans_list from tx_q (stopping at tx_send_head, which marks the first
 * never-sent frame) and run the resend pump. Skipped while the peer is
 * busy.
 */
2280 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2281 struct l2cap_ctrl *control)
2283 struct sk_buff *skb;
2285 BT_DBG("chan %p, control %p", chan, control);
2288 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2290 l2cap_seq_list_clear(&chan->retrans_list);
2292 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2295 if (chan->unacked_frames) {
/* Find the first frame at/after reqseq that was actually transmitted. */
2296 skb_queue_walk(&chan->tx_q, skb) {
2297 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2298 skb == chan->tx_send_head)
2302 skb_queue_walk_from(&chan->tx_q, skb) {
2303 if (skb == chan->tx_send_head)
2306 l2cap_seq_list_append(&chan->retrans_list,
2307 bt_cb(skb)->l2cap.txseq);
2310 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames. If we are locally busy, send RNR.
 * Otherwise try to piggyback the ack on pending I-frames; if none carried
 * it, send an explicit RR once the unacked window reaches 3/4 of ack_win
 * (threshold computed without multiply/divide), else (re)arm the ack timer
 * to batch further acks.
 */
2314 static void l2cap_send_ack(struct l2cap_chan *chan)
2316 struct l2cap_ctrl control;
2317 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2318 chan->last_acked_seq);
2321 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2322 chan, chan->last_acked_seq, chan->buffer_seq);
2324 memset(&control, 0, sizeof(control));
2327 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2328 chan->rx_state == L2CAP_RX_STATE_RECV) {
2329 __clear_ack_timer(chan);
2330 control.super = L2CAP_SUPER_RNR;
2331 control.reqseq = chan->buffer_seq;
2332 l2cap_send_sframe(chan, &control);
2334 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2335 l2cap_ertm_send(chan);
2336 /* If any i-frames were sent, they included an ack */
2337 if (chan->buffer_seq == chan->last_acked_seq)
2341 /* Ack now if the window is 3/4ths full.
2342 * Calculate without mul or div
2344 threshold = chan->ack_win;
2345 threshold += threshold << 1;
2348 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2351 if (frames_to_ack >= threshold) {
2352 __clear_ack_timer(chan);
2353 control.super = L2CAP_SUPER_RR;
2354 control.reqseq = chan->buffer_seq;
2355 l2cap_send_sframe(chan, &control);
2360 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count bytes
 * go into skb's linear area, the remainder into a chain of continuation
 * fragments (each at most conn->mtu) hung off skb's frag_list, updating
 * skb->len/data_len as fragments are appended. Allocation failures return
 * the error from ->alloc_skb; copy failures are partially elided here.
 */
2364 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2365 struct msghdr *msg, int len,
2366 int count, struct sk_buff *skb)
2368 struct l2cap_conn *conn = chan->conn;
2369 struct sk_buff **frag;
2372 if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2378 /* Continuation fragments (no L2CAP header) */
2379 frag = &skb_shinfo(skb)->frag_list;
2381 struct sk_buff *tmp;
2383 count = min_t(unsigned int, conn->mtu, len);
2385 tmp = chan->ops->alloc_skb(chan, 0, count,
2386 msg->msg_flags & MSG_DONTWAIT);
2388 return PTR_ERR(tmp);
2392 if (!copy_from_iter_full(skb_put(*frag, count), count,
/* Parent skb's accounting must include each new fragment. */
2399 skb->len += (*frag)->len;
2400 skb->data_len += (*frag)->len;
2402 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header with the PSM
 * prepended to the payload, body filled from @msg via
 * l2cap_skbuff_fromiovec(). Returns the skb or an ERR_PTR on allocation /
 * copy failure.
 */
2408 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2409 struct msghdr *msg, size_t len)
2411 struct l2cap_conn *conn = chan->conn;
2412 struct sk_buff *skb;
2413 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2414 struct l2cap_hdr *lh;
2416 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2417 __le16_to_cpu(chan->psm), len);
/* Linear part is capped by the link MTU; the rest goes into fragments. */
2419 count = min_t(unsigned int, (conn->mtu - hlen), len);
2421 skb = chan->ops->alloc_skb(chan, hlen, count,
2422 msg->msg_flags & MSG_DONTWAIT);
2426 /* Create L2CAP header */
2427 lh = skb_put(skb, L2CAP_HDR_SIZE);
2428 lh->cid = cpu_to_le16(chan->dcid);
2429 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2430 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2432 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2433 if (unlikely(err < 0)) {
2435 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header, payload copied
 * from @msg. Returns the skb or an ERR_PTR on failure.
 */
2440 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2441 struct msghdr *msg, size_t len)
2443 struct l2cap_conn *conn = chan->conn;
2444 struct sk_buff *skb;
2446 struct l2cap_hdr *lh;
2448 BT_DBG("chan %p len %zu", chan, len);
2450 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2452 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2453 msg->msg_flags & MSG_DONTWAIT);
2457 /* Create L2CAP header */
2458 lh = skb_put(skb, L2CAP_HDR_SIZE);
2459 lh->cid = cpu_to_le16(chan->dcid);
2460 lh->len = cpu_to_le16(len);
2462 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2463 if (unlikely(err < 0)) {
2465 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control field
 * (enhanced or extended per FLAG_EXT_CTRL, filled in at send time), an
 * optional SDU-length field for the first segment, reserved FCS space,
 * and the payload from @msg. Returns the skb or ERR_PTR on failure.
 */
2470 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2471 struct msghdr *msg, size_t len,
2474 struct l2cap_conn *conn = chan->conn;
2475 struct sk_buff *skb;
2476 int err, count, hlen;
2477 struct l2cap_hdr *lh;
2479 BT_DBG("chan %p len %zu", chan, len);
2482 return ERR_PTR(-ENOTCONN);
2484 hlen = __ertm_hdr_size(chan);
/* SDU length field is only present on the first segment of an SDU. */
2487 hlen += L2CAP_SDULEN_SIZE;
2489 if (chan->fcs == L2CAP_FCS_CRC16)
2490 hlen += L2CAP_FCS_SIZE;
2492 count = min_t(unsigned int, (conn->mtu - hlen), len);
2494 skb = chan->ops->alloc_skb(chan, hlen, count,
2495 msg->msg_flags & MSG_DONTWAIT);
2499 /* Create L2CAP header */
2500 lh = skb_put(skb, L2CAP_HDR_SIZE);
2501 lh->cid = cpu_to_le16(chan->dcid);
2502 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2504 /* Control header is populated later */
2505 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2506 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2508 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2511 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2513 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2514 if (unlikely(err < 0)) {
2516 return ERR_PTR(err);
2519 bt_cb(skb)->l2cap.fcs = chan->fcs;
2520 bt_cb(skb)->l2cap.retries = 0;
/* Split an SDU of @len bytes from @msg into I-frame PDUs on @seg_queue.
 * PDU size starts from the HCI MTU (capped for BR/EDR so each ERTM PDU
 * fits one HCI fragment), minus FCS and ERTM header overhead, and is
 * further bounded by the remote MPS. SAR markers are UNSEGMENTED for a
 * single-PDU SDU, else START / CONTINUE / END. On PDU-build failure the
 * queue is purged and the error returned.
 */
2524 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2525 struct sk_buff_head *seg_queue,
2526 struct msghdr *msg, size_t len)
2528 struct sk_buff *skb;
2533 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2535 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2536 * so fragmented skbs are not used. The HCI layer's handling
2537 * of fragmented skbs is not compatible with ERTM's queueing.
2540 /* PDU size is derived from the HCI MTU */
2541 pdu_len = chan->conn->mtu;
2543 /* Constrain PDU size for BR/EDR connections */
2545 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2547 /* Adjust for largest possible L2CAP overhead. */
2549 pdu_len -= L2CAP_FCS_SIZE;
2551 pdu_len -= __ertm_hdr_size(chan);
2553 /* Remote device may have requested smaller PDUs */
2554 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2556 if (len <= pdu_len) {
2557 sar = L2CAP_SAR_UNSEGMENTED;
2561 sar = L2CAP_SAR_START;
2566 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2569 __skb_queue_purge(seg_queue);
2570 return PTR_ERR(skb);
2573 bt_cb(skb)->l2cap.sar = sar;
2574 __skb_queue_tail(seg_queue, skb);
/* Last remaining chunk becomes the END segment; middles are CONTINUE. */
2580 if (len <= pdu_len) {
2581 sar = L2CAP_SAR_END;
2584 sar = L2CAP_SAR_CONTINUE;
/* Build an LE credit-based-mode PDU (K-frame): L2CAP header plus an SDU
 * length field on the first segment only, payload from @msg. Returns the
 * skb or ERR_PTR on failure.
 */
2591 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2593 size_t len, u16 sdulen)
2595 struct l2cap_conn *conn = chan->conn;
2596 struct sk_buff *skb;
2597 int err, count, hlen;
2598 struct l2cap_hdr *lh;
2600 BT_DBG("chan %p len %zu", chan, len);
2603 return ERR_PTR(-ENOTCONN);
2605 hlen = L2CAP_HDR_SIZE;
/* First segment carries the total SDU length. */
2608 hlen += L2CAP_SDULEN_SIZE;
2610 count = min_t(unsigned int, (conn->mtu - hlen), len);
2612 skb = chan->ops->alloc_skb(chan, hlen, count,
2613 msg->msg_flags & MSG_DONTWAIT);
2617 /* Create L2CAP header */
2618 lh = skb_put(skb, L2CAP_HDR_SIZE);
2619 lh->cid = cpu_to_le16(chan->dcid);
2620 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2623 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2625 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2626 if (unlikely(err < 0)) {
2628 return ERR_PTR(err);
/* Split an SDU into LE credit-based PDUs on @seg_queue. The first PDU
 * reserves L2CAP_SDULEN_SIZE for the SDU length; subsequent PDUs get the
 * full remote MPS back (pdu_len += SDULEN after the first). On failure
 * the queue is purged and the error returned.
 */
2634 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2635 struct sk_buff_head *seg_queue,
2636 struct msghdr *msg, size_t len)
2638 struct sk_buff *skb;
2642 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2645 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2651 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2653 __skb_queue_purge(seg_queue);
2654 return PTR_ERR(skb);
2657 __skb_queue_tail(seg_queue, skb);
/* Only the first PDU carries the SDU length field. */
2663 pdu_len += L2CAP_SDULEN_SIZE;
/* Send queued LE credit-based frames while the peer has granted credits,
 * one credit consumed per frame dequeued from tx_q.
 */
2670 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2674 BT_DBG("chan %p", chan);
2676 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2677 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2682 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2683 skb_queue_len(&chan->tx_q));
/* Public entry point for sending @len bytes from @msg on @chan, dispatching
 * on channel type/mode:
 *  - connectionless channels: single G-frame, sent immediately;
 *  - LE/EXT flow-control: segment into K-frames, send while credits last,
 *    suspend the owner when credits run out;
 *  - basic mode: single B-frame;
 *  - ERTM/streaming: segment into I-frames, then hand to the tx state
 *    machine (ERTM) or send directly (streaming).
 * All modes check the outgoing MTU, and re-check BT_CONNECTED after any
 * allocation that may have dropped the channel lock. Returns bytes sent
 * or a negative errno (some error paths elided in this extraction).
 */
2686 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2688 struct sk_buff *skb;
2690 struct sk_buff_head seg_queue;
2695 /* Connectionless channel */
2696 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2697 skb = l2cap_create_connless_pdu(chan, msg, len);
2699 return PTR_ERR(skb);
2701 /* Channel lock is released before requesting new skb and then
2702 * reacquired thus we need to recheck channel state.
2704 if (chan->state != BT_CONNECTED) {
2709 l2cap_do_send(chan, skb);
2713 switch (chan->mode) {
2714 case L2CAP_MODE_LE_FLOWCTL:
2715 case L2CAP_MODE_EXT_FLOWCTL:
2716 /* Check outgoing MTU */
2717 if (len > chan->omtu)
2720 __skb_queue_head_init(&seg_queue);
2722 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2724 if (chan->state != BT_CONNECTED) {
2725 __skb_queue_purge(&seg_queue);
2732 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2734 l2cap_le_flowctl_send(chan);
/* Out of credits: block the owner until the peer grants more. */
2736 if (!chan->tx_credits)
2737 chan->ops->suspend(chan);
2743 case L2CAP_MODE_BASIC:
2744 /* Check outgoing MTU */
2745 if (len > chan->omtu)
2748 /* Create a basic PDU */
2749 skb = l2cap_create_basic_pdu(chan, msg, len);
2751 return PTR_ERR(skb);
2753 /* Channel lock is released before requesting new skb and then
2754 * reacquired thus we need to recheck channel state.
2756 if (chan->state != BT_CONNECTED) {
2761 l2cap_do_send(chan, skb);
2765 case L2CAP_MODE_ERTM:
2766 case L2CAP_MODE_STREAMING:
2767 /* Check outgoing MTU */
2768 if (len > chan->omtu) {
2773 __skb_queue_head_init(&seg_queue);
2775 /* Do segmentation before calling in to the state machine,
2776 * since it's possible to block while waiting for memory
2779 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2781 /* The channel could have been closed while segmenting,
2782 * check that it is still connected.
2784 if (chan->state != BT_CONNECTED) {
2785 __skb_queue_purge(&seg_queue);
2792 if (chan->mode == L2CAP_MODE_ERTM)
2793 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2795 l2cap_streaming_send(chan, &seg_queue);
2799 /* If the skbs were not queued for sending, they'll still be in
2800 * seg_queue and need to be purged.
2802 __skb_queue_purge(&seg_queue);
2806 BT_DBG("bad state %1.1x", chan->mode);
2812 EXPORT_SYMBOL_GPL(l2cap_chan_send);
/* Send an SREJ S-frame for every sequence number from expected_tx_seq up
 * to (but excluding) @txseq that is not already buffered in srej_q, and
 * remember each on srej_list; then advance expected_tx_seq past @txseq.
 */
2814 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2816 struct l2cap_ctrl control;
2819 BT_DBG("chan %p, txseq %u", chan, txseq);
2821 memset(&control, 0, sizeof(control));
2823 control.super = L2CAP_SUPER_SREJ;
2825 for (seq = chan->expected_tx_seq; seq != txseq;
2826 seq = __next_seq(chan, seq)) {
2827 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2828 control.reqseq = seq;
2829 l2cap_send_sframe(chan, &control);
2830 l2cap_seq_list_append(&chan->srej_list, seq);
2834 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence number
 * on srej_list; no-op when the list is empty.
 */
2837 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2839 struct l2cap_ctrl control;
2841 BT_DBG("chan %p", chan);
2843 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2846 memset(&control, 0, sizeof(control));
2848 control.super = L2CAP_SUPER_SREJ;
2849 control.reqseq = chan->srej_list.tail;
2850 l2cap_send_sframe(chan, &control);
/* Re-send SREJ frames for every outstanding sequence number still on
 * srej_list except @txseq, cycling each popped entry back onto the list.
 * The initial head is captured so the list is traversed exactly once.
 */
2853 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2855 struct l2cap_ctrl control;
2859 BT_DBG("chan %p, txseq %u", chan, txseq);
2861 memset(&control, 0, sizeof(control));
2863 control.super = L2CAP_SUPER_SREJ;
2865 /* Capture initial list head to allow only one pass through the list. */
2866 initial_head = chan->srej_list.head;
2869 seq = l2cap_seq_list_pop(&chan->srej_list);
2870 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2873 control.reqseq = seq;
2874 l2cap_send_sframe(chan, &control);
2875 l2cap_seq_list_append(&chan->srej_list, seq);
2876 } while (chan->srej_list.head != initial_head);
2879 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2881 struct sk_buff *acked_skb;
2884 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2886 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2889 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2890 chan->expected_ack_seq, chan->unacked_frames);
2892 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2893 ackseq = __next_seq(chan, ackseq)) {
2895 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2897 skb_unlink(acked_skb, &chan->tx_q);
2898 kfree_skb(acked_skb);
2899 chan->unacked_frames--;
2903 chan->expected_ack_seq = reqseq;
2905 if (chan->unacked_frames == 0)
2906 __clear_retrans_timer(chan);
2908 BT_DBG("unacked_frames %u", chan->unacked_frames);
2911 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2913 BT_DBG("chan %p", chan);
2915 chan->expected_tx_seq = chan->buffer_seq;
2916 l2cap_seq_list_clear(&chan->srej_list);
2917 skb_queue_purge(&chan->srej_q);
2918 chan->rx_state = L2CAP_RX_STATE_RECV;
2921 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2922 struct l2cap_ctrl *control,
2923 struct sk_buff_head *skbs, u8 event)
/* ERTM transmit state machine while in the XMIT state: dispatches on
 * @event to queue/send data, handle local-busy transitions, process
 * acknowledged sequence numbers, and issue polls.
 * NOTE(review): the extraction dropped structural lines here (switch
 * header, break statements, closing braces); the case labels below are
 * from the original switch on `event`.
 */
2925 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2929 case L2CAP_EV_DATA_REQUEST:
/* Remember the first new skb as the next transmit candidate */
2930 if (chan->tx_send_head == NULL)
2931 chan->tx_send_head = skb_peek(skbs);
/* Append the segmented SDU to the transmit queue and send now */
2933 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2934 l2cap_ertm_send(chan);
2936 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2937 BT_DBG("Enter LOCAL_BUSY");
2938 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2940 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2941 /* The SREJ_SENT state must be aborted if we are to
2942 * enter the LOCAL_BUSY state.
2944 l2cap_abort_rx_srej_sent(chan);
/* Acknowledge (likely via RNR while busy) what we have received */
2947 l2cap_send_ack(chan);
2950 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2951 BT_DBG("Exit LOCAL_BUSY");
2952 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we previously advertised RNR, send an RR poll and wait for F-bit */
2954 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2955 struct l2cap_ctrl local_control;
2957 memset(&local_control, 0, sizeof(local_control));
2958 local_control.sframe = 1;
2959 local_control.super = L2CAP_SUPER_RR;
2960 local_control.poll = 1;
2961 local_control.reqseq = chan->buffer_seq;
2962 l2cap_send_sframe(chan, &local_control);
2964 chan->retry_count = 1;
2965 __set_monitor_timer(chan);
2966 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2969 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2970 l2cap_process_reqseq(chan, control->reqseq);
2972 case L2CAP_EV_EXPLICIT_POLL:
/* Send a poll (P=1) and move to WAIT_F until the F-bit comes back */
2973 l2cap_send_rr_or_rnr(chan, 1);
2974 chan->retry_count = 1;
2975 __set_monitor_timer(chan);
2976 __clear_ack_timer(chan);
2977 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2979 case L2CAP_EV_RETRANS_TO:
/* Retransmission timeout: poll the peer and await the F-bit */
2980 l2cap_send_rr_or_rnr(chan, 1);
2981 chan->retry_count = 1;
2982 __set_monitor_timer(chan);
2983 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2985 case L2CAP_EV_RECV_FBIT:
2986 /* Nothing to process */
2993 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2994 struct l2cap_ctrl *control,
2995 struct sk_buff_head *skbs, u8 event)
2997 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
3001 case L2CAP_EV_DATA_REQUEST:
3002 if (chan->tx_send_head == NULL)
3003 chan->tx_send_head = skb_peek(skbs);
3004 /* Queue data, but don't send. */
3005 skb_queue_splice_tail_init(skbs, &chan->tx_q);
3007 case L2CAP_EV_LOCAL_BUSY_DETECTED:
3008 BT_DBG("Enter LOCAL_BUSY");
3009 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3011 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
3012 /* The SREJ_SENT state must be aborted if we are to
3013 * enter the LOCAL_BUSY state.
3015 l2cap_abort_rx_srej_sent(chan);
3018 l2cap_send_ack(chan);
3021 case L2CAP_EV_LOCAL_BUSY_CLEAR:
3022 BT_DBG("Exit LOCAL_BUSY");
3023 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3025 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
3026 struct l2cap_ctrl local_control;
3027 memset(&local_control, 0, sizeof(local_control));
3028 local_control.sframe = 1;
3029 local_control.super = L2CAP_SUPER_RR;
3030 local_control.poll = 1;
3031 local_control.reqseq = chan->buffer_seq;
3032 l2cap_send_sframe(chan, &local_control);
3034 chan->retry_count = 1;
3035 __set_monitor_timer(chan);
3036 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
3039 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
3040 l2cap_process_reqseq(chan, control->reqseq);
3043 case L2CAP_EV_RECV_FBIT:
3044 if (control && control->final) {
3045 __clear_monitor_timer(chan);
3046 if (chan->unacked_frames > 0)
3047 __set_retrans_timer(chan);
3048 chan->retry_count = 0;
3049 chan->tx_state = L2CAP_TX_STATE_XMIT;
3050 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3053 case L2CAP_EV_EXPLICIT_POLL:
3056 case L2CAP_EV_MONITOR_TO:
3057 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3058 l2cap_send_rr_or_rnr(chan, 1);
3059 __set_monitor_timer(chan);
3060 chan->retry_count++;
3062 l2cap_send_disconn_req(chan, ECONNABORTED);
3070 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3071 struct sk_buff_head *skbs, u8 event)
3073 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3074 chan, control, skbs, event, chan->tx_state);
3076 switch (chan->tx_state) {
3077 case L2CAP_TX_STATE_XMIT:
3078 l2cap_tx_state_xmit(chan, control, skbs, event);
3080 case L2CAP_TX_STATE_WAIT_F:
3081 l2cap_tx_state_wait_f(chan, control, skbs, event);
3089 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3090 struct l2cap_ctrl *control)
3092 BT_DBG("chan %p, control %p", chan, control);
3093 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3096 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3097 struct l2cap_ctrl *control)
3099 BT_DBG("chan %p, control %p", chan, control);
3100 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3103 /* Copy frame to all raw sockets on that connection */
3104 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3106 struct sk_buff *nskb;
3107 struct l2cap_chan *chan;
3109 BT_DBG("conn %p", conn);
3111 mutex_lock(&conn->chan_lock);
3113 list_for_each_entry(chan, &conn->chan_l, list) {
3114 if (chan->chan_type != L2CAP_CHAN_RAW)
3117 /* Don't send frame to the channel it came from */
3118 if (bt_cb(skb)->l2cap.chan == chan)
3121 nskb = skb_clone(skb, GFP_KERNEL);
3124 if (chan->ops->recv(chan, nskb))
3128 mutex_unlock(&conn->chan_lock);
3131 /* ---- L2CAP signalling commands ---- */
3132 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3133 u8 ident, u16 dlen, void *data)
3135 struct sk_buff *skb, **frag;
3136 struct l2cap_cmd_hdr *cmd;
3137 struct l2cap_hdr *lh;
3140 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3141 conn, code, ident, dlen);
3143 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3146 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3147 count = min_t(unsigned int, conn->mtu, len);
3149 skb = bt_skb_alloc(count, GFP_KERNEL);
3153 lh = skb_put(skb, L2CAP_HDR_SIZE);
3154 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3156 if (conn->hcon->type == LE_LINK)
3157 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3159 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3161 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3164 cmd->len = cpu_to_le16(dlen);
3167 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3168 skb_put_data(skb, data, count);
3174 /* Continuation fragments (no L2CAP header) */
3175 frag = &skb_shinfo(skb)->frag_list;
3177 count = min_t(unsigned int, conn->mtu, len);
3179 *frag = bt_skb_alloc(count, GFP_KERNEL);
3183 skb_put_data(*frag, data, count);
3188 frag = &(*frag)->next;
3198 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3201 struct l2cap_conf_opt *opt = *ptr;
3204 len = L2CAP_CONF_OPT_SIZE + opt->len;
3212 *val = *((u8 *) opt->val);
3216 *val = get_unaligned_le16(opt->val);
3220 *val = get_unaligned_le32(opt->val);
3224 *val = (unsigned long) opt->val;
3228 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3232 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3234 struct l2cap_conf_opt *opt = *ptr;
3236 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3238 if (size < L2CAP_CONF_OPT_SIZE + len)
3246 *((u8 *) opt->val) = val;
3250 put_unaligned_le16(val, opt->val);
3254 put_unaligned_le32(val, opt->val);
3258 memcpy(opt->val, (void *) val, len);
3262 *ptr += L2CAP_CONF_OPT_SIZE + len;
3265 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3267 struct l2cap_conf_efs efs;
3269 switch (chan->mode) {
3270 case L2CAP_MODE_ERTM:
3271 efs.id = chan->local_id;
3272 efs.stype = chan->local_stype;
3273 efs.msdu = cpu_to_le16(chan->local_msdu);
3274 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3275 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3276 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3279 case L2CAP_MODE_STREAMING:
3281 efs.stype = L2CAP_SERV_BESTEFFORT;
3282 efs.msdu = cpu_to_le16(chan->local_msdu);
3283 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3292 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3293 (unsigned long) &efs, size);
3296 static void l2cap_ack_timeout(struct work_struct *work)
3298 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3302 BT_DBG("chan %p", chan);
3304 l2cap_chan_lock(chan);
3306 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3307 chan->last_acked_seq);
3310 l2cap_send_rr_or_rnr(chan, 0);
3312 l2cap_chan_unlock(chan);
3313 l2cap_chan_put(chan);
3316 int l2cap_ertm_init(struct l2cap_chan *chan)
3320 chan->next_tx_seq = 0;
3321 chan->expected_tx_seq = 0;
3322 chan->expected_ack_seq = 0;
3323 chan->unacked_frames = 0;
3324 chan->buffer_seq = 0;
3325 chan->frames_sent = 0;
3326 chan->last_acked_seq = 0;
3328 chan->sdu_last_frag = NULL;
3331 skb_queue_head_init(&chan->tx_q);
3333 chan->local_amp_id = AMP_ID_BREDR;
3334 chan->move_id = AMP_ID_BREDR;
3335 chan->move_state = L2CAP_MOVE_STABLE;
3336 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3338 if (chan->mode != L2CAP_MODE_ERTM)
3341 chan->rx_state = L2CAP_RX_STATE_RECV;
3342 chan->tx_state = L2CAP_TX_STATE_XMIT;
3344 skb_queue_head_init(&chan->srej_q);
3346 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3350 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3352 l2cap_seq_list_free(&chan->srej_list);
3357 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3360 case L2CAP_MODE_STREAMING:
3361 case L2CAP_MODE_ERTM:
3362 if (l2cap_mode_supported(mode, remote_feat_mask))
3366 return L2CAP_MODE_BASIC;
3370 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3372 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3373 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3376 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3378 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3379 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3382 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3383 struct l2cap_conf_rfc *rfc)
3385 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3386 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3388 /* Class 1 devices have must have ERTM timeouts
3389 * exceeding the Link Supervision Timeout. The
3390 * default Link Supervision Timeout for AMP
3391 * controllers is 10 seconds.
3393 * Class 1 devices use 0xffffffff for their
3394 * best-effort flush timeout, so the clamping logic
3395 * will result in a timeout that meets the above
3396 * requirement. ERTM timeouts are 16-bit values, so
3397 * the maximum timeout is 65.535 seconds.
3400 /* Convert timeout to milliseconds and round */
3401 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3403 /* This is the recommended formula for class 2 devices
3404 * that start ERTM timers when packets are sent to the
3407 ertm_to = 3 * ertm_to + 500;
3409 if (ertm_to > 0xffff)
3412 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3413 rfc->monitor_timeout = rfc->retrans_timeout;
3415 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3416 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3420 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3422 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3423 __l2cap_ews_supported(chan->conn)) {
3424 /* use extended control field */
3425 set_bit(FLAG_EXT_CTRL, &chan->flags);
3426 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3428 chan->tx_win = min_t(u16, chan->tx_win,
3429 L2CAP_DEFAULT_TX_WINDOW);
3430 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3432 chan->ack_win = chan->tx_win;
3435 static void l2cap_mtu_auto(struct l2cap_chan *chan)
/* Auto-tune the incoming MTU to the largest EDR ACL packet the link
 * allows. In pkt_type the HCI_xDHy bits mark packet types that shall
 * NOT be used, so !(pkt_type & HCI_xDHy) means that type is permitted.
 * NOTE(review): the extraction dropped the chan->imtu assignment after
 * each check - each permitted packet type raises imtu to that type's
 * payload capacity (info bytes minus the 2-byte payload header).
 */
3437 struct hci_conn *conn = chan->conn->hcon;
3439 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3441 /* The 2-DH1 packet has between 2 and 56 information bytes
3442 * (including the 2-byte payload header)
3444 if (!(conn->pkt_type & HCI_2DH1))
3447 /* The 3-DH1 packet has between 2 and 85 information bytes
3448 * (including the 2-byte payload header)
3450 if (!(conn->pkt_type & HCI_3DH1))
3453 /* The 2-DH3 packet has between 2 and 369 information bytes
3454 * (including the 2-byte payload header)
3456 if (!(conn->pkt_type & HCI_2DH3))
3459 /* The 3-DH3 packet has between 2 and 554 information bytes
3460 * (including the 2-byte payload header)
3462 if (!(conn->pkt_type & HCI_3DH3))
3465 /* The 2-DH5 packet has between 2 and 681 information bytes
3466 * (including the 2-byte payload header)
3468 if (!(conn->pkt_type & HCI_2DH5))
3471 /* The 3-DH5 packet has between 2 and 1023 information bytes
3472 * (including the 2-byte payload header)
3474 if (!(conn->pkt_type & HCI_3DH5))
3478 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
/* Build an outgoing Configuration Request for @chan into @data
 * (capacity @data_size) and return the total request length.
 * Options emitted depend on the negotiated mode: MTU, RFC, and for
 * ERTM/Streaming optionally EFS, EWS and FCS.
 * NOTE(review): the extraction dropped structural lines (braces,
 * breaks, "done:"-style labels and the final return).
 */
3480 struct l2cap_conf_req *req = data;
3481 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3482 void *ptr = req->data;
3483 void *endptr = data + data_size;
3486 BT_DBG("chan %p", chan);
/* Only downgrade the mode on the very first configuration round */
3488 if (chan->num_conf_req || chan->num_conf_rsp)
3491 switch (chan->mode) {
3492 case L2CAP_MODE_STREAMING:
3493 case L2CAP_MODE_ERTM:
3494 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3497 if (__l2cap_efs_supported(chan->conn))
3498 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* Fall back to a mode the remote's feature mask supports */
3502 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3507 if (chan->imtu != L2CAP_DEFAULT_MTU) {
/* NOTE(review): l2cap_mtu_auto() is presumably called when imtu == 0
 * (auto-tune request); confirm against the dropped condition.
 */
3509 l2cap_mtu_auto(chan);
3510 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3514 switch (chan->mode) {
3515 case L2CAP_MODE_BASIC:
/* Only send an explicit Basic-mode RFC if the peer knows about RFC */
3519 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3520 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3523 rfc.mode = L2CAP_MODE_BASIC;
3525 rfc.max_transmit = 0;
3526 rfc.retrans_timeout = 0;
3527 rfc.monitor_timeout = 0;
3528 rfc.max_pdu_size = 0;
3530 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3531 (unsigned long) &rfc, endptr - ptr);
3534 case L2CAP_MODE_ERTM:
3535 rfc.mode = L2CAP_MODE_ERTM;
3536 rfc.max_transmit = chan->max_tx;
3538 __l2cap_set_ertm_timeouts(chan, &rfc);
/* Cap the PDU size by the connection MTU minus framing overhead */
3540 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3541 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3543 rfc.max_pdu_size = cpu_to_le16(size);
3545 l2cap_txwin_setup(chan);
3547 rfc.txwin_size = min_t(u16, chan->tx_win,
3548 L2CAP_DEFAULT_TX_WINDOW);
3550 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3551 (unsigned long) &rfc, endptr - ptr);
3553 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3554 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3556 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3557 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3558 chan->tx_win, endptr - ptr);
/* Offer to disable the FCS when allowed - saves 2 bytes per PDU */
3560 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3561 if (chan->fcs == L2CAP_FCS_NONE ||
3562 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3563 chan->fcs = L2CAP_FCS_NONE;
3564 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3565 chan->fcs, endptr - ptr);
3569 case L2CAP_MODE_STREAMING:
3570 l2cap_txwin_setup(chan);
3571 rfc.mode = L2CAP_MODE_STREAMING;
3573 rfc.max_transmit = 0;
3574 rfc.retrans_timeout = 0;
3575 rfc.monitor_timeout = 0;
3577 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3578 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3580 rfc.max_pdu_size = cpu_to_le16(size);
3582 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3583 (unsigned long) &rfc, endptr - ptr);
3585 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3586 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3588 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3589 if (chan->fcs == L2CAP_FCS_NONE ||
3590 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3591 chan->fcs = L2CAP_FCS_NONE;
3592 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3593 chan->fcs, endptr - ptr);
3598 req->dcid = cpu_to_le16(chan->dcid);
3599 req->flags = cpu_to_le16(0);
3604 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
/* Parse the remote's Configuration Request stored in chan->conf_req
 * and build our Configuration Response into @data (capacity
 * @data_size). Returns the response length, or -ECONNREFUSED when the
 * requested mode/EFS parameters cannot be accepted.
 * NOTE(review): the extraction dropped structural lines (braces,
 * breaks, labels, the final return) throughout this function.
 */
3606 struct l2cap_conf_rsp *rsp = data;
3607 void *ptr = rsp->data;
3608 void *endptr = data + data_size;
3609 void *req = chan->conf_req;
3610 int len = chan->conf_len;
3611 int type, hint, olen;
3613 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3614 struct l2cap_conf_efs efs;
3616 u16 mtu = L2CAP_DEFAULT_MTU;
3617 u16 result = L2CAP_CONF_SUCCESS;
3620 BT_DBG("chan %p", chan);
/* First pass: decode every option the remote sent */
3622 while (len >= L2CAP_CONF_OPT_SIZE) {
3623 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint-bit options may be ignored; others must be understood */
3627 hint = type & L2CAP_CONF_HINT;
3628 type &= L2CAP_CONF_MASK;
3631 case L2CAP_CONF_MTU:
3637 case L2CAP_CONF_FLUSH_TO:
3640 chan->flush_to = val;
3643 case L2CAP_CONF_QOS:
3646 case L2CAP_CONF_RFC:
/* Length-check before copying into the fixed-size local struct */
3647 if (olen != sizeof(rfc))
3649 memcpy(&rfc, (void *) val, olen);
3652 case L2CAP_CONF_FCS:
3655 if (val == L2CAP_FCS_NONE)
3656 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3659 case L2CAP_CONF_EFS:
3660 if (olen != sizeof(efs))
3663 memcpy(&efs, (void *) val, olen);
3666 case L2CAP_CONF_EWS:
/* Extended window requires local A2MP support */
3669 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3670 return -ECONNREFUSED;
3671 set_bit(FLAG_EXT_CTRL, &chan->flags);
3672 set_bit(CONF_EWS_RECV, &chan->conf_state);
3673 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3674 chan->remote_tx_win = val;
/* Unknown non-hint option: reject it by echoing its type back */
3680 result = L2CAP_CONF_UNKNOWN;
3681 l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
/* Mode may only be (re)negotiated in the first exchange */
3686 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3689 switch (chan->mode) {
3690 case L2CAP_MODE_STREAMING:
3691 case L2CAP_MODE_ERTM:
3692 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3693 chan->mode = l2cap_select_mode(rfc.mode,
3694 chan->conn->feat_mask);
3699 if (__l2cap_efs_supported(chan->conn))
3700 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3702 return -ECONNREFUSED;
3705 if (chan->mode != rfc.mode)
3706 return -ECONNREFUSED;
/* Mode mismatch after negotiation: unacceptable, propose our mode */
3712 if (chan->mode != rfc.mode) {
3713 result = L2CAP_CONF_UNACCEPT;
3714 rfc.mode = chan->mode;
3716 if (chan->num_conf_rsp == 1)
3717 return -ECONNREFUSED;
3719 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3720 (unsigned long) &rfc, endptr - ptr);
3723 if (result == L2CAP_CONF_SUCCESS) {
3724 /* Configure output options and let the other side know
3725 * which ones we don't like. */
3727 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3728 result = L2CAP_CONF_UNACCEPT;
3731 set_bit(CONF_MTU_DONE, &chan->conf_state);
3733 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
/* EFS service type must match ours unless either side is no-traffic */
3736 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3737 efs.stype != L2CAP_SERV_NOTRAFIC &&
3738 efs.stype != chan->local_stype) {
3740 result = L2CAP_CONF_UNACCEPT;
3742 if (chan->num_conf_req >= 1)
3743 return -ECONNREFUSED;
3745 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3747 (unsigned long) &efs, endptr - ptr);
3749 /* Send PENDING Conf Rsp */
3750 result = L2CAP_CONF_PENDING;
3751 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3756 case L2CAP_MODE_BASIC:
3757 chan->fcs = L2CAP_FCS_NONE;
3758 set_bit(CONF_MODE_DONE, &chan->conf_state);
3761 case L2CAP_MODE_ERTM:
3762 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3763 chan->remote_tx_win = rfc.txwin_size;
3765 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3767 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the remote's PDU size to what the link can carry */
3769 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3770 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3771 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3772 rfc.max_pdu_size = cpu_to_le16(size);
3773 chan->remote_mps = size;
3775 __l2cap_set_ertm_timeouts(chan, &rfc);
3777 set_bit(CONF_MODE_DONE, &chan->conf_state);
3779 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3780 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
/* Accept and record the remote's flow spec when EFS is enabled */
3783 test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3784 chan->remote_id = efs.id;
3785 chan->remote_stype = efs.stype;
3786 chan->remote_msdu = le16_to_cpu(efs.msdu);
3787 chan->remote_flush_to =
3788 le32_to_cpu(efs.flush_to);
3789 chan->remote_acc_lat =
3790 le32_to_cpu(efs.acc_lat);
3791 chan->remote_sdu_itime =
3792 le32_to_cpu(efs.sdu_itime);
3793 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3795 (unsigned long) &efs, endptr - ptr);
3799 case L2CAP_MODE_STREAMING:
3800 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3801 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3802 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3803 rfc.max_pdu_size = cpu_to_le16(size);
3804 chan->remote_mps = size;
3806 set_bit(CONF_MODE_DONE, &chan->conf_state);
3808 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3809 (unsigned long) &rfc, endptr - ptr);
/* Any other mode is refused with our own mode echoed back */
3814 result = L2CAP_CONF_UNACCEPT;
3816 memset(&rfc, 0, sizeof(rfc));
3817 rfc.mode = chan->mode;
3820 if (result == L2CAP_CONF_SUCCESS)
3821 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3823 rsp->scid = cpu_to_le16(chan->dcid);
3824 rsp->result = cpu_to_le16(result);
3825 rsp->flags = cpu_to_le16(0);
3830 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3831 void *data, size_t size, u16 *result)
/* Parse a Configuration Response from the remote and build the
 * follow-up Configuration Request into @data (capacity @size).
 * Adjusts channel parameters the remote pushed back on; returns the
 * new request length or -ECONNREFUSED when agreement is impossible.
 * NOTE(review): the extraction dropped braces/breaks and the final
 * return throughout this function.
 */
3833 struct l2cap_conf_req *req = data;
3834 void *ptr = req->data;
3835 void *endptr = data + size;
3838 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3839 struct l2cap_conf_efs efs;
3841 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3843 while (len >= L2CAP_CONF_OPT_SIZE) {
3844 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3849 case L2CAP_CONF_MTU:
/* Remote rejected our MTU below the minimum - fall back and retry */
3852 if (val < L2CAP_DEFAULT_MIN_MTU) {
3853 *result = L2CAP_CONF_UNACCEPT;
3854 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3857 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3861 case L2CAP_CONF_FLUSH_TO:
3864 chan->flush_to = val;
3865 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3866 chan->flush_to, endptr - ptr);
3869 case L2CAP_CONF_RFC:
3870 if (olen != sizeof(rfc))
3872 memcpy(&rfc, (void *)val, olen);
/* State-2 devices never change mode once configured */
3873 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3874 rfc.mode != chan->mode)
3875 return -ECONNREFUSED;
3877 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3878 (unsigned long) &rfc, endptr - ptr);
3881 case L2CAP_CONF_EWS:
3884 chan->ack_win = min_t(u16, val, chan->ack_win);
3885 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3886 chan->tx_win, endptr - ptr);
3889 case L2CAP_CONF_EFS:
3890 if (olen != sizeof(efs))
3892 memcpy(&efs, (void *)val, olen);
/* Service type must be compatible with our local flow spec */
3893 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3894 efs.stype != L2CAP_SERV_NOTRAFIC &&
3895 efs.stype != chan->local_stype)
3896 return -ECONNREFUSED;
3897 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3898 (unsigned long) &efs, endptr - ptr);
3901 case L2CAP_CONF_FCS:
3904 if (*result == L2CAP_CONF_PENDING)
3905 if (val == L2CAP_FCS_NONE)
3906 set_bit(CONF_RECV_NO_FCS,
/* Basic mode cannot be renegotiated into anything else */
3912 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3913 return -ECONNREFUSED;
3915 chan->mode = rfc.mode;
3917 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3919 case L2CAP_MODE_ERTM:
3920 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3921 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3922 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3923 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3924 chan->ack_win = min_t(u16, chan->ack_win,
3927 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3928 chan->local_msdu = le16_to_cpu(efs.msdu);
3929 chan->local_sdu_itime =
3930 le32_to_cpu(efs.sdu_itime);
3931 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3932 chan->local_flush_to =
3933 le32_to_cpu(efs.flush_to);
3937 case L2CAP_MODE_STREAMING:
3938 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3942 req->dcid = cpu_to_le16(chan->dcid);
3943 req->flags = cpu_to_le16(0);
3948 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3949 u16 result, u16 flags)
3951 struct l2cap_conf_rsp *rsp = data;
3952 void *ptr = rsp->data;
3954 BT_DBG("chan %p", chan);
3956 rsp->scid = cpu_to_le16(chan->dcid);
3957 rsp->result = cpu_to_le16(result);
3958 rsp->flags = cpu_to_le16(flags);
3963 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3965 struct l2cap_le_conn_rsp rsp;
3966 struct l2cap_conn *conn = chan->conn;
3968 BT_DBG("chan %p", chan);
3970 rsp.dcid = cpu_to_le16(chan->scid);
3971 rsp.mtu = cpu_to_le16(chan->imtu);
3972 rsp.mps = cpu_to_le16(chan->mps);
3973 rsp.credits = cpu_to_le16(chan->rx_credits);
3974 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3976 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3980 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3983 struct l2cap_ecred_conn_rsp rsp;
3986 struct l2cap_conn *conn = chan->conn;
3987 u16 ident = chan->ident;
3993 BT_DBG("chan %p ident %d", chan, ident);
3995 pdu.rsp.mtu = cpu_to_le16(chan->imtu);
3996 pdu.rsp.mps = cpu_to_le16(chan->mps);
3997 pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3998 pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
4000 mutex_lock(&conn->chan_lock);
4002 list_for_each_entry(chan, &conn->chan_l, list) {
4003 if (chan->ident != ident)
4006 /* Reset ident so only one response is sent */
4009 /* Include all channels pending with the same ident */
4010 pdu.dcid[i++] = cpu_to_le16(chan->scid);
4013 mutex_unlock(&conn->chan_lock);
4015 l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
4016 sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
4019 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4021 struct l2cap_conn_rsp rsp;
4022 struct l2cap_conn *conn = chan->conn;
4026 rsp.scid = cpu_to_le16(chan->dcid);
4027 rsp.dcid = cpu_to_le16(chan->scid);
4028 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4029 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4032 rsp_code = L2CAP_CREATE_CHAN_RSP;
4034 rsp_code = L2CAP_CONN_RSP;
4036 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4038 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
4040 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4043 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4044 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4045 chan->num_conf_req++;
4048 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
/* Extract the final RFC (and extended window) parameters from a
 * successful Configuration Response and commit them to the channel.
 * NOTE(review): the extraction dropped the declarations, the option
 * switch header/breaks, and the EWS handling body.
 */
4052 /* Use sane default values in case a misbehaving remote device
4053 * did not send an RFC or extended window size option.
4055 u16 txwin_ext = chan->ack_win;
4056 struct l2cap_conf_rfc rfc = {
4058 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4059 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4060 .max_pdu_size = cpu_to_le16(chan->imtu),
4061 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4064 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and Streaming carry RFC parameters worth extracting */
4066 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4069 while (len >= L2CAP_CONF_OPT_SIZE) {
4070 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4075 case L2CAP_CONF_RFC:
4076 if (olen != sizeof(rfc))
4078 memcpy(&rfc, (void *)val, olen);
4080 case L2CAP_CONF_EWS:
/* NOTE(review): dropped body presumably stores val into txwin_ext */
4089 case L2CAP_MODE_ERTM:
4090 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4091 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4092 chan->mps = le16_to_cpu(rfc.max_pdu_size);
4093 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4094 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4096 chan->ack_win = min_t(u16, chan->ack_win,
4099 case L2CAP_MODE_STREAMING:
4100 chan->mps = le16_to_cpu(rfc.max_pdu_size);
4104 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4105 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4108 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4110 if (cmd_len < sizeof(*rej))
4113 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4116 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4117 cmd->ident == conn->info_ident) {
4118 cancel_delayed_work(&conn->info_timer);
4120 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4121 conn->info_ident = 0;
4123 l2cap_conn_start(conn);
4129 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4130 struct l2cap_cmd_hdr *cmd,
4131 u8 *data, u8 rsp_code, u8 amp_id)
/* Handle an incoming Connection Request: find a listening channel for
 * the PSM, validate security and the requested source CID, create the
 * new channel, and send the response (possibly PENDING, possibly
 * followed by an Information Request or the first Configuration
 * Request). Returns the new channel or NULL.
 * NOTE(review): the extraction dropped braces, goto labels
 * (sendresp/response paths) and several statements.
 */
4133 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4134 struct l2cap_conn_rsp rsp;
4135 struct l2cap_chan *chan = NULL, *pchan;
4136 int result, status = L2CAP_CS_NO_INFO;
4138 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4139 __le16 psm = req->psm;
4141 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4143 /* Check if we have socket listening on psm */
4144 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4145 &conn->hcon->dst, ACL_LINK);
/* No listener for this PSM - refuse */
4147 result = L2CAP_CR_BAD_PSM;
4151 mutex_lock(&conn->chan_lock);
4152 l2cap_chan_lock(pchan);
4154 /* Check if the ACL is secure enough (if not SDP) */
4155 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4156 !hci_conn_check_link_mode(conn->hcon)) {
4157 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4158 result = L2CAP_CR_SEC_BLOCK;
4162 result = L2CAP_CR_NO_MEM;
4164 /* Check for valid dynamic CID range (as per Erratum 3253) */
4165 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4166 result = L2CAP_CR_INVALID_SCID;
4170 /* Check if we already have channel with that dcid */
4171 if (__l2cap_get_chan_by_dcid(conn, scid)) {
4172 result = L2CAP_CR_SCID_IN_USE;
4176 chan = pchan->ops->new_connection(pchan);
4180 /* For certain devices (ex: HID mouse), support for authentication,
4181 * pairing and bonding is optional. For such devices, inorder to avoid
4182 * the ACL alive for too long after L2CAP disconnection, reset the ACL
4183 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4185 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4187 bacpy(&chan->src, &conn->hcon->src);
4188 bacpy(&chan->dst, &conn->hcon->dst);
4189 chan->src_type = bdaddr_src_type(conn->hcon);
4190 chan->dst_type = bdaddr_dst_type(conn->hcon);
4193 chan->local_amp_id = amp_id;
4195 __l2cap_chan_add(conn, chan);
4199 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4201 chan->ident = cmd->ident;
/* Only answer definitively once the peer's features are known */
4203 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4204 if (l2cap_chan_check_security(chan, false)) {
4205 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4206 l2cap_state_change(chan, BT_CONNECT2);
4207 result = L2CAP_CR_PEND;
4208 status = L2CAP_CS_AUTHOR_PEND;
4209 chan->ops->defer(chan);
4211 /* Force pending result for AMP controllers.
4212 * The connection will succeed after the
4213 * physical link is up.
4215 if (amp_id == AMP_ID_BREDR) {
4216 l2cap_state_change(chan, BT_CONFIG);
4217 result = L2CAP_CR_SUCCESS;
4219 l2cap_state_change(chan, BT_CONNECT2);
4220 result = L2CAP_CR_PEND;
4222 status = L2CAP_CS_NO_INFO;
/* Security still in progress */
4225 l2cap_state_change(chan, BT_CONNECT2);
4226 result = L2CAP_CR_PEND;
4227 status = L2CAP_CS_AUTHEN_PEND;
/* Features not yet known - always answer pending */
4230 l2cap_state_change(chan, BT_CONNECT2);
4231 result = L2CAP_CR_PEND;
4232 status = L2CAP_CS_NO_INFO;
4236 l2cap_chan_unlock(pchan);
4237 mutex_unlock(&conn->chan_lock);
4238 l2cap_chan_put(pchan);
4241 rsp.scid = cpu_to_le16(scid);
4242 rsp.dcid = cpu_to_le16(dcid);
4243 rsp.result = cpu_to_le16(result);
4244 rsp.status = cpu_to_le16(status);
4245 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Kick off feature discovery if we answered pending without info */
4247 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4248 struct l2cap_info_req info;
4249 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4251 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4252 conn->info_ident = l2cap_get_ident(conn);
4254 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4256 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4257 sizeof(info), &info);
/* Accepted outright: start configuration immediately */
4260 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4261 result == L2CAP_CR_SUCCESS) {
4263 set_bit(CONF_REQ_SENT, &chan->conf_state);
4264 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4265 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4266 chan->num_conf_req++;
/* Handle an incoming BR/EDR Connection Request.
 * Validates the command length, notifies the management interface of the
 * newly connected device (at most once per ACL, guarded by the
 * HCI_CONN_MGMT_CONNECTED bit), then delegates channel creation to
 * l2cap_connect() with a standard Connect Response code and amp_id 0.
 */
4272 static int l2cap_connect_req(struct l2cap_conn *conn,
4273 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4275 struct hci_dev *hdev = conn->hcon->hdev;
4276 struct hci_conn *hcon = conn->hcon;
/* Drop malformed (too short) requests. */
4278 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report the device as connected to mgmt exactly once per hcon. */
4282 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4283 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4284 mgmt_device_connected(hdev, hcon, NULL, 0);
4285 hci_dev_unlock(hdev);
/* amp_id 0 selects a plain BR/EDR channel. */
4287 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response or Create Channel Response from the peer.
 * Looks up the local channel (by scid, falling back to the pending command
 * ident), then on success moves it to BT_CONFIG and sends the first
 * Configure Request; on a pending result it marks the channel as waiting;
 * any other result tears the channel down with ECONNREFUSED.
 */
4291 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4292 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4295 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4296 u16 scid, dcid, result, status;
4297 struct l2cap_chan *chan;
4301 if (cmd_len < sizeof(*rsp))
4304 scid = __le16_to_cpu(rsp->scid);
4305 dcid = __le16_to_cpu(rsp->dcid);
4306 result = __le16_to_cpu(rsp->result);
4307 status = __le16_to_cpu(rsp->status);
4309 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4310 dcid, scid, result, status);
4312 mutex_lock(&conn->chan_lock);
/* Prefer lookup by source CID; fall back to the request ident. */
4315 chan = __l2cap_get_chan_by_scid(conn, scid);
4321 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
/* Only take a reference if the channel is still alive. */
4328 chan = l2cap_chan_hold_unless_zero(chan);
4336 l2cap_chan_lock(chan);
4339 case L2CAP_CR_SUCCESS:
4340 l2cap_state_change(chan, BT_CONFIG);
4343 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send our Configure Request exactly once per channel. */
4345 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4348 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4349 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4350 chan->num_conf_req++;
4354 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Non-success, non-pending result: refuse and free the channel. */
4358 l2cap_chan_del(chan, ECONNREFUSED);
4362 l2cap_chan_unlock(chan);
4363 l2cap_chan_put(chan);
4366 mutex_unlock(&conn->chan_lock);
/* Pick the channel's FCS setting: CRC16 applies only to ERTM and
 * streaming modes, and only if the remote has not requested no-FCS.
 */
4371 static inline void set_default_fcs(struct l2cap_chan *chan)
4373 /* FCS is enabled only in ERTM or streaming mode, if one or both
4376 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4377 chan->fcs = L2CAP_FCS_NONE;
4378 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4379 chan->fcs = L2CAP_FCS_CRC16;
/* Send a successful Configure Response for an EFS-enabled channel and
 * mark local (output) configuration as complete.
 */
4382 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4383 u8 ident, u16 flags)
4385 struct l2cap_conn *conn = chan->conn;
4387 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
/* Local side no longer pending; output path done. */
4390 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4391 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4393 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4394 l2cap_build_conf_rsp(chan, data,
4395 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the
 * offending source/destination CIDs back to the peer.
 */
4398 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4401 struct l2cap_cmd_rej_cid rej;
4403 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4404 rej.scid = __cpu_to_le16(scid);
4405 rej.dcid = __cpu_to_le16(dcid);
4407 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configure Request.
 * Option data may be fragmented across several requests (continuation
 * flag set); fragments are accumulated in chan->conf_req until the final
 * one arrives, then parsed and answered.  Once both input and output
 * configuration are done the channel transitions to ready.
 */
4410 static inline int l2cap_config_req(struct l2cap_conn *conn,
4411 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4414 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4417 struct l2cap_chan *chan;
4420 if (cmd_len < sizeof(*req))
4423 dcid = __le16_to_cpu(req->dcid);
4424 flags = __le16_to_cpu(req->flags);
4426 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
/* Unknown destination CID: reject the command. */
4428 chan = l2cap_get_chan_by_scid(conn, dcid);
4430 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Configuration is only valid in these channel states. */
4434 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4435 chan->state != BT_CONNECTED) {
4436 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4441 /* Reject if config buffer is too small. */
4442 len = cmd_len - sizeof(*req);
4443 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4444 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4445 l2cap_build_conf_rsp(chan, rsp,
4446 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request buffer. */
4451 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4452 chan->conf_len += len;
4454 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4455 /* Incomplete config. Send empty response. */
4456 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4457 l2cap_build_conf_rsp(chan, rsp,
4458 L2CAP_CONF_SUCCESS, flags), rsp);
4462 /* Complete config. */
4463 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
/* Unparsable options: tear the channel down. */
4465 l2cap_send_disconn_req(chan, ECONNRESET);
4469 chan->ident = cmd->ident;
4470 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4471 chan->num_conf_rsp++;
4473 /* Reset config buffer. */
4476 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: set FCS, init ERTM/streaming, go ready. */
4479 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4480 set_default_fcs(chan);
4482 if (chan->mode == L2CAP_MODE_ERTM ||
4483 chan->mode == L2CAP_MODE_STREAMING)
4484 err = l2cap_ertm_init(chan);
4487 l2cap_send_disconn_req(chan, -err);
4489 l2cap_chan_ready(chan);
/* Send our own Configure Request if we have not done so yet. */
4494 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4496 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4497 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4498 chan->num_conf_req++;
4501 /* Got Conf Rsp PENDING from remote side and assume we sent
4502 Conf Rsp PENDING in the code above */
4503 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4504 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4506 /* check compatibility */
4508 /* Send rsp for BR/EDR channel */
4510 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4512 chan->ident = cmd->ident;
4516 l2cap_chan_unlock(chan);
4517 l2cap_chan_put(chan);
/* Handle an incoming Configure Response.
 * Dispatches on the result code: success records the remote's RFC
 * options; pending may renegotiate via l2cap_parse_conf_rsp() (with an
 * AMP logical-link path when hs_hcon is set); unknown/unacceptable
 * options trigger a bounded number of retries; anything else disconnects.
 */
4521 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4522 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4525 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4526 u16 scid, flags, result;
4527 struct l2cap_chan *chan;
4528 int len = cmd_len - sizeof(*rsp);
4531 if (cmd_len < sizeof(*rsp))
4534 scid = __le16_to_cpu(rsp->scid);
4535 flags = __le16_to_cpu(rsp->flags);
4536 result = __le16_to_cpu(rsp->result);
4538 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4541 chan = l2cap_get_chan_by_scid(conn, scid);
4546 case L2CAP_CONF_SUCCESS:
4547 l2cap_conf_rfc_get(chan, rsp->data, len);
4548 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4551 case L2CAP_CONF_PENDING:
4552 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4554 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4557 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4558 buf, sizeof(buf), &result);
4560 l2cap_send_disconn_req(chan, ECONNRESET);
/* Plain BR/EDR: answer directly; AMP: bring the logical link up first. */
4564 if (!chan->hs_hcon) {
4565 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4568 if (l2cap_check_efs(chan)) {
4569 amp_create_logical_link(chan);
4570 chan->ident = cmd->ident;
4576 case L2CAP_CONF_UNKNOWN:
4577 case L2CAP_CONF_UNACCEPT:
/* Retry negotiation, but only up to L2CAP_CONF_MAX_CONF_RSP times. */
4578 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4581 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4582 l2cap_send_disconn_req(chan, ECONNRESET);
4586 /* throw out any old stored conf requests */
4587 result = L2CAP_CONF_SUCCESS;
4588 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4589 req, sizeof(req), &result);
4591 l2cap_send_disconn_req(chan, ECONNRESET);
4595 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4596 L2CAP_CONF_REQ, len, req);
4597 chan->num_conf_req++;
4598 if (result != L2CAP_CONF_SUCCESS)
/* Any other result code is fatal for the channel. */
4605 l2cap_chan_set_err(chan, ECONNRESET);
4607 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4608 l2cap_send_disconn_req(chan, ECONNRESET);
/* More fragments coming: wait for the final response. */
4612 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4615 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* Both directions configured: set FCS, init ERTM/streaming, go ready. */
4617 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4618 set_default_fcs(chan);
4620 if (chan->mode == L2CAP_MODE_ERTM ||
4621 chan->mode == L2CAP_MODE_STREAMING)
4622 err = l2cap_ertm_init(chan);
4625 l2cap_send_disconn_req(chan, -err);
4627 l2cap_chan_ready(chan);
4631 l2cap_chan_unlock(chan);
4632 l2cap_chan_put(chan);
/* Handle an incoming Disconnection Request.
 * Finds the channel by our local CID (the peer's dcid), acknowledges
 * with a Disconnection Response, then shuts down and frees the channel.
 * An unknown CID is answered with a Command Reject.
 */
4636 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4637 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4640 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4641 struct l2cap_disconn_rsp rsp;
4643 struct l2cap_chan *chan;
/* Disconnection Request has a fixed size; anything else is malformed. */
4645 if (cmd_len != sizeof(*req))
4648 scid = __le16_to_cpu(req->scid);
4649 dcid = __le16_to_cpu(req->dcid);
4651 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4653 mutex_lock(&conn->chan_lock);
4655 chan = __l2cap_get_chan_by_scid(conn, dcid);
4657 mutex_unlock(&conn->chan_lock);
4658 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
/* Hold a reference while tearing the channel down. */
4662 l2cap_chan_hold(chan);
4663 l2cap_chan_lock(chan);
4665 rsp.dcid = cpu_to_le16(chan->scid);
4666 rsp.scid = cpu_to_le16(chan->dcid);
4667 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4669 chan->ops->set_shutdown(chan);
4671 l2cap_chan_del(chan, ECONNRESET);
4673 chan->ops->close(chan);
4675 l2cap_chan_unlock(chan);
4676 l2cap_chan_put(chan);
4678 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnection Response.
 * Completes a disconnect we initiated: the channel must be in
 * BT_DISCONN, otherwise the response is ignored.
 */
4683 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4684 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4687 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4689 struct l2cap_chan *chan;
4691 if (cmd_len != sizeof(*rsp))
4694 scid = __le16_to_cpu(rsp->scid);
4695 dcid = __le16_to_cpu(rsp->dcid);
4697 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4699 mutex_lock(&conn->chan_lock);
4701 chan = __l2cap_get_chan_by_scid(conn, scid);
4703 mutex_unlock(&conn->chan_lock);
4707 l2cap_chan_hold(chan);
4708 l2cap_chan_lock(chan);
/* Only act on a response if we actually initiated a disconnect. */
4710 if (chan->state != BT_DISCONN) {
4711 l2cap_chan_unlock(chan);
4712 l2cap_chan_put(chan);
4713 mutex_unlock(&conn->chan_lock);
/* err 0: clean, expected disconnect. */
4717 l2cap_chan_del(chan, 0);
4719 chan->ops->close(chan);
4721 l2cap_chan_unlock(chan);
4722 l2cap_chan_put(chan);
4724 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request.
 * Answers feature-mask and fixed-channel queries; any other type gets
 * an L2CAP_IR_NOTSUPP response.
 */
4729 static inline int l2cap_information_req(struct l2cap_conn *conn,
4730 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4733 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4736 if (cmd_len != sizeof(*req))
4739 type = __le16_to_cpu(req->type);
4741 BT_DBG("type 0x%4.4x", type);
4743 if (type == L2CAP_IT_FEAT_MASK) {
4745 u32 feat_mask = l2cap_feat_mask;
4746 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4747 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4748 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4750 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow spec / window sizes are advertised only with A2MP. */
4752 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4753 feat_mask |= L2CAP_FEAT_EXT_FLOW
4754 | L2CAP_FEAT_EXT_WINDOW;
4756 put_unaligned_le32(feat_mask, rsp->data);
4757 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4759 } else if (type == L2CAP_IT_FIXED_CHAN) {
4761 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4763 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4764 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel map: first octet is ours, remaining 7 are zero. */
4765 rsp->data[0] = conn->local_fixed_chan;
4766 memset(rsp->data + 1, 0, 7);
4767 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
/* Unknown info type: reply "not supported". */
4770 struct l2cap_info_rsp rsp;
4771 rsp.type = cpu_to_le16(type);
4772 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4773 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming Information Response.
 * Validated against the outstanding request ident (info req/rsp are not
 * bound to a channel).  A feature-mask answer may chain a fixed-channel
 * query; once all info is gathered (or on failure) the connection-level
 * info exchange is marked done and pending channels are started.
 */
4780 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4781 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4784 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4787 if (cmd_len < sizeof(*rsp))
4790 type = __le16_to_cpu(rsp->type);
4791 result = __le16_to_cpu(rsp->result);
4793 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4795 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4796 if (cmd->ident != conn->info_ident ||
4797 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4800 cancel_delayed_work(&conn->info_timer);
/* Peer refused the query: give up on info and start channels anyway. */
4802 if (result != L2CAP_IR_SUCCESS) {
4803 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4804 conn->info_ident = 0;
4806 l2cap_conn_start(conn);
4812 case L2CAP_IT_FEAT_MASK:
4813 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If fixed channels are supported, follow up with that query. */
4815 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4816 struct l2cap_info_req req;
4817 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4819 conn->info_ident = l2cap_get_ident(conn);
4821 l2cap_send_cmd(conn, conn->info_ident,
4822 L2CAP_INFO_REQ, sizeof(req), &req);
4824 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4825 conn->info_ident = 0;
4827 l2cap_conn_start(conn);
4831 case L2CAP_IT_FIXED_CHAN:
4832 conn->remote_fixed_chan = rsp->data[0];
4833 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4834 conn->info_ident = 0;
4836 l2cap_conn_start(conn);
/* Handle an incoming Create Channel Request (AMP).
 * amp_id 0 (AMP_ID_BREDR) falls back to a regular BR/EDR connect.
 * Otherwise the AMP controller id is validated, the channel is created,
 * and the existing AMP physical link (hs_hcon) is attached to it.
 * Invalid controller ids are answered with L2CAP_CR_BAD_AMP.
 */
4843 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4844 struct l2cap_cmd_hdr *cmd,
4845 u16 cmd_len, void *data)
4847 struct l2cap_create_chan_req *req = data;
4848 struct l2cap_create_chan_rsp rsp;
4849 struct l2cap_chan *chan;
4850 struct hci_dev *hdev;
4853 if (cmd_len != sizeof(*req))
/* Only meaningful when we advertise A2MP support. */
4856 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4859 psm = le16_to_cpu(req->psm);
4860 scid = le16_to_cpu(req->scid);
4862 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4864 /* For controller id 0 make BR/EDR connection */
4865 if (req->amp_id == AMP_ID_BREDR) {
4866 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4871 /* Validate AMP controller id */
4872 hdev = hci_dev_get(req->amp_id);
4876 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4881 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4884 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4885 struct hci_conn *hs_hcon;
/* Look up the already-established AMP physical link to this peer. */
4887 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4891 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4896 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4898 mgr->bredr_chan = chan;
4899 chan->hs_hcon = hs_hcon;
/* No FCS on AMP links; use the controller's block MTU. */
4900 chan->fcs = L2CAP_FCS_NONE;
4901 conn->mtu = hdev->block_mtu;
/* Error path: reject with "bad AMP controller id". */
4910 rsp.scid = cpu_to_le16(scid);
4911 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4912 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4914 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request towards the given destination AMP
 * controller and arm the move timeout timer.
 */
4920 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4922 struct l2cap_move_chan_req req;
4925 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
/* Remember the ident so the matching response can be correlated. */
4927 ident = l2cap_get_ident(chan->conn);
4928 chan->ident = ident;
4930 req.icid = cpu_to_le16(chan->scid);
4931 req.dest_amp_id = dest_amp_id;
4933 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4936 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with the given result, reusing the
 * ident of the request we are answering.
 */
4939 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4941 struct l2cap_move_chan_rsp rsp;
4943 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4945 rsp.icid = cpu_to_le16(chan->dcid);
4946 rsp.result = cpu_to_le16(result);
4948 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirmation with a fresh ident and arm the
 * move timeout while waiting for the confirmation response.
 */
4952 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4954 struct l2cap_move_chan_cfm cfm;
4956 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4958 chan->ident = l2cap_get_ident(chan->conn);
4960 cfm.icid = cpu_to_le16(chan->scid);
4961 cfm.result = cpu_to_le16(result);
4963 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4966 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an "unconfirmed" Move Channel Confirmation for a bare icid —
 * used when no channel object could be located for the move.
 */
4969 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4971 struct l2cap_move_chan_cfm cfm;
4973 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4975 cfm.icid = cpu_to_le16(icid);
4976 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4978 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirmation Response for the given icid,
 * echoing the ident of the confirmation being answered.
 */
4982 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4985 struct l2cap_move_chan_cfm_rsp rsp;
4987 BT_DBG("icid 0x%4.4x", icid);
4989 rsp.icid = cpu_to_le16(icid);
4990 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Detach the channel from its AMP logical link (hchan/hcon pointers
 * are cleared; actual link release is still a placeholder).
 */
4993 static void __release_logical_link(struct l2cap_chan *chan)
4995 chan->hs_hchan = NULL;
4996 chan->hs_hcon = NULL;
4998 /* Placeholder - release the logical link */
/* Clean up after a failed AMP logical-link setup.
 * A channel that never reached BT_CONNECTED is simply disconnected;
 * otherwise the in-progress move is unwound according to our role.
 */
5001 static void l2cap_logical_fail(struct l2cap_chan *chan)
5003 /* Logical link setup failed */
5004 if (chan->state != BT_CONNECTED) {
5005 /* Create channel failure, disconnect */
5006 l2cap_send_disconn_req(chan, ECONNRESET);
5010 switch (chan->move_role) {
5011 case L2CAP_MOVE_ROLE_RESPONDER:
5012 l2cap_move_done(chan);
5013 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
5015 case L2CAP_MOVE_ROLE_INITIATOR:
5016 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
5017 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
5018 /* Remote has only sent pending or
5019 * success responses, clean up
5021 l2cap_move_done(chan);
5024 /* Other amp move states imply that the move
5025 * has already aborted
5027 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up: attach
 * the hci_chan, answer the pending EFS Configure exchange, and if the
 * input side is already configured, finish ERTM init and go ready.
 */
5032 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
5033 struct hci_chan *hchan)
5035 struct l2cap_conf_rsp rsp;
5037 chan->hs_hchan = hchan;
5038 chan->hs_hcon->l2cap_data = chan->conn;
5040 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
5042 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
5045 set_default_fcs(chan);
5047 err = l2cap_ertm_init(chan);
5049 l2cap_send_disconn_req(chan, -err);
5051 l2cap_chan_ready(chan);
/* Advance an in-progress channel move after the AMP logical link came
 * up, based on the current move state and our role.  An unexpected
 * state releases the link and stabilizes the channel.
 */
5055 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5056 struct hci_chan *hchan)
5058 chan->hs_hcon = hchan->conn;
5059 chan->hs_hcon->l2cap_data = chan->conn;
5061 BT_DBG("move_state %d", chan->move_state);
5063 switch (chan->move_state) {
5064 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5065 /* Move confirm will be sent after a success
5066 * response is received
5068 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5070 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Local busy defers; otherwise confirm (initiator) or respond
 * success (responder) immediately.
 */
5071 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5072 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5073 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5074 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5075 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5076 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5077 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5078 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5082 /* Move was not in expected state, free the channel */
5083 __release_logical_link(chan);
5085 chan->move_state = L2CAP_MOVE_STABLE;
/* Call with chan locked */
/* AMP logical-link confirmation callback.
 * On failure, unwinds via l2cap_logical_fail() and releases the link;
 * on success, dispatches to the create-completion path (channel not
 * yet connected) or the move-completion path (connected channel).
 */
5090 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5093 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5096 l2cap_logical_fail(chan);
5097 __release_logical_link(chan);
5101 if (chan->state != BT_CONNECTED) {
5102 /* Ignore logical link if channel is on BR/EDR */
5103 if (chan->local_amp_id != AMP_ID_BREDR)
5104 l2cap_logical_finish_create(chan, hchan);
5106 l2cap_logical_finish_move(chan, hchan);
/* Start moving this channel to the other controller type.
 * From BR/EDR (only if policy prefers AMP): begin physical link setup.
 * From AMP: set up the move and send a Move Channel Request targeting
 * BR/EDR (dest_amp_id 0).
 */
5110 void l2cap_move_start(struct l2cap_chan *chan)
5112 BT_DBG("chan %p", chan);
5114 if (chan->local_amp_id == AMP_ID_BREDR) {
/* Respect channel policy: no move unless AMP is preferred. */
5115 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5117 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5118 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5119 /* Placeholder - start physical link setup */
5121 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5122 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5124 l2cap_move_setup(chan);
5125 l2cap_send_move_chan_req(chan, 0);
/* Continue AMP channel creation after the physical link result.
 * Outgoing (BT_CONNECT): send Create Channel Request on success, or
 * fall back to a plain BR/EDR Connect Request.
 * Incoming: answer the pending Create Channel Request with success or
 * no-resources, then start configuration on success.
 */
5129 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5130 u8 local_amp_id, u8 remote_amp_id)
5132 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5133 local_amp_id, remote_amp_id);
/* AMP channels never use FCS. */
5135 chan->fcs = L2CAP_FCS_NONE;
5137 /* Outgoing channel on AMP */
5138 if (chan->state == BT_CONNECT) {
5139 if (result == L2CAP_CR_SUCCESS) {
5140 chan->local_amp_id = local_amp_id;
5141 l2cap_send_create_chan_req(chan, remote_amp_id);
5143 /* Revert to BR/EDR connect */
5144 l2cap_send_conn_req(chan);
5150 /* Incoming channel on AMP */
5151 if (__l2cap_no_conn_pending(chan)) {
5152 struct l2cap_conn_rsp rsp;
5154 rsp.scid = cpu_to_le16(chan->dcid);
5155 rsp.dcid = cpu_to_le16(chan->scid);
5157 if (result == L2CAP_CR_SUCCESS) {
5158 /* Send successful response */
5159 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5160 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5162 /* Send negative response */
5163 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5164 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5167 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
/* On success, move to configuration and send our Configure Request. */
5170 if (result == L2CAP_CR_SUCCESS) {
5171 l2cap_state_change(chan, BT_CONFIG);
5172 set_bit(CONF_REQ_SENT, &chan->conf_state);
5173 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5175 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5176 chan->num_conf_req++;
/* Initiator side: set up the move state and send the Move Channel
 * Request towards the remote controller.
 */
5181 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5184 l2cap_move_setup(chan);
5185 chan->move_id = local_amp_id;
5186 chan->move_state = L2CAP_MOVE_WAIT_RSP;
5188 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer a Move Channel Request depending on whether
 * the AMP logical link is already connected, still coming up, or
 * unavailable.  (hchan lookup is still a placeholder.)
 */
5191 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5193 struct hci_chan *hchan = NULL;
5195 /* Placeholder - get hci_chan for logical link */
5198 if (hchan->state == BT_CONNECTED) {
5199 /* Logical link is ready to go */
5200 chan->hs_hcon = hchan->conn;
5201 chan->hs_hcon->l2cap_data = chan->conn;
5202 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5203 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5205 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5207 /* Wait for logical link to be ready */
5208 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5211 /* Logical link not available */
5212 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress move: as responder, reply with an error result
 * (bad id for -EINVAL, otherwise not-allowed), then clear the move
 * state and resume ERTM transmission.
 */
5216 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5218 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5220 if (result == -EINVAL)
5221 rsp_result = L2CAP_MR_BAD_ID;
5223 rsp_result = L2CAP_MR_NOT_ALLOWED;
5225 l2cap_send_move_chan_rsp(chan, rsp_result);
5228 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5229 chan->move_state = L2CAP_MOVE_STABLE;
5231 /* Restart data transmission */
5232 l2cap_ertm_send(chan);
/* Invoke with locked chan */
/* AMP physical-link confirmation: route the result to the create path
 * (channel not yet connected), the cancel path (failure), or the
 * role-specific move continuation.  Nothing to do once the channel is
 * disconnecting or closed.
 */
5236 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5238 u8 local_amp_id = chan->local_amp_id;
5239 u8 remote_amp_id = chan->remote_amp_id;
5241 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5242 chan, result, local_amp_id, remote_amp_id);
5244 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5247 if (chan->state != BT_CONNECTED) {
5248 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5249 } else if (result != L2CAP_MR_SUCCESS) {
5250 l2cap_do_move_cancel(chan, result);
5252 switch (chan->move_role) {
5253 case L2CAP_MOVE_ROLE_INITIATOR:
5254 l2cap_do_move_initiate(chan, local_amp_id,
5257 case L2CAP_MOVE_ROLE_RESPONDER:
5258 l2cap_do_move_respond(chan, result);
/* Unknown role: treat as a failed move. */
5261 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request.
 * Validates the channel (dynamic CID, ERTM/streaming mode, policy),
 * the destination controller id, and detects move collisions (larger
 * bd_addr wins).  On acceptance, sets up responder state and replies
 * success/pending depending on the destination and local-busy status.
 */
5267 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5268 struct l2cap_cmd_hdr *cmd,
5269 u16 cmd_len, void *data)
5271 struct l2cap_move_chan_req *req = data;
5272 struct l2cap_move_chan_rsp rsp;
5273 struct l2cap_chan *chan;
5275 u16 result = L2CAP_MR_NOT_ALLOWED;
5277 if (cmd_len != sizeof(*req))
5280 icid = le16_to_cpu(req->icid);
5282 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
/* Moves only make sense when we advertise A2MP support. */
5284 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5287 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown icid: answer not-allowed without a channel. */
5289 rsp.icid = cpu_to_le16(icid);
5290 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5291 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5296 chan->ident = cmd->ident;
/* Only dynamic, ERTM/streaming, non-BR/EDR-only channels may move. */
5298 if (chan->scid < L2CAP_CID_DYN_START ||
5299 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5300 (chan->mode != L2CAP_MODE_ERTM &&
5301 chan->mode != L2CAP_MODE_STREAMING)) {
5302 result = L2CAP_MR_NOT_ALLOWED;
5303 goto send_move_response;
5306 if (chan->local_amp_id == req->dest_amp_id) {
5307 result = L2CAP_MR_SAME_ID;
5308 goto send_move_response;
/* Destination AMP controller must exist, be an AMP device, and be up. */
5311 if (req->dest_amp_id != AMP_ID_BREDR) {
5312 struct hci_dev *hdev;
5313 hdev = hci_dev_get(req->dest_amp_id);
5314 if (!hdev || hdev->dev_type != HCI_AMP ||
5315 !test_bit(HCI_UP, &hdev->flags)) {
5319 result = L2CAP_MR_BAD_ID;
5320 goto send_move_response;
5325 /* Detect a move collision. Only send a collision response
5326 * if this side has "lost", otherwise proceed with the move.
5327 * The winner has the larger bd_addr.
5329 if ((__chan_is_moving(chan) ||
5330 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5331 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5332 result = L2CAP_MR_COLLISION;
5333 goto send_move_response;
5336 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5337 l2cap_move_setup(chan);
5338 chan->move_id = req->dest_amp_id;
5340 if (req->dest_amp_id == AMP_ID_BREDR) {
5341 /* Moving to BR/EDR */
5342 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5343 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5344 result = L2CAP_MR_PEND;
5346 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5347 result = L2CAP_MR_SUCCESS;
/* Moving to an AMP controller: physical link must be prepared first. */
5350 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5351 /* Placeholder - uncomment when amp functions are available */
5352 /*amp_accept_physical(chan, req->dest_amp_id);*/
5353 result = L2CAP_MR_PEND;
5357 l2cap_send_move_chan_rsp(chan, result);
5359 l2cap_chan_unlock(chan);
5360 l2cap_chan_put(chan);
/* Continue an initiator-side move after a success/pending Move Channel
 * Response, advancing the move state machine.  When no channel matches
 * the icid, an unconfirmed confirmation is sent as a best guess.
 */
5365 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5367 struct l2cap_chan *chan;
5368 struct hci_chan *hchan = NULL;
5370 chan = l2cap_get_chan_by_scid(conn, icid);
5372 l2cap_send_move_chan_cfm_icid(conn, icid);
/* Re-arm the move timer only while the peer reports pending. */
5376 __clear_chan_timer(chan);
5377 if (result == L2CAP_MR_PEND)
5378 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5380 switch (chan->move_state) {
5381 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5382 /* Move confirm will be sent when logical link
5385 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5387 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5388 if (result == L2CAP_MR_PEND) {
5390 } else if (test_bit(CONN_LOCAL_BUSY,
5391 &chan->conn_state)) {
5392 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5394 /* Logical link is up or moving to BR/EDR,
5397 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5398 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5401 case L2CAP_MOVE_WAIT_RSP:
5403 if (result == L2CAP_MR_SUCCESS) {
5404 /* Remote is ready, send confirm immediately
5405 * after logical link is ready
5407 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5409 /* Both logical link and move success
5410 * are required to confirm
5412 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5415 /* Placeholder - get hci_chan for logical link */
5417 /* Logical link not available */
5418 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5422 /* If the logical link is not yet connected, do not
5423 * send confirmation.
5425 if (hchan->state != BT_CONNECTED)
5428 /* Logical link is already ready to go */
5430 chan->hs_hcon = hchan->conn;
5431 chan->hs_hcon->l2cap_data = chan->conn;
5433 if (result == L2CAP_MR_SUCCESS) {
5434 /* Can confirm now */
5435 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5437 /* Now only need move success
5440 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5443 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5446 /* Any other amp move state means the move failed. */
5447 chan->move_id = chan->local_amp_id;
5448 l2cap_move_done(chan);
5449 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5452 l2cap_chan_unlock(chan);
5453 l2cap_chan_put(chan);
/* Handle a failed Move Channel Response.  On a collision the initiator
 * becomes the responder; otherwise the move is cancelled and an
 * unconfirmed confirmation is sent.  Falls back to a bare-icid
 * confirmation if the channel cannot be located by ident.
 */
5456 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5459 struct l2cap_chan *chan;
5461 chan = l2cap_get_chan_by_ident(conn, ident);
5463 /* Could not locate channel, icid is best guess */
5464 l2cap_send_move_chan_cfm_icid(conn, icid);
5468 __clear_chan_timer(chan);
5470 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5471 if (result == L2CAP_MR_COLLISION) {
/* We lost the collision: yield initiator role to the peer. */
5472 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5474 /* Cleanup - cancel move */
5475 chan->move_id = chan->local_amp_id;
5476 l2cap_move_done(chan);
5480 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5482 l2cap_chan_unlock(chan);
5483 l2cap_chan_put(chan);
/* Handle an incoming Move Channel Response: success/pending continues
 * the move, any other result aborts it.
 */
5486 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5487 struct l2cap_cmd_hdr *cmd,
5488 u16 cmd_len, void *data)
5490 struct l2cap_move_chan_rsp *rsp = data;
5493 if (cmd_len != sizeof(*rsp))
5496 icid = le16_to_cpu(rsp->icid);
5497 result = le16_to_cpu(rsp->result);
5499 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5501 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5502 l2cap_move_continue(conn, icid, result);
5504 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Confirmation (responder side).
 * A confirmed result commits the move (releasing the AMP logical link
 * when the target is BR/EDR); unconfirmed reverts to the current
 * controller.  A response is always sent, even for an unknown icid.
 */
5509 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5510 struct l2cap_cmd_hdr *cmd,
5511 u16 cmd_len, void *data)
5513 struct l2cap_move_chan_cfm *cfm = data;
5514 struct l2cap_chan *chan;
5517 if (cmd_len != sizeof(*cfm))
5520 icid = le16_to_cpu(cfm->icid);
5521 result = le16_to_cpu(cfm->result);
5523 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5525 chan = l2cap_get_chan_by_dcid(conn, icid);
5527 /* Spec requires a response even if the icid was not found */
5528 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5532 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5533 if (result == L2CAP_MC_CONFIRMED) {
/* Commit: the channel now lives on the move target controller. */
5534 chan->local_amp_id = chan->move_id;
5535 if (chan->local_amp_id == AMP_ID_BREDR)
5536 __release_logical_link(chan);
/* Unconfirmed: revert the pending move target. */
5538 chan->move_id = chan->local_amp_id;
5541 l2cap_move_done(chan);
5544 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5546 l2cap_chan_unlock(chan);
5547 l2cap_chan_put(chan);
/* Handle an incoming Move Channel Confirmation Response (initiator
 * side): finalizes the move, releasing the AMP logical link if we
 * moved back to BR/EDR.
 */
5552 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5553 struct l2cap_cmd_hdr *cmd,
5554 u16 cmd_len, void *data)
5556 struct l2cap_move_chan_cfm_rsp *rsp = data;
5557 struct l2cap_chan *chan;
5560 if (cmd_len != sizeof(*rsp))
5563 icid = le16_to_cpu(rsp->icid);
5565 BT_DBG("icid 0x%4.4x", icid);
5567 chan = l2cap_get_chan_by_scid(conn, icid);
5571 __clear_chan_timer(chan);
5573 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5574 chan->local_amp_id = chan->move_id;
/* Back on BR/EDR with a logical link still attached: release it. */
5576 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5577 __release_logical_link(chan);
5579 l2cap_move_done(chan);
5582 l2cap_chan_unlock(chan);
5583 l2cap_chan_put(chan);
/* Handle an LE Connection Parameter Update Request.
 * Only valid when we are the master; parameters are validated with
 * hci_check_conn_params() and, if accepted, applied via
 * hci_le_conn_update() and reported to the management interface.
 */
5588 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5589 struct l2cap_cmd_hdr *cmd,
5590 u16 cmd_len, u8 *data)
5592 struct hci_conn *hcon = conn->hcon;
5593 struct l2cap_conn_param_update_req *req;
5594 struct l2cap_conn_param_update_rsp rsp;
5595 u16 min, max, latency, to_multiplier;
/* Slaves must not receive this request. */
5598 if (hcon->role != HCI_ROLE_MASTER)
5601 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5604 req = (struct l2cap_conn_param_update_req *) data;
5605 min = __le16_to_cpu(req->min);
5606 max = __le16_to_cpu(req->max);
5607 latency = __le16_to_cpu(req->latency);
5608 to_multiplier = __le16_to_cpu(req->to_multiplier);
5610 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5611 min, max, latency, to_multiplier);
5613 memset(&rsp, 0, sizeof(rsp));
5615 err = hci_check_conn_params(min, max, latency, to_multiplier);
5617 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5619 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5621 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the new parameters to the controller and mgmt. */
5627 store_hint = hci_le_conn_update(hcon, min, max, latency,
5629 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5630 store_hint, min, max, latency,
/* Handle an LE Credit Based Connection Response.
 * Validates the returned dcid/mtu/mps ranges, then on success records
 * the remote parameters and readies the channel; on an authentication
 * or encryption failure it raises the security level and retries via
 * SMP (unless MITM protection is already in place); any other result
 * refuses the connection.
 */
5638 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5639 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5642 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5643 struct hci_conn *hcon = conn->hcon;
5644 u16 dcid, mtu, mps, credits, result;
5645 struct l2cap_chan *chan;
5648 if (cmd_len < sizeof(*rsp))
5651 dcid = __le16_to_cpu(rsp->dcid);
5652 mtu = __le16_to_cpu(rsp->mtu);
5653 mps = __le16_to_cpu(rsp->mps);
5654 credits = __le16_to_cpu(rsp->credits);
5655 result = __le16_to_cpu(rsp->result);
/* Spec minimums (23) for mtu/mps and the LE dynamic CID range. */
5657 if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5658 dcid < L2CAP_CID_DYN_START ||
5659 dcid > L2CAP_CID_LE_DYN_END))
5662 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5663 dcid, mtu, mps, credits, result);
5665 mutex_lock(&conn->chan_lock);
5667 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5675 l2cap_chan_lock(chan);
5678 case L2CAP_CR_LE_SUCCESS:
/* Reject a dcid that is already in use on this connection. */
5679 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5687 chan->remote_mps = mps;
5688 chan->tx_credits = credits;
5689 l2cap_chan_ready(chan);
5692 case L2CAP_CR_LE_AUTHENTICATION:
5693 case L2CAP_CR_LE_ENCRYPTION:
5694 /* If we already have MITM protection we can't do
5697 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5698 l2cap_chan_del(chan, ECONNREFUSED);
/* Bump security one level and retry the connect via SMP. */
5702 sec_level = hcon->sec_level + 1;
5703 if (chan->sec_level < sec_level)
5704 chan->sec_level = sec_level;
5706 /* We'll need to send a new Connect Request */
5707 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5709 smp_conn_security(hcon, chan->sec_level);
5713 l2cap_chan_del(chan, ECONNREFUSED);
5717 l2cap_chan_unlock(chan);
5720 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signalling command to its specific handler.
 * Echo requests are answered inline by bouncing the payload back;
 * unknown opcodes are only logged here.
 */
5725 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5726 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5731 switch (cmd->code) {
5732 case L2CAP_COMMAND_REJ:
5733 l2cap_command_rej(conn, cmd, cmd_len, data);
5736 case L2CAP_CONN_REQ:
5737 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5740 case L2CAP_CONN_RSP:
5741 case L2CAP_CREATE_CHAN_RSP:
5742 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5745 case L2CAP_CONF_REQ:
5746 err = l2cap_config_req(conn, cmd, cmd_len, data);
5749 case L2CAP_CONF_RSP:
5750 l2cap_config_rsp(conn, cmd, cmd_len, data);
5753 case L2CAP_DISCONN_REQ:
5754 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5757 case L2CAP_DISCONN_RSP:
5758 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5761 case L2CAP_ECHO_REQ:
/* Echo: reply with the request's own payload */
5762 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5765 case L2CAP_ECHO_RSP:
5768 case L2CAP_INFO_REQ:
5769 err = l2cap_information_req(conn, cmd, cmd_len, data);
5772 case L2CAP_INFO_RSP:
5773 l2cap_information_rsp(conn, cmd, cmd_len, data);
5776 case L2CAP_CREATE_CHAN_REQ:
5777 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5780 case L2CAP_MOVE_CHAN_REQ:
5781 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5784 case L2CAP_MOVE_CHAN_RSP:
5785 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5788 case L2CAP_MOVE_CHAN_CFM:
5789 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5792 case L2CAP_MOVE_CHAN_CFM_RSP:
5793 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5797 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an incoming LE Credit Based Connection Request.  Validates
 * PSM/MTU/MPS/SCID, finds a listening socket for the PSM, checks
 * security, creates a new channel from the listener, initializes LE
 * flow control and replies with an L2CAP_LE_CONN_RSP (unless setup is
 * deferred to userspace).
 */
5805 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5806 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5809 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5810 struct l2cap_le_conn_rsp rsp;
5811 struct l2cap_chan *chan, *pchan;
5812 u16 dcid, scid, credits, mtu, mps;
5816 if (cmd_len != sizeof(*req))
5819 scid = __le16_to_cpu(req->scid);
5820 mtu = __le16_to_cpu(req->mtu);
5821 mps = __le16_to_cpu(req->mps);
/* 23 is the LE CoC minimum for both MTU and MPS */
5826 if (mtu < 23 || mps < 23)
5829 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5832 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5835 * Valid range: 0x0001-0x00ff
5837 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5839 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5840 result = L2CAP_CR_LE_BAD_PSM;
5845 /* Check if we have socket listening on psm */
5846 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5847 &conn->hcon->dst, LE_LINK);
5849 result = L2CAP_CR_LE_BAD_PSM;
5854 mutex_lock(&conn->chan_lock);
5855 l2cap_chan_lock(pchan);
5857 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5859 result = L2CAP_CR_LE_AUTHENTICATION;
5861 goto response_unlock;
5864 /* Check for valid dynamic CID range */
5865 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5866 result = L2CAP_CR_LE_INVALID_SCID;
5868 goto response_unlock;
5871 /* Check if we already have channel with that dcid */
5872 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5873 result = L2CAP_CR_LE_SCID_IN_USE;
5875 goto response_unlock;
5878 chan = pchan->ops->new_connection(pchan);
5880 result = L2CAP_CR_LE_NO_MEM;
5881 goto response_unlock;
5884 bacpy(&chan->src, &conn->hcon->src);
5885 bacpy(&chan->dst, &conn->hcon->dst);
5886 chan->src_type = bdaddr_src_type(conn->hcon);
5887 chan->dst_type = bdaddr_dst_type(conn->hcon);
5891 chan->remote_mps = mps;
5893 __l2cap_chan_add(conn, chan);
/* Seed rx/tx credit accounting from the peer's initial credits */
5895 l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5898 credits = chan->rx_credits;
5900 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5902 chan->ident = cmd->ident;
5904 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5905 l2cap_state_change(chan, BT_CONNECT2);
5906 /* The following result value is actually not defined
5907 * for LE CoC but we use it to let the function know
5908 * that it should bail out after doing its cleanup
5909 * instead of sending a response.
5911 result = L2CAP_CR_PEND;
5912 chan->ops->defer(chan);
5914 l2cap_chan_ready(chan);
5915 result = L2CAP_CR_LE_SUCCESS;
5919 l2cap_chan_unlock(pchan);
5920 mutex_unlock(&conn->chan_lock);
5921 l2cap_chan_put(pchan);
/* L2CAP_CR_PEND: response is deferred, skip sending one now */
5923 if (result == L2CAP_CR_PEND)
5928 rsp.mtu = cpu_to_le16(chan->imtu);
5929 rsp.mps = cpu_to_le16(chan->mps);
5935 rsp.dcid = cpu_to_le16(dcid);
5936 rsp.credits = cpu_to_le16(credits);
5937 rsp.result = cpu_to_le16(result);
5939 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an L2CAP_LE_CREDITS (LE Flow Control Credit) packet: add the
 * granted credits to the channel's TX budget and resume transmission.
 * A grant that would overflow LE_FLOWCTL_MAX_CREDITS is a protocol
 * error and disconnects the channel.
 */
5944 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5945 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5948 struct l2cap_le_credits *pkt;
5949 struct l2cap_chan *chan;
5950 u16 cid, credits, max_credits;
5952 if (cmd_len != sizeof(*pkt))
5955 pkt = (struct l2cap_le_credits *) data;
5956 cid = __le16_to_cpu(pkt->cid);
5957 credits = __le16_to_cpu(pkt->credits);
5959 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5961 chan = l2cap_get_chan_by_dcid(conn, cid);
/* Reject a grant that would push tx_credits past the allowed maximum */
5965 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5966 if (credits > max_credits) {
5967 BT_ERR("LE credits overflow");
5968 l2cap_send_disconn_req(chan, ECONNRESET);
5970 /* Return 0 so that we don't trigger an unnecessary
5971 * command reject packet.
5976 chan->tx_credits += credits;
5978 /* Resume sending */
5979 l2cap_le_flowctl_send(chan);
5981 if (chan->tx_credits)
5982 chan->ops->resume(chan);
5985 l2cap_chan_unlock(chan);
5986 l2cap_chan_put(chan);
/* Handle an Enhanced Credit Based Connection Request, which may open up
 * to L2CAP_ECRED_MAX_CID channels at once.  The payload is a fixed
 * header plus a list of 16-bit SCIDs; each SCID is validated and, when
 * possible, bound to a new channel spawned from the listening socket.
 * The per-channel DCIDs (0 on failure) and a single result are returned
 * in one L2CAP_ECRED_CONN_RSP.
 */
5991 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5992 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5995 struct l2cap_ecred_conn_req *req = (void *) data;
5997 struct l2cap_ecred_conn_rsp rsp;
5998 __le16 dcid[L2CAP_ECRED_MAX_CID];
6000 struct l2cap_chan *chan, *pchan;
/* Payload must hold the fixed header plus a whole number of u16 SCIDs */
6010 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6011 result = L2CAP_CR_LE_INVALID_PARAMS;
6015 cmd_len -= sizeof(*req);
6016 num_scid = cmd_len / sizeof(u16);
6018 if (num_scid > ARRAY_SIZE(pdu.dcid)) {
6019 result = L2CAP_CR_LE_INVALID_PARAMS;
6023 mtu = __le16_to_cpu(req->mtu);
6024 mps = __le16_to_cpu(req->mps);
6026 if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
6027 result = L2CAP_CR_LE_UNACCEPT_PARAMS;
6033 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
6036 * Valid range: 0x0001-0x00ff
6038 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
6040 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
6041 result = L2CAP_CR_LE_BAD_PSM;
6045 BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
6047 memset(&pdu, 0, sizeof(pdu));
6049 /* Check if we have socket listening on psm */
6050 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
6051 &conn->hcon->dst, LE_LINK);
6053 result = L2CAP_CR_LE_BAD_PSM;
6057 mutex_lock(&conn->chan_lock);
6058 l2cap_chan_lock(pchan);
6060 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
6062 result = L2CAP_CR_LE_AUTHENTICATION;
6066 result = L2CAP_CR_LE_SUCCESS;
6068 for (i = 0; i < num_scid; i++) {
6069 u16 scid = __le16_to_cpu(req->scid[i]);
6071 BT_DBG("scid[%d] 0x%4.4x", i, scid);
/* Default DCID of 0 in the response marks this SCID as refused */
6073 pdu.dcid[i] = 0x0000;
6074 len += sizeof(*pdu.dcid);
6076 /* Check for valid dynamic CID range */
6077 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
6078 result = L2CAP_CR_LE_INVALID_SCID;
6082 /* Check if we already have channel with that dcid */
6083 if (__l2cap_get_chan_by_dcid(conn, scid)) {
6084 result = L2CAP_CR_LE_SCID_IN_USE;
6088 chan = pchan->ops->new_connection(pchan);
6090 result = L2CAP_CR_LE_NO_MEM;
6094 bacpy(&chan->src, &conn->hcon->src);
6095 bacpy(&chan->dst, &conn->hcon->dst);
6096 chan->src_type = bdaddr_src_type(conn->hcon);
6097 chan->dst_type = bdaddr_dst_type(conn->hcon);
6101 chan->remote_mps = mps;
6103 __l2cap_chan_add(conn, chan);
6105 l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
/* Fill the shared MTU/MPS/credits once, from the first channel */
6108 if (!pdu.rsp.credits) {
6109 pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6110 pdu.rsp.mps = cpu_to_le16(chan->mps);
6111 pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6114 pdu.dcid[i] = cpu_to_le16(chan->scid);
6116 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6118 chan->ident = cmd->ident;
6120 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6121 l2cap_state_change(chan, BT_CONNECT2);
6123 chan->ops->defer(chan);
6125 l2cap_chan_ready(chan);
6130 l2cap_chan_unlock(pchan);
6131 mutex_unlock(&conn->chan_lock);
6132 l2cap_chan_put(pchan);
6135 pdu.rsp.result = cpu_to_le16(result);
6140 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6141 sizeof(pdu.rsp) + len, &pdu);
/* Handle an Enhanced Credit Based Connection Response.  All channels
 * pending under the same command ident are walked; each consumes one
 * DCID from the response.  Success stores the shared MTU/MPS/credits
 * and readies the channel; security failures trigger an SMP retry;
 * other results (or a zero DCID) refuse the channel.
 */
6146 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6147 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6150 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6151 struct hci_conn *hcon = conn->hcon;
6152 u16 mtu, mps, credits, result;
6153 struct l2cap_chan *chan, *tmp;
6154 int err = 0, sec_level;
6157 if (cmd_len < sizeof(*rsp))
6160 mtu = __le16_to_cpu(rsp->mtu);
6161 mps = __le16_to_cpu(rsp->mps);
6162 credits = __le16_to_cpu(rsp->credits);
6163 result = __le16_to_cpu(rsp->result);
6165 BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6168 mutex_lock(&conn->chan_lock);
/* cmd_len now counts the remaining DCID list bytes */
6170 cmd_len -= sizeof(*rsp);
6172 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6175 if (chan->ident != cmd->ident ||
6176 chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6177 chan->state == BT_CONNECTED)
6180 l2cap_chan_lock(chan);
6182 /* Check that there is a dcid for each pending channel */
6183 if (cmd_len < sizeof(dcid)) {
6184 l2cap_chan_del(chan, ECONNREFUSED);
6185 l2cap_chan_unlock(chan);
6189 dcid = __le16_to_cpu(rsp->dcid[i++]);
6190 cmd_len -= sizeof(u16);
6192 BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6194 /* Check if dcid is already in use */
6195 if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6196 /* If a device receives a
6197 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6198 * already-assigned Destination CID, then both the
6199 * original channel and the new channel shall be
6200 * immediately discarded and not used.
6202 l2cap_chan_del(chan, ECONNREFUSED);
6203 l2cap_chan_unlock(chan);
6204 chan = __l2cap_get_chan_by_dcid(conn, dcid);
6205 l2cap_chan_lock(chan);
6206 l2cap_chan_del(chan, ECONNRESET);
6207 l2cap_chan_unlock(chan);
6212 case L2CAP_CR_LE_AUTHENTICATION:
6213 case L2CAP_CR_LE_ENCRYPTION:
6214 /* If we already have MITM protection we can't do
6217 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6218 l2cap_chan_del(chan, ECONNREFUSED);
6222 sec_level = hcon->sec_level + 1;
6223 if (chan->sec_level < sec_level)
6224 chan->sec_level = sec_level;
6226 /* We'll need to send a new Connect Request */
6227 clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6229 smp_conn_security(hcon, chan->sec_level);
6232 case L2CAP_CR_LE_BAD_PSM:
6233 l2cap_chan_del(chan, ECONNREFUSED);
6237 /* If dcid was not set it means channels was refused */
6239 l2cap_chan_del(chan, ECONNREFUSED);
6246 chan->remote_mps = mps;
6247 chan->tx_credits = credits;
6248 l2cap_chan_ready(chan);
6252 l2cap_chan_unlock(chan);
6255 mutex_unlock(&conn->chan_lock);
6260 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6261 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6264 struct l2cap_ecred_reconf_req *req = (void *) data;
6265 struct l2cap_ecred_reconf_rsp rsp;
6266 u16 mtu, mps, result;
6267 struct l2cap_chan *chan;
6273 if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
6274 result = L2CAP_CR_LE_INVALID_PARAMS;
6278 mtu = __le16_to_cpu(req->mtu);
6279 mps = __le16_to_cpu(req->mps);
6281 BT_DBG("mtu %u mps %u", mtu, mps);
6283 if (mtu < L2CAP_ECRED_MIN_MTU) {
6284 result = L2CAP_RECONF_INVALID_MTU;
6288 if (mps < L2CAP_ECRED_MIN_MPS) {
6289 result = L2CAP_RECONF_INVALID_MPS;
6293 cmd_len -= sizeof(*req);
6294 num_scid = cmd_len / sizeof(u16);
6295 result = L2CAP_RECONF_SUCCESS;
6297 for (i = 0; i < num_scid; i++) {
6300 scid = __le16_to_cpu(req->scid[i]);
6304 chan = __l2cap_get_chan_by_dcid(conn, scid);
6308 /* If the MTU value is decreased for any of the included
6309 * channels, then the receiver shall disconnect all
6310 * included channels.
6312 if (chan->omtu > mtu) {
6313 BT_ERR("chan %p decreased MTU %u -> %u", chan,
6315 result = L2CAP_RECONF_INVALID_MTU;
6319 chan->remote_mps = mps;
6323 rsp.result = cpu_to_le16(result);
6325 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
/* Handle an L2CAP_ECRED_RECONF_RSP.  On a non-zero (failure) result,
 * every channel still pending under this command ident is torn down
 * with ECONNRESET.
 * NOTE(review): the BT_DBG below logs rsp->result (still little-endian
 * on the wire) rather than the converted local `result` -- harmless on
 * LE hosts but misleading on big-endian; verify intent.
 */
6331 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6332 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6335 struct l2cap_chan *chan, *tmp;
6336 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6339 if (cmd_len < sizeof(*rsp))
6342 result = __le16_to_cpu(rsp->result);
6344 BT_DBG("result 0x%4.4x", rsp->result);
6349 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6350 if (chan->ident != cmd->ident)
6353 l2cap_chan_del(chan, ECONNRESET);
/* Handle a Command Reject on an LE link: the peer did not understand a
 * request we sent, so the channel waiting on that command ident (if
 * any) is dropped with ECONNREFUSED.
 */
6359 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6360 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6363 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6364 struct l2cap_chan *chan;
6366 if (cmd_len < sizeof(*rej))
6369 mutex_lock(&conn->chan_lock);
6371 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6375 l2cap_chan_lock(chan);
6376 l2cap_chan_del(chan, ECONNREFUSED);
6377 l2cap_chan_unlock(chan);
6380 mutex_unlock(&conn->chan_lock);
/* Dispatch one LE signalling command to its handler; unknown opcodes
 * are only logged here.
 */
6384 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6385 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6390 switch (cmd->code) {
6391 case L2CAP_COMMAND_REJ:
6392 l2cap_le_command_rej(conn, cmd, cmd_len, data);
6395 case L2CAP_CONN_PARAM_UPDATE_REQ:
6396 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6399 case L2CAP_CONN_PARAM_UPDATE_RSP:
6402 case L2CAP_LE_CONN_RSP:
6403 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6406 case L2CAP_LE_CONN_REQ:
6407 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6410 case L2CAP_LE_CREDITS:
6411 err = l2cap_le_credits(conn, cmd, cmd_len, data);
6414 case L2CAP_ECRED_CONN_REQ:
6415 err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6418 case L2CAP_ECRED_CONN_RSP:
6419 err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6422 case L2CAP_ECRED_RECONF_REQ:
6423 err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6426 case L2CAP_ECRED_RECONF_RSP:
6427 err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6430 case L2CAP_DISCONN_REQ:
6431 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6434 case L2CAP_DISCONN_RSP:
6435 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6439 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process the LE signalling channel: exactly one command per PDU.
 * Length/ident are validated, the command is dispatched, and a handler
 * error is answered with a "command not understood" reject.
 */
6447 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6448 struct sk_buff *skb)
6450 struct hci_conn *hcon = conn->hcon;
6451 struct l2cap_cmd_hdr *cmd;
6455 if (hcon->type != LE_LINK)
6458 if (skb->len < L2CAP_CMD_HDR_SIZE)
6461 cmd = (void *) skb->data;
6462 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6464 len = le16_to_cpu(cmd->len);
6466 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* LE signalling carries a single command; length must match exactly */
6468 if (len != skb->len || !cmd->ident) {
6469 BT_DBG("corrupted command");
6473 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6475 struct l2cap_cmd_rej_unk rej;
/* NOTE(review): message text says "Wrong link type" but this path is
 * taken for any handler error -- the wording is misleading.
 */
6477 BT_ERR("Wrong link type (%d)", err);
6479 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6480 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process the BR/EDR signalling channel.  Unlike LE, one PDU may carry
 * several commands, so the buffer is walked header by header; each
 * command is dispatched and a handler error is answered with a
 * "command not understood" reject.  The raw PDU is also delivered to
 * raw sockets first.
 */
6488 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6489 struct sk_buff *skb)
6491 struct hci_conn *hcon = conn->hcon;
6492 struct l2cap_cmd_hdr *cmd;
6495 l2cap_raw_recv(conn, skb);
6497 if (hcon->type != ACL_LINK)
6500 while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6503 cmd = (void *) skb->data;
6504 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6506 len = le16_to_cpu(cmd->len);
6508 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
6511 if (len > skb->len || !cmd->ident) {
6512 BT_DBG("corrupted command");
6516 err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6518 struct l2cap_cmd_rej_unk rej;
6520 BT_ERR("Wrong link type (%d)", err);
6522 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6523 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 frame check sequence of a received ERTM/streaming
 * frame.  The FCS trailer is trimmed off the skb, then the CRC is
 * recomputed over the L2CAP header (which sits just before skb->data)
 * plus the remaining payload and compared with the received value.
 */
6534 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
6536 u16 our_fcs, rcv_fcs;
/* Header length depends on whether extended control fields are in use */
6539 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6540 hdr_size = L2CAP_EXT_HDR_SIZE;
6542 hdr_size = L2CAP_ENH_HDR_SIZE;
6544 if (chan->fcs == L2CAP_FCS_CRC16) {
/* After the trim, skb->data + skb->len points at the received FCS */
6545 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6546 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6547 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6549 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) by sending the F-bit back: RNR when locally
 * busy, otherwise pending I-frames, falling back to an RR S-frame if no
 * frame carried the F-bit.
 */
6555 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6557 struct l2cap_ctrl control;
6559 BT_DBG("chan %p", chan);
6561 memset(&control, 0, sizeof(control));
6564 control.reqseq = chan->buffer_seq;
6565 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6567 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6568 control.super = L2CAP_SUPER_RNR;
6569 l2cap_send_sframe(chan, &control);
6572 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6573 chan->unacked_frames > 0)
6574 __set_retrans_timer(chan);
6576 /* Send pending iframes */
6577 l2cap_ertm_send(chan);
6579 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6580 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6581 /* F-bit wasn't sent in an s-frame or i-frame yet, so
6584 control.super = L2CAP_SUPER_RR;
6585 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list during SDU reassembly, keeping
 * *last_frag pointing at the list tail for O(1) appends, and updating
 * skb's aggregate len/data_len/truesize accounting.
 */
6589 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6590 struct sk_buff **last_frag)
6592 /* skb->len reflects data in skb as well as all fragments
6593 * skb->data_len reflects only data in fragments
6595 if (!skb_has_frag_list(skb))
6596 skb_shinfo(skb)->frag_list = new_frag;
6598 new_frag->next = NULL;
6600 (*last_frag)->next = new_frag;
6601 *last_frag = new_frag;
6603 skb->len += new_frag->len;
6604 skb->data_len += new_frag->len;
6605 skb->truesize += new_frag->truesize;
/* Reassemble a segmented SDU from ERTM I-frames according to the
 * frame's SAR bits.  Unsegmented frames go straight to chan->ops->recv;
 * START frames read the SDU length prefix and open chan->sdu; CONTINUE
 * frames are appended; the final segment delivers the whole SDU when
 * its length matches, otherwise the partial SDU is discarded.
 */
6608 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6609 struct l2cap_ctrl *control)
6613 switch (control->sar) {
6614 case L2CAP_SAR_UNSEGMENTED:
6618 err = chan->ops->recv(chan, skb);
6621 case L2CAP_SAR_START:
6625 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
/* First segment carries the total SDU length */
6628 chan->sdu_len = get_unaligned_le16(skb->data);
6629 skb_pull(skb, L2CAP_SDULEN_SIZE);
6631 if (chan->sdu_len > chan->imtu) {
6636 if (skb->len >= chan->sdu_len)
6640 chan->sdu_last_frag = skb;
6646 case L2CAP_SAR_CONTINUE:
6650 append_skb_frag(chan->sdu, skb,
6651 &chan->sdu_last_frag);
6654 if (chan->sdu->len >= chan->sdu_len)
6664 append_skb_frag(chan->sdu, skb,
6665 &chan->sdu_last_frag);
/* Deliver only when the reassembled length matches exactly */
6668 if (chan->sdu->len != chan->sdu_len)
6671 err = chan->ops->recv(chan, chan->sdu);
6674 /* Reassembly complete */
6676 chan->sdu_last_frag = NULL;
6684 kfree_skb(chan->sdu);
6686 chan->sdu_last_frag = NULL;
/* NOTE(review): presumably re-segments pending TX data after a channel
 * move; invoked from l2cap_finish_move() and l2cap_rx_state_wait_p()
 * below -- confirm against the full source.
 */
6693 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition into the ERTM TX state machine.  Only
 * meaningful for ERTM channels; other modes need no busy signalling.
 */
6699 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6703 if (chan->mode != L2CAP_MODE_ERTM)
6706 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6707 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver in-sequence frames from srej_q to
 * l2cap_reassemble_sdu() until a gap is hit or we go locally busy.
 * Once the queue is empty the RX state machine returns to RECV and the
 * peer is acknowledged.
 */
6710 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6713 /* Pass sequential frames to l2cap_reassemble_sdu()
6714 * until a gap is encountered.
6717 BT_DBG("chan %p", chan);
6719 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6720 struct sk_buff *skb;
6721 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6722 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6724 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6729 skb_unlink(skb, &chan->srej_q);
6730 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6731 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6736 if (skb_queue_empty(&chan->srej_q)) {
6737 chan->rx_state = L2CAP_RX_STATE_RECV;
6738 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single frame the peer
 * asked for.  A reqseq equal to next_tx_seq (nothing outstanding) or a
 * frame past its retry limit disconnects the channel.  P- and F-bit
 * handling guards against retransmitting twice for the same SREJ
 * (CONN_SREJ_ACT / srej_save_reqseq).
 */
6744 static void l2cap_handle_srej(struct l2cap_chan *chan,
6745 struct l2cap_ctrl *control)
6747 struct sk_buff *skb;
6749 BT_DBG("chan %p, control %p", chan, control);
6751 if (control->reqseq == chan->next_tx_seq) {
6752 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6753 l2cap_send_disconn_req(chan, ECONNRESET);
6757 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6760 BT_DBG("Seq %d not available for retransmission",
6765 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6766 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6767 l2cap_send_disconn_req(chan, ECONNRESET);
6771 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6773 if (control->poll) {
6774 l2cap_pass_to_tx(chan, control);
6776 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6777 l2cap_retransmit(chan, control);
6778 l2cap_ertm_send(chan);
6780 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6781 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6782 chan->srej_save_reqseq = control->reqseq;
6785 l2cap_pass_to_tx_fbit(chan, control);
6787 if (control->final) {
/* Skip the retransmit only if this F-bit answers the SREJ we
 * already acted on (same saved reqseq).
 */
6788 if (chan->srej_save_reqseq != control->reqseq ||
6789 !test_and_clear_bit(CONN_SREJ_ACT,
6791 l2cap_retransmit(chan, control);
6793 l2cap_retransmit(chan, control);
6794 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6795 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6796 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit everything from reqseq on.
 * Invalid reqseq or an exceeded retry limit disconnects; CONN_REJ_ACT
 * prevents a duplicate full retransmission when the F-bit confirms a
 * REJ we already served.
 */
6802 static void l2cap_handle_rej(struct l2cap_chan *chan,
6803 struct l2cap_ctrl *control)
6805 struct sk_buff *skb;
6807 BT_DBG("chan %p, control %p", chan, control);
6809 if (control->reqseq == chan->next_tx_seq) {
6810 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6811 l2cap_send_disconn_req(chan, ECONNRESET);
6815 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6817 if (chan->max_tx && skb &&
6818 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6819 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6820 l2cap_send_disconn_req(chan, ECONNRESET);
6824 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6826 l2cap_pass_to_tx(chan, control);
6828 if (control->final) {
6829 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6830 l2cap_retransmit_all(chan, control);
6832 l2cap_retransmit_all(chan, control);
6833 l2cap_ertm_send(chan);
6834 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6835 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify a received I-frame's txseq relative to the RX window:
 * expected, duplicate, unexpected (gap -> SREJ), or invalid.  When an
 * SREJ is outstanding the classification additionally distinguishes
 * the frame we are waiting for, duplicates already queued, and SREJ'd
 * frames arriving out of order.  Returns one of the L2CAP_TXSEQ_*
 * codes consumed by the RX state handlers.
 */
6839 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6841 BT_DBG("chan %p, txseq %d", chan, txseq);
6843 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6844 chan->expected_tx_seq);
6846 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6847 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6849 /* See notes below regarding "double poll" and
6852 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6853 BT_DBG("Invalid/Ignore - after SREJ");
6854 return L2CAP_TXSEQ_INVALID_IGNORE;
6856 BT_DBG("Invalid - in window after SREJ sent");
6857 return L2CAP_TXSEQ_INVALID;
6861 if (chan->srej_list.head == txseq) {
6862 BT_DBG("Expected SREJ");
6863 return L2CAP_TXSEQ_EXPECTED_SREJ;
6866 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6867 BT_DBG("Duplicate SREJ - txseq already stored");
6868 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6871 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6872 BT_DBG("Unexpected SREJ - not requested");
6873 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6877 if (chan->expected_tx_seq == txseq) {
6878 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6880 BT_DBG("Invalid - txseq outside tx window");
6881 return L2CAP_TXSEQ_INVALID;
6884 return L2CAP_TXSEQ_EXPECTED;
6888 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6889 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6890 BT_DBG("Duplicate - expected_tx_seq later than txseq")
6891 return L2CAP_TXSEQ_DUPLICATE;
6894 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6895 /* A source of invalid packets is a "double poll" condition,
6896 * where delays cause us to send multiple poll packets. If
6897 * the remote stack receives and processes both polls,
6898 * sequence numbers can wrap around in such a way that a
6899 * resent frame has a sequence number that looks like new data
6900 * with a sequence gap. This would trigger an erroneous SREJ
6903 * Fortunately, this is impossible with a tx window that's
6904 * less than half of the maximum sequence number, which allows
6905 * invalid frames to be safely ignored.
6907 * With tx window sizes greater than half of the tx window
6908 * maximum, the frame is invalid and cannot be ignored. This
6909 * causes a disconnect.
6912 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6913 BT_DBG("Invalid/Ignore - txseq outside tx window");
6914 return L2CAP_TXSEQ_INVALID_IGNORE;
6916 BT_DBG("Invalid - txseq outside tx window");
6917 return L2CAP_TXSEQ_INVALID;
6920 BT_DBG("Unexpected - txseq indicates missing frames");
6921 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine, RECV state.  I-frames are classified by
 * txseq: expected frames are reassembled and acked, a sequence gap
 * queues the frame and switches to SREJ_SENT, invalid frames either
 * get ignored or disconnect the channel.  RR/RNR/REJ/SREJ S-frame
 * events drive the TX side.  Any skb not queued for later (skb_in_use)
 * is freed on exit.
 */
6925 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6926 struct l2cap_ctrl *control,
6927 struct sk_buff *skb, u8 event)
6929 struct l2cap_ctrl local_control;
6931 bool skb_in_use = false;
6933 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6937 case L2CAP_EV_RECV_IFRAME:
6938 switch (l2cap_classify_txseq(chan, control->txseq)) {
6939 case L2CAP_TXSEQ_EXPECTED:
6940 l2cap_pass_to_tx(chan, control);
6942 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6943 BT_DBG("Busy, discarding expected seq %d",
6948 chan->expected_tx_seq = __next_seq(chan,
6951 chan->buffer_seq = chan->expected_tx_seq;
6954 /* l2cap_reassemble_sdu may free skb, hence invalidate
6955 * control, so make a copy in advance to use it after
6956 * l2cap_reassemble_sdu returns and to avoid the race
6957 * condition, for example:
6959 * The current thread calls:
6960 * l2cap_reassemble_sdu
6961 * chan->ops->recv == l2cap_sock_recv_cb
6962 * __sock_queue_rcv_skb
6963 * Another thread calls:
6967 * Then the current thread tries to access control, but
6968 * it was freed by skb_free_datagram.
6970 local_control = *control;
6971 err = l2cap_reassemble_sdu(chan, skb, control);
6975 if (local_control.final) {
6976 if (!test_and_clear_bit(CONN_REJ_ACT,
6977 &chan->conn_state)) {
6978 local_control.final = 0;
6979 l2cap_retransmit_all(chan, &local_control);
6980 l2cap_ertm_send(chan);
6984 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6985 l2cap_send_ack(chan);
6987 case L2CAP_TXSEQ_UNEXPECTED:
6988 l2cap_pass_to_tx(chan, control);
6990 /* Can't issue SREJ frames in the local busy state.
6991 * Drop this frame, it will be seen as missing
6992 * when local busy is exited.
6994 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6995 BT_DBG("Busy, discarding unexpected seq %d",
7000 /* There was a gap in the sequence, so an SREJ
7001 * must be sent for each missing frame. The
7002 * current frame is stored for later use.
7004 skb_queue_tail(&chan->srej_q, skb);
7006 BT_DBG("Queued %p (queue len %d)", skb,
7007 skb_queue_len(&chan->srej_q));
7009 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
7010 l2cap_seq_list_clear(&chan->srej_list);
7011 l2cap_send_srej(chan, control->txseq);
7013 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
7015 case L2CAP_TXSEQ_DUPLICATE:
7016 l2cap_pass_to_tx(chan, control);
7018 case L2CAP_TXSEQ_INVALID_IGNORE:
7020 case L2CAP_TXSEQ_INVALID:
7022 l2cap_send_disconn_req(chan, ECONNRESET);
7026 case L2CAP_EV_RECV_RR:
7027 l2cap_pass_to_tx(chan, control);
7028 if (control->final) {
7029 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7031 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
7032 !__chan_is_moving(chan)) {
7034 l2cap_retransmit_all(chan, control);
7037 l2cap_ertm_send(chan);
7038 } else if (control->poll) {
7039 l2cap_send_i_or_rr_or_rnr(chan);
7041 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7042 &chan->conn_state) &&
7043 chan->unacked_frames)
7044 __set_retrans_timer(chan);
7046 l2cap_ertm_send(chan);
7049 case L2CAP_EV_RECV_RNR:
7050 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7051 l2cap_pass_to_tx(chan, control);
7052 if (control && control->poll) {
7053 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7054 l2cap_send_rr_or_rnr(chan, 0);
7056 __clear_retrans_timer(chan);
7057 l2cap_seq_list_clear(&chan->retrans_list);
7059 case L2CAP_EV_RECV_REJ:
7060 l2cap_handle_rej(chan, control);
7062 case L2CAP_EV_RECV_SREJ:
7063 l2cap_handle_srej(chan, control);
7069 if (skb && !skb_in_use) {
7070 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine, SREJ_SENT state: we are waiting for one or
 * more selectively-rejected frames.  Every I-frame is parked in
 * srej_q; when the head of the SREJ list arrives the queued in-order
 * frames are drained via l2cap_rx_queued_iframes().  Further gaps emit
 * more SREJs; S-frames are handled as in RECV but answer polls with
 * the tail of the SREJ list.
 */
7077 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
7078 struct l2cap_ctrl *control,
7079 struct sk_buff *skb, u8 event)
7082 u16 txseq = control->txseq;
7083 bool skb_in_use = false;
7085 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7089 case L2CAP_EV_RECV_IFRAME:
7090 switch (l2cap_classify_txseq(chan, txseq)) {
7091 case L2CAP_TXSEQ_EXPECTED:
7092 /* Keep frame for reassembly later */
7093 l2cap_pass_to_tx(chan, control);
7094 skb_queue_tail(&chan->srej_q, skb);
7096 BT_DBG("Queued %p (queue len %d)", skb,
7097 skb_queue_len(&chan->srej_q));
7099 chan->expected_tx_seq = __next_seq(chan, txseq);
7101 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* The frame we SREJ'd for arrived; drain what is now in order */
7102 l2cap_seq_list_pop(&chan->srej_list);
7104 l2cap_pass_to_tx(chan, control);
7105 skb_queue_tail(&chan->srej_q, skb);
7107 BT_DBG("Queued %p (queue len %d)", skb,
7108 skb_queue_len(&chan->srej_q));
7110 err = l2cap_rx_queued_iframes(chan);
7115 case L2CAP_TXSEQ_UNEXPECTED:
7116 /* Got a frame that can't be reassembled yet.
7117 * Save it for later, and send SREJs to cover
7118 * the missing frames.
7120 skb_queue_tail(&chan->srej_q, skb);
7122 BT_DBG("Queued %p (queue len %d)", skb,
7123 skb_queue_len(&chan->srej_q));
7125 l2cap_pass_to_tx(chan, control);
7126 l2cap_send_srej(chan, control->txseq);
7128 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7129 /* This frame was requested with an SREJ, but
7130 * some expected retransmitted frames are
7131 * missing. Request retransmission of missing
7134 skb_queue_tail(&chan->srej_q, skb);
7136 BT_DBG("Queued %p (queue len %d)", skb,
7137 skb_queue_len(&chan->srej_q));
7139 l2cap_pass_to_tx(chan, control);
7140 l2cap_send_srej_list(chan, control->txseq);
7142 case L2CAP_TXSEQ_DUPLICATE_SREJ:
7143 /* We've already queued this frame. Drop this copy. */
7144 l2cap_pass_to_tx(chan, control);
7146 case L2CAP_TXSEQ_DUPLICATE:
7147 /* Expecting a later sequence number, so this frame
7148 * was already received. Ignore it completely.
7151 case L2CAP_TXSEQ_INVALID_IGNORE:
7153 case L2CAP_TXSEQ_INVALID:
7155 l2cap_send_disconn_req(chan, ECONNRESET);
7159 case L2CAP_EV_RECV_RR:
7160 l2cap_pass_to_tx(chan, control);
7161 if (control->final) {
7162 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7164 if (!test_and_clear_bit(CONN_REJ_ACT,
7165 &chan->conn_state)) {
7167 l2cap_retransmit_all(chan, control);
7170 l2cap_ertm_send(chan);
7171 } else if (control->poll) {
7172 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7173 &chan->conn_state) &&
7174 chan->unacked_frames) {
7175 __set_retrans_timer(chan);
7178 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Answer the poll by re-requesting the newest missing frame */
7179 l2cap_send_srej_tail(chan);
7181 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7182 &chan->conn_state) &&
7183 chan->unacked_frames)
7184 __set_retrans_timer(chan);
7186 l2cap_send_ack(chan);
7189 case L2CAP_EV_RECV_RNR:
7190 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7191 l2cap_pass_to_tx(chan, control);
7192 if (control->poll) {
7193 l2cap_send_srej_tail(chan);
7195 struct l2cap_ctrl rr_control;
7196 memset(&rr_control, 0, sizeof(rr_control));
7197 rr_control.sframe = 1;
7198 rr_control.super = L2CAP_SUPER_RR;
7199 rr_control.reqseq = chan->buffer_seq;
7200 l2cap_send_sframe(chan, &rr_control);
7204 case L2CAP_EV_RECV_REJ:
7205 l2cap_handle_rej(chan, control);
7207 case L2CAP_EV_RECV_SREJ:
7208 l2cap_handle_srej(chan, control);
7212 if (skb && !skb_in_use) {
7213 BT_DBG("Freeing %p", skb);
7220 static int l2cap_finish_move(struct l2cap_chan *chan)
7222 BT_DBG("chan %p", chan);
7224 chan->rx_state = L2CAP_RX_STATE_RECV;
7227 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7229 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7231 return l2cap_resegment(chan);
7234 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7235 struct l2cap_ctrl *control,
7236 struct sk_buff *skb, u8 event)
7240 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7246 l2cap_process_reqseq(chan, control->reqseq);
7248 if (!skb_queue_empty(&chan->tx_q))
7249 chan->tx_send_head = skb_peek(&chan->tx_q);
7251 chan->tx_send_head = NULL;
7253 /* Rewind next_tx_seq to the point expected
7256 chan->next_tx_seq = control->reqseq;
7257 chan->unacked_frames = 0;
7259 err = l2cap_finish_move(chan);
7263 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7264 l2cap_send_i_or_rr_or_rnr(chan);
7266 if (event == L2CAP_EV_RECV_IFRAME)
7269 return l2cap_rx_state_recv(chan, control, NULL, event);
7272 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7273 struct l2cap_ctrl *control,
7274 struct sk_buff *skb, u8 event)
7278 if (!control->final)
7281 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7283 chan->rx_state = L2CAP_RX_STATE_RECV;
7284 l2cap_process_reqseq(chan, control->reqseq);
7286 if (!skb_queue_empty(&chan->tx_q))
7287 chan->tx_send_head = skb_peek(&chan->tx_q);
7289 chan->tx_send_head = NULL;
7291 /* Rewind next_tx_seq to the point expected
7294 chan->next_tx_seq = control->reqseq;
7295 chan->unacked_frames = 0;
7298 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7300 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7302 err = l2cap_resegment(chan);
7305 err = l2cap_rx_state_recv(chan, control, skb, event);
7310 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7312 /* Make sure reqseq is for a packet that has been sent but not acked */
7315 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7316 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7319 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7320 struct sk_buff *skb, u8 event)
7324 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7325 control, skb, event, chan->rx_state);
7327 if (__valid_reqseq(chan, control->reqseq)) {
7328 switch (chan->rx_state) {
7329 case L2CAP_RX_STATE_RECV:
7330 err = l2cap_rx_state_recv(chan, control, skb, event);
7332 case L2CAP_RX_STATE_SREJ_SENT:
7333 err = l2cap_rx_state_srej_sent(chan, control, skb,
7336 case L2CAP_RX_STATE_WAIT_P:
7337 err = l2cap_rx_state_wait_p(chan, control, skb, event);
7339 case L2CAP_RX_STATE_WAIT_F:
7340 err = l2cap_rx_state_wait_f(chan, control, skb, event);
7347 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7348 control->reqseq, chan->next_tx_seq,
7349 chan->expected_ack_seq);
7350 l2cap_send_disconn_req(chan, ECONNRESET);
7356 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7357 struct sk_buff *skb)
7359 /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
7360 * the txseq field in advance to use it after l2cap_reassemble_sdu
7361 * returns and to avoid the race condition, for example:
7363 * The current thread calls:
7364 * l2cap_reassemble_sdu
7365 * chan->ops->recv == l2cap_sock_recv_cb
7366 * __sock_queue_rcv_skb
7367 * Another thread calls:
7371 * Then the current thread tries to access control, but it was freed by
7372 * skb_free_datagram.
7374 u16 txseq = control->txseq;
7376 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7379 if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
7380 l2cap_pass_to_tx(chan, control);
7382 BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
7383 __next_seq(chan, chan->buffer_seq));
7385 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7387 l2cap_reassemble_sdu(chan, skb, control);
7390 kfree_skb(chan->sdu);
7393 chan->sdu_last_frag = NULL;
7397 BT_DBG("Freeing %p", skb);
7402 chan->last_acked_seq = txseq;
7403 chan->expected_tx_seq = __next_seq(chan, txseq);
7408 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7410 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7414 __unpack_control(chan, skb);
7419 * We can just drop the corrupted I-frame here.
7420 * Receiver will miss it and start proper recovery
7421 * procedures and ask for retransmission.
7423 if (l2cap_check_fcs(chan, skb))
7426 if (!control->sframe && control->sar == L2CAP_SAR_START)
7427 len -= L2CAP_SDULEN_SIZE;
7429 if (chan->fcs == L2CAP_FCS_CRC16)
7430 len -= L2CAP_FCS_SIZE;
7432 if (len > chan->mps) {
7433 l2cap_send_disconn_req(chan, ECONNRESET);
7437 if (chan->ops->filter) {
7438 if (chan->ops->filter(chan, skb))
7442 if (!control->sframe) {
7445 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7446 control->sar, control->reqseq, control->final,
7449 /* Validate F-bit - F=0 always valid, F=1 only
7450 * valid in TX WAIT_F
7452 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7455 if (chan->mode != L2CAP_MODE_STREAMING) {
7456 event = L2CAP_EV_RECV_IFRAME;
7457 err = l2cap_rx(chan, control, skb, event);
7459 err = l2cap_stream_rx(chan, control, skb);
7463 l2cap_send_disconn_req(chan, ECONNRESET);
7465 const u8 rx_func_to_event[4] = {
7466 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7467 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7470 /* Only I-frames are expected in streaming mode */
7471 if (chan->mode == L2CAP_MODE_STREAMING)
7474 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7475 control->reqseq, control->final, control->poll,
7479 BT_ERR("Trailing bytes: %d in sframe", len);
7480 l2cap_send_disconn_req(chan, ECONNRESET);
7484 /* Validate F and P bits */
7485 if (control->final && (control->poll ||
7486 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7489 event = rx_func_to_event[control->super];
7490 if (l2cap_rx(chan, control, skb, event))
7491 l2cap_send_disconn_req(chan, ECONNRESET);
7501 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7503 struct l2cap_conn *conn = chan->conn;
7504 struct l2cap_le_credits pkt;
7507 return_credits = (chan->imtu / chan->mps) + 1;
7509 if (chan->rx_credits >= return_credits)
7512 return_credits -= chan->rx_credits;
7514 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7516 chan->rx_credits += return_credits;
7518 pkt.cid = cpu_to_le16(chan->scid);
7519 pkt.credits = cpu_to_le16(return_credits);
7521 chan->ident = l2cap_get_ident(conn);
7523 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7526 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7530 BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7532 /* Wait recv to confirm reception before updating the credits */
7533 err = chan->ops->recv(chan, skb);
7535 /* Update credits whenever an SDU is received */
7536 l2cap_chan_le_send_credits(chan);
7541 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7545 if (!chan->rx_credits) {
7546 BT_ERR("No credits to receive LE L2CAP data");
7547 l2cap_send_disconn_req(chan, ECONNRESET);
7551 if (chan->imtu < skb->len) {
7552 BT_ERR("Too big LE L2CAP PDU");
7557 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7559 /* Update if remote had run out of credits, this should only happens
7560 * if the remote is not using the entire MPS.
7562 if (!chan->rx_credits)
7563 l2cap_chan_le_send_credits(chan);
7570 sdu_len = get_unaligned_le16(skb->data);
7571 skb_pull(skb, L2CAP_SDULEN_SIZE);
7573 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7574 sdu_len, skb->len, chan->imtu);
7576 if (sdu_len > chan->imtu) {
7577 BT_ERR("Too big LE L2CAP SDU length received");
7582 if (skb->len > sdu_len) {
7583 BT_ERR("Too much LE L2CAP data received");
7588 if (skb->len == sdu_len)
7589 return l2cap_ecred_recv(chan, skb);
7592 chan->sdu_len = sdu_len;
7593 chan->sdu_last_frag = skb;
7595 /* Detect if remote is not able to use the selected MPS */
7596 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7597 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7599 /* Adjust the number of credits */
7600 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7601 chan->mps = mps_len;
7602 l2cap_chan_le_send_credits(chan);
7608 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7609 chan->sdu->len, skb->len, chan->sdu_len);
7611 if (chan->sdu->len + skb->len > chan->sdu_len) {
7612 BT_ERR("Too much LE L2CAP data received");
7617 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
7620 if (chan->sdu->len == chan->sdu_len) {
7621 err = l2cap_ecred_recv(chan, chan->sdu);
7624 chan->sdu_last_frag = NULL;
7632 kfree_skb(chan->sdu);
7634 chan->sdu_last_frag = NULL;
7638 /* We can't return an error here since we took care of the skb
7639 * freeing internally. An error return would cause the caller to
7640 * do a double-free of the skb.
7645 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7646 struct sk_buff *skb)
7648 struct l2cap_chan *chan;
7650 chan = l2cap_get_chan_by_scid(conn, cid);
7652 if (cid == L2CAP_CID_A2MP) {
7653 chan = a2mp_channel_create(conn, skb);
7659 l2cap_chan_hold(chan);
7660 l2cap_chan_lock(chan);
7662 BT_DBG("unknown cid 0x%4.4x", cid);
7663 /* Drop packet and return */
7669 BT_DBG("chan %p, len %d", chan, skb->len);
7671 /* If we receive data on a fixed channel before the info req/rsp
7672 * procedure is done simply assume that the channel is supported
7673 * and mark it as ready.
7676 if (chan->chan_type == L2CAP_CHAN_FIXED)
7677 l2cap_chan_ready(chan);
7679 if (chan->chan_type == L2CAP_CHAN_FIXED) {
7680 if (chan->psm == L2CAP_PSM_IPSP) {
7681 struct l2cap_conn *conn = chan->conn;
7683 if (conn->hcon->out)
7684 l2cap_chan_ready(chan);
7685 else if (conn->hcon->type != LE_LINK)
7686 l2cap_chan_ready(chan);
7688 l2cap_chan_ready(chan);
7693 if (chan->state != BT_CONNECTED)
7696 switch (chan->mode) {
7697 case L2CAP_MODE_LE_FLOWCTL:
7698 case L2CAP_MODE_EXT_FLOWCTL:
7699 if (l2cap_ecred_data_rcv(chan, skb) < 0)
7704 case L2CAP_MODE_BASIC:
7705 /* If socket recv buffers overflows we drop data here
7706 * which is *bad* because L2CAP has to be reliable.
7707 * But we don't have any other choice. L2CAP doesn't
7708 * provide flow control mechanism. */
7710 if (chan->imtu < skb->len) {
7711 BT_ERR("Dropping L2CAP data: receive buffer overflow");
7715 if (!chan->ops->recv(chan, skb))
7719 case L2CAP_MODE_ERTM:
7720 case L2CAP_MODE_STREAMING:
7721 l2cap_data_rcv(chan, skb);
7725 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7733 l2cap_chan_unlock(chan);
7734 l2cap_chan_put(chan);
7737 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7738 struct sk_buff *skb)
7740 struct hci_conn *hcon = conn->hcon;
7741 struct l2cap_chan *chan;
7743 if (hcon->type != ACL_LINK)
7746 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7751 BT_DBG("chan %p, len %d", chan, skb->len);
7753 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7756 if (chan->imtu < skb->len)
7759 /* Store remote BD_ADDR and PSM for msg_name */
7760 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7761 bt_cb(skb)->l2cap.psm = psm;
7763 if (!chan->ops->recv(chan, skb)) {
7764 l2cap_chan_put(chan);
7769 l2cap_chan_put(chan);
7774 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7776 struct l2cap_hdr *lh = (void *) skb->data;
7777 struct hci_conn *hcon = conn->hcon;
7781 if (hcon->state != BT_CONNECTED) {
7782 BT_DBG("queueing pending rx skb");
7783 skb_queue_tail(&conn->pending_rx, skb);
7787 skb_pull(skb, L2CAP_HDR_SIZE);
7788 cid = __le16_to_cpu(lh->cid);
7789 len = __le16_to_cpu(lh->len);
7791 if (len != skb->len) {
7796 /* Since we can't actively block incoming LE connections we must
7797 * at least ensure that we ignore incoming data from them.
7799 if (hcon->type == LE_LINK &&
7800 hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
7801 bdaddr_dst_type(hcon))) {
7806 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7809 case L2CAP_CID_SIGNALING:
7810 l2cap_sig_channel(conn, skb);
7813 case L2CAP_CID_CONN_LESS:
7814 psm = get_unaligned((__le16 *) skb->data);
7815 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7816 l2cap_conless_channel(conn, psm, skb);
7819 case L2CAP_CID_LE_SIGNALING:
7820 l2cap_le_sig_channel(conn, skb);
7824 l2cap_data_channel(conn, cid, skb);
7829 static void process_pending_rx(struct work_struct *work)
7831 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7833 struct sk_buff *skb;
7837 while ((skb = skb_dequeue(&conn->pending_rx)))
7838 l2cap_recv_frame(conn, skb);
7841 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7843 struct l2cap_conn *conn = hcon->l2cap_data;
7844 struct hci_chan *hchan;
7849 hchan = hci_chan_create(hcon);
7853 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7855 hci_chan_del(hchan);
7859 kref_init(&conn->ref);
7860 hcon->l2cap_data = conn;
7861 conn->hcon = hci_conn_get(hcon);
7862 conn->hchan = hchan;
7864 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7866 switch (hcon->type) {
7868 if (hcon->hdev->le_mtu) {
7869 conn->mtu = hcon->hdev->le_mtu;
7874 conn->mtu = hcon->hdev->acl_mtu;
7878 conn->feat_mask = 0;
7880 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7882 if (hcon->type == ACL_LINK &&
7883 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7884 conn->local_fixed_chan |= L2CAP_FC_A2MP;
7886 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7887 (bredr_sc_enabled(hcon->hdev) ||
7888 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7889 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7891 mutex_init(&conn->ident_lock);
7892 mutex_init(&conn->chan_lock);
7894 INIT_LIST_HEAD(&conn->chan_l);
7895 INIT_LIST_HEAD(&conn->users);
7897 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7899 skb_queue_head_init(&conn->pending_rx);
7900 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7901 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7903 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7908 static bool is_valid_psm(u16 psm, u8 dst_type)
7913 if (bdaddr_type_is_le(dst_type))
7914 return (psm <= 0x00ff);
7916 /* PSM must be odd and lsb of upper byte must be 0 */
7917 return ((psm & 0x0101) == 0x0001);
/* Iteration context for l2cap_chan_by_pid(): counts deferred ECRED
 * channels belonging to the same owner PID and PSM as @chan.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;
	struct pid *pid;
	int count;
};
7926 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7928 struct l2cap_chan_data *d = data;
7931 if (chan == d->chan)
7934 if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7937 pid = chan->ops->get_peer_pid(chan);
7939 /* Only count deferred channels with the same PID/PSM */
7940 if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7941 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7947 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7948 bdaddr_t *dst, u8 dst_type)
7950 struct l2cap_conn *conn;
7951 struct hci_conn *hcon;
7952 struct hci_dev *hdev;
7955 BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7956 dst, dst_type, __le16_to_cpu(psm), chan->mode);
7958 hdev = hci_get_route(dst, &chan->src, chan->src_type);
7960 return -EHOSTUNREACH;
7964 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7965 chan->chan_type != L2CAP_CHAN_RAW) {
7970 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7975 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7980 switch (chan->mode) {
7981 case L2CAP_MODE_BASIC:
7983 case L2CAP_MODE_LE_FLOWCTL:
7985 case L2CAP_MODE_EXT_FLOWCTL:
7986 if (!enable_ecred) {
7991 case L2CAP_MODE_ERTM:
7992 case L2CAP_MODE_STREAMING:
8001 switch (chan->state) {
8005 /* Already connecting */
8010 /* Already connected */
8024 /* Set destination address and psm */
8025 bacpy(&chan->dst, dst);
8026 chan->dst_type = dst_type;
8031 if (bdaddr_type_is_le(dst_type)) {
8032 /* Convert from L2CAP channel address type to HCI address type
8034 if (dst_type == BDADDR_LE_PUBLIC)
8035 dst_type = ADDR_LE_DEV_PUBLIC;
8037 dst_type = ADDR_LE_DEV_RANDOM;
8039 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8040 hcon = hci_connect_le(hdev, dst, dst_type,
8042 HCI_LE_CONN_TIMEOUT,
8043 HCI_ROLE_SLAVE, NULL);
8045 hcon = hci_connect_le_scan(hdev, dst, dst_type,
8047 HCI_LE_CONN_TIMEOUT,
8048 CONN_REASON_L2CAP_CHAN);
8051 u8 auth_type = l2cap_get_auth_type(chan);
8052 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
8053 CONN_REASON_L2CAP_CHAN);
8057 err = PTR_ERR(hcon);
8061 conn = l2cap_conn_add(hcon);
8063 hci_conn_drop(hcon);
8068 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
8069 struct l2cap_chan_data data;
8072 data.pid = chan->ops->get_peer_pid(chan);
8075 l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
8077 /* Check if there isn't too many channels being connected */
8078 if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
8079 hci_conn_drop(hcon);
8085 mutex_lock(&conn->chan_lock);
8086 l2cap_chan_lock(chan);
8088 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
8089 hci_conn_drop(hcon);
8094 /* Update source addr of the socket */
8095 bacpy(&chan->src, &hcon->src);
8096 chan->src_type = bdaddr_src_type(hcon);
8098 __l2cap_chan_add(conn, chan);
8100 /* l2cap_chan_add takes its own ref so we can drop this one */
8101 hci_conn_drop(hcon);
8103 l2cap_state_change(chan, BT_CONNECT);
8104 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
8106 /* Release chan->sport so that it can be reused by other
8107 * sockets (as it's only used for listening sockets).
8109 write_lock(&chan_list_lock);
8111 write_unlock(&chan_list_lock);
8113 if (hcon->state == BT_CONNECTED) {
8114 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
8115 __clear_chan_timer(chan);
8116 if (l2cap_chan_check_security(chan, true))
8117 l2cap_state_change(chan, BT_CONNECTED);
8119 l2cap_do_start(chan);
8125 l2cap_chan_unlock(chan);
8126 mutex_unlock(&conn->chan_lock);
8128 hci_dev_unlock(hdev);
8132 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8134 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8136 struct l2cap_conn *conn = chan->conn;
8138 struct l2cap_ecred_reconf_req req;
8142 pdu.req.mtu = cpu_to_le16(chan->imtu);
8143 pdu.req.mps = cpu_to_le16(chan->mps);
8144 pdu.scid = cpu_to_le16(chan->scid);
8146 chan->ident = l2cap_get_ident(conn);
8148 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
8152 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8154 if (chan->imtu > mtu)
8157 BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8161 l2cap_ecred_reconfigure(chan);
8166 /* ---- L2CAP interface with lower layer (HCI) ---- */
8168 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8170 int exact = 0, lm1 = 0, lm2 = 0;
8171 struct l2cap_chan *c;
8173 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8175 /* Find listening sockets and check their link_mode */
8176 read_lock(&chan_list_lock);
8177 list_for_each_entry(c, &chan_list, global_l) {
8178 if (c->state != BT_LISTEN)
8181 if (!bacmp(&c->src, &hdev->bdaddr)) {
8182 lm1 |= HCI_LM_ACCEPT;
8183 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8184 lm1 |= HCI_LM_MASTER;
8186 } else if (!bacmp(&c->src, BDADDR_ANY)) {
8187 lm2 |= HCI_LM_ACCEPT;
8188 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8189 lm2 |= HCI_LM_MASTER;
8192 read_unlock(&chan_list_lock);
8194 return exact ? lm1 : lm2;
8197 /* Find the next fixed channel in BT_LISTEN state, continue iteration
8198 * from an existing channel in the list or from the beginning of the
8199 * global list (by passing NULL as first parameter).
8201 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8202 struct hci_conn *hcon)
8204 u8 src_type = bdaddr_src_type(hcon);
8206 read_lock(&chan_list_lock);
8209 c = list_next_entry(c, global_l);
8211 c = list_entry(chan_list.next, typeof(*c), global_l);
8213 list_for_each_entry_from(c, &chan_list, global_l) {
8214 if (c->chan_type != L2CAP_CHAN_FIXED)
8216 if (c->state != BT_LISTEN)
8218 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8220 if (src_type != c->src_type)
8223 c = l2cap_chan_hold_unless_zero(c);
8224 read_unlock(&chan_list_lock);
8228 read_unlock(&chan_list_lock);
8233 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8235 struct hci_dev *hdev = hcon->hdev;
8236 struct l2cap_conn *conn;
8237 struct l2cap_chan *pchan;
8240 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8243 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
8246 l2cap_conn_del(hcon, bt_to_errno(status));
8250 conn = l2cap_conn_add(hcon);
8254 dst_type = bdaddr_dst_type(hcon);
8256 /* If device is blocked, do not create channels for it */
8257 if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
8260 /* Find fixed channels and notify them of the new connection. We
8261 * use multiple individual lookups, continuing each time where
8262 * we left off, because the list lock would prevent calling the
8263 * potentially sleeping l2cap_chan_lock() function.
8265 pchan = l2cap_global_fixed_chan(NULL, hcon);
8267 struct l2cap_chan *chan, *next;
8269 /* Client fixed channels should override server ones */
8270 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8273 l2cap_chan_lock(pchan);
8274 chan = pchan->ops->new_connection(pchan);
8276 bacpy(&chan->src, &hcon->src);
8277 bacpy(&chan->dst, &hcon->dst);
8278 chan->src_type = bdaddr_src_type(hcon);
8279 chan->dst_type = dst_type;
8281 __l2cap_chan_add(conn, chan);
8284 l2cap_chan_unlock(pchan);
8286 next = l2cap_global_fixed_chan(pchan, hcon);
8287 l2cap_chan_put(pchan);
8291 l2cap_conn_ready(conn);
8294 int l2cap_disconn_ind(struct hci_conn *hcon)
8296 struct l2cap_conn *conn = hcon->l2cap_data;
8298 BT_DBG("hcon %p", hcon);
8301 return HCI_ERROR_REMOTE_USER_TERM;
8302 return conn->disc_reason;
8305 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8307 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8310 BT_DBG("hcon %p reason %d", hcon, reason);
8312 l2cap_conn_del(hcon, bt_to_errno(reason));
8315 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8317 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8320 if (encrypt == 0x00) {
8321 if (chan->sec_level == BT_SECURITY_MEDIUM) {
8322 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8323 } else if (chan->sec_level == BT_SECURITY_HIGH ||
8324 chan->sec_level == BT_SECURITY_FIPS)
8325 l2cap_chan_close(chan, ECONNREFUSED);
8327 if (chan->sec_level == BT_SECURITY_MEDIUM)
8328 __clear_chan_timer(chan);
8332 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8334 struct l2cap_conn *conn = hcon->l2cap_data;
8335 struct l2cap_chan *chan;
8340 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8342 mutex_lock(&conn->chan_lock);
8344 list_for_each_entry(chan, &conn->chan_l, list) {
8345 l2cap_chan_lock(chan);
8347 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8348 state_to_string(chan->state));
8350 if (chan->scid == L2CAP_CID_A2MP) {
8351 l2cap_chan_unlock(chan);
8355 if (!status && encrypt)
8356 chan->sec_level = hcon->sec_level;
8358 if (!__l2cap_no_conn_pending(chan)) {
8359 l2cap_chan_unlock(chan);
8363 if (!status && (chan->state == BT_CONNECTED ||
8364 chan->state == BT_CONFIG)) {
8365 chan->ops->resume(chan);
8366 l2cap_check_encryption(chan, encrypt);
8367 l2cap_chan_unlock(chan);
8371 if (chan->state == BT_CONNECT) {
8372 if (!status && l2cap_check_enc_key_size(hcon))
8373 l2cap_start_connection(chan);
8375 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8376 } else if (chan->state == BT_CONNECT2 &&
8377 !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8378 chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8379 struct l2cap_conn_rsp rsp;
8382 if (!status && l2cap_check_enc_key_size(hcon)) {
8383 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8384 res = L2CAP_CR_PEND;
8385 stat = L2CAP_CS_AUTHOR_PEND;
8386 chan->ops->defer(chan);
8388 l2cap_state_change(chan, BT_CONFIG);
8389 res = L2CAP_CR_SUCCESS;
8390 stat = L2CAP_CS_NO_INFO;
8393 l2cap_state_change(chan, BT_DISCONN);
8394 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8395 res = L2CAP_CR_SEC_BLOCK;
8396 stat = L2CAP_CS_NO_INFO;
8399 rsp.scid = cpu_to_le16(chan->dcid);
8400 rsp.dcid = cpu_to_le16(chan->scid);
8401 rsp.result = cpu_to_le16(res);
8402 rsp.status = cpu_to_le16(stat);
8403 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
8406 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8407 res == L2CAP_CR_SUCCESS) {
8409 set_bit(CONF_REQ_SENT, &chan->conf_state);
8410 l2cap_send_cmd(conn, l2cap_get_ident(conn),
8412 l2cap_build_conf_req(chan, buf, sizeof(buf)),
8414 chan->num_conf_req++;
8418 l2cap_chan_unlock(chan);
8421 mutex_unlock(&conn->chan_lock);
8424 /* Append fragment into frame respecting the maximum len of rx_skb */
8425 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
8428 if (!conn->rx_skb) {
8429 /* Allocate skb for the complete frame (with header) */
8430 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8437 /* Copy as much as the rx_skb can hold */
8438 len = min_t(u16, len, skb->len);
8439 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
8441 conn->rx_len -= len;
8446 static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
8448 struct sk_buff *rx_skb;
8451 /* Append just enough to complete the header */
8452 len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);
8454 /* If header could not be read just continue */
8455 if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
8458 rx_skb = conn->rx_skb;
8459 len = get_unaligned_le16(rx_skb->data);
8461 /* Check if rx_skb has enough space to received all fragments */
8462 if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
8463 /* Update expected len */
8464 conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
8465 return L2CAP_LEN_SIZE;
8468 /* Reset conn->rx_skb since it will need to be reallocated in order to
8469 * fit all fragments.
8471 conn->rx_skb = NULL;
8473 /* Reallocates rx_skb using the exact expected length */
8474 len = l2cap_recv_frag(conn, rx_skb,
8475 len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
8481 static void l2cap_recv_reset(struct l2cap_conn *conn)
8483 kfree_skb(conn->rx_skb);
8484 conn->rx_skb = NULL;
8488 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8490 struct l2cap_conn *conn = hcon->l2cap_data;
8493 /* For AMP controller do not create l2cap conn */
8494 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8498 conn = l2cap_conn_add(hcon);
8503 BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
8507 case ACL_START_NO_FLUSH:
8510 BT_ERR("Unexpected start frame (len %d)", skb->len);
8511 l2cap_recv_reset(conn);
8512 l2cap_conn_unreliable(conn, ECOMM);
8515 /* Start fragment may not contain the L2CAP length so just
8516 * copy the initial byte when that happens and use conn->mtu as
8519 if (skb->len < L2CAP_LEN_SIZE) {
8520 l2cap_recv_frag(conn, skb, conn->mtu);
8524 len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
8526 if (len == skb->len) {
8527 /* Complete frame received */
8528 l2cap_recv_frame(conn, skb);
8532 BT_DBG("Start: total len %d, frag len %u", len, skb->len);
8534 if (skb->len > len) {
8535 BT_ERR("Frame is too long (len %u, expected len %d)",
8537 l2cap_conn_unreliable(conn, ECOMM);
8541 /* Append fragment into frame (with header) */
8542 if (l2cap_recv_frag(conn, skb, len) < 0)
8548 BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);
8550 if (!conn->rx_skb) {
8551 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8552 l2cap_conn_unreliable(conn, ECOMM);
8556 /* Complete the L2CAP length if it has not been read */
8557 if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
8558 if (l2cap_recv_len(conn, skb) < 0) {
8559 l2cap_conn_unreliable(conn, ECOMM);
8563 /* Header still could not be read just continue */
8564 if (conn->rx_skb->len < L2CAP_LEN_SIZE)
8568 if (skb->len > conn->rx_len) {
8569 BT_ERR("Fragment is too long (len %u, expected %u)",
8570 skb->len, conn->rx_len);
8571 l2cap_recv_reset(conn);
8572 l2cap_conn_unreliable(conn, ECOMM);
8576 /* Append fragment into frame (with header) */
8577 l2cap_recv_frag(conn, skb, skb->len);
8579 if (!conn->rx_len) {
8580 /* Complete frame received. l2cap_recv_frame
8581 * takes ownership of the skb so set the global
8582 * rx_skb pointer to NULL first.
8584 struct sk_buff *rx_skb = conn->rx_skb;
8585 conn->rx_skb = NULL;
8586 l2cap_recv_frame(conn, rx_skb);
8595 static struct hci_cb l2cap_cb = {
8597 .connect_cfm = l2cap_connect_cfm,
8598 .disconn_cfm = l2cap_disconn_cfm,
8599 .security_cfm = l2cap_security_cfm,
8602 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8604 struct l2cap_chan *c;
8606 read_lock(&chan_list_lock);
8608 list_for_each_entry(c, &chan_list, global_l) {
8609 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8610 &c->src, c->src_type, &c->dst, c->dst_type,
8611 c->state, __le16_to_cpu(c->psm),
8612 c->scid, c->dcid, c->imtu, c->omtu,
8613 c->sec_level, c->mode);
8616 read_unlock(&chan_list_lock);
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* debugfs entry created by l2cap_init(), removed by l2cap_exit() */
static struct dentry *l2cap_debugfs;
8625 int __init l2cap_init(void)
8629 err = l2cap_init_sockets();
8633 hci_register_cb(&l2cap_cb);
8635 if (IS_ERR_OR_NULL(bt_debugfs))
8638 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8639 NULL, &l2cap_debugfs_fops);
8644 void l2cap_exit(void)
8646 debugfs_remove(l2cap_debugfs);
8647 hci_unregister_cb(&l2cap_cb);
8648 l2cap_cleanup_sockets();
8651 module_param(disable_ertm, bool, 0644);
8652 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8654 module_param(enable_ecred, bool, 0644);
8655 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");