2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 struct sk_buff_head *skbs, u8 event);
64 static void l2cap_retrans_timeout(struct work_struct *work);
65 static void l2cap_monitor_timeout(struct work_struct *work);
66 static void l2cap_ack_timeout(struct work_struct *work);
/* Map an HCI link/address type pair onto the BDADDR_* address constants
 * exposed to L2CAP sockets.
 * NOTE(review): this listing is elided -- the else arm, the non-LE
 * fallback return and closing braces are missing lines; confirm against
 * the upstream file before relying on control flow shown here.
 */
68 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
70 if (link_type == LE_LINK) {
71 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
72 return BDADDR_LE_PUBLIC;
74 return BDADDR_LE_RANDOM;
/* Wrapper: BDADDR_* representation of the connection's source address. */
80 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
82 return bdaddr_type(hcon->type, hcon->src_type);
/* Wrapper: BDADDR_* representation of the connection's destination address. */
85 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
87 return bdaddr_type(hcon->type, hcon->dst_type);
90 /* ---- L2CAP channels ---- */
/* Unlocked lookup of a channel on this connection by destination CID.
 * Caller must hold conn->chan_lock (the __ prefix convention in this file).
 */
92 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
97 list_for_each_entry(c, &conn->chan_l, list) {
/* Unlocked lookup by source CID; caller must hold conn->chan_lock. */
104 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
107 struct l2cap_chan *c;
109 list_for_each_entry(c, &conn->chan_l, list) {
116 /* Find channel with given SCID.
117 * Returns a reference locked channel.
/* Takes conn->chan_lock around the unlocked lookup, then acquires a
 * reference only if the channel's refcount has not already hit zero
 * (guards against racing with channel teardown). */
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_scid(conn, cid);
127 /* Only lock if chan reference is not 0 */
128 c = l2cap_chan_hold_unless_zero(c);
132 mutex_unlock(&conn->chan_lock);
137 /* Find channel with given DCID.
138 * Returns a reference locked channel.
/* Same pattern as l2cap_get_chan_by_scid, keyed on the destination CID. */
140 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
143 struct l2cap_chan *c;
145 mutex_lock(&conn->chan_lock);
146 c = __l2cap_get_chan_by_dcid(conn, cid);
148 /* Only lock if chan reference is not 0 */
149 c = l2cap_chan_hold_unless_zero(c);
153 mutex_unlock(&conn->chan_lock);
/* Unlocked lookup by signalling command identifier; caller holds chan_lock. */
158 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &conn->chan_l, list) {
164 if (c->ident == ident)
/* Locked lookup by ident, same hold-unless-zero protocol as above. */
170 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
173 struct l2cap_chan *c;
175 mutex_lock(&conn->chan_lock);
176 c = __l2cap_get_chan_by_ident(conn, ident);
178 /* Only lock if chan reference is not 0 */
179 c = l2cap_chan_hold_unless_zero(c);
183 mutex_unlock(&conn->chan_lock);
/* Search the global channel list for a PSM bound to a given source address,
 * filtering out entries whose transport (BR/EDR vs LE) does not match the
 * requested src_type. Caller must hold chan_list_lock.
 */
188 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
191 struct l2cap_chan *c;
193 list_for_each_entry(c, &chan_list, global_l) {
194 if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
197 if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
200 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a channel to a PSM. If a specific PSM is given, fail when it is
 * already bound to the same source address; if psm is 0, scan the dynamic
 * range (BR/EDR or LE, per src_type) for a free value.
 * NOTE(review): elided lines hide the error returns, the 'incr' assignment
 * and the exit of the search loop -- confirm against upstream.
 */
206 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
210 write_lock(&chan_list_lock);
212 if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
222 u16 p, start, end, incr;
224 if (chan->src_type == BDADDR_BREDR) {
225 start = L2CAP_PSM_DYN_START;
226 end = L2CAP_PSM_AUTO_END;
229 start = L2CAP_PSM_LE_DYN_START;
230 end = L2CAP_PSM_LE_DYN_END;
/* First free PSM in range wins; both psm and sport record it. */
235 for (p = start; p <= end; p += incr)
236 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
238 chan->psm = cpu_to_le16(p);
239 chan->sport = cpu_to_le16(p);
246 write_unlock(&chan_list_lock);
249 EXPORT_SYMBOL_GPL(l2cap_add_psm);
/* Bind a channel to a fixed CID; fixed channels override the
 * connection-oriented defaults set at create time. */
251 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
253 write_lock(&chan_list_lock);
255 /* Override the defaults (which are for conn-oriented) */
256 chan->omtu = L2CAP_DEFAULT_MTU;
257 chan->chan_type = L2CAP_CHAN_FIXED;
261 write_unlock(&chan_list_lock);
/* Allocate the first unused dynamic source CID on this connection; the
 * dynamic range's upper bound depends on the link type (LE vs BR/EDR).
 * Caller must hold conn->chan_lock (callers use __l2cap_get_chan_by_scid).
 */
266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
270 if (conn->hcon->type == LE_LINK)
271 dyn_end = L2CAP_CID_LE_DYN_END;
273 dyn_end = L2CAP_CID_DYN_END;
275 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move the channel to a new state via the socket/chan ops callback
 * (err == 0: plain state transition). */
283 static void l2cap_state_change(struct l2cap_chan *chan, int state)
285 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
286 state_to_string(state));
289 chan->ops->state_change(chan, state, 0);
/* State transition that also reports an error to the owner. */
292 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
296 chan->ops->state_change(chan, chan->state, err);
/* Report an error without changing state (state passed unchanged). */
299 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
301 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer, but only while the monitor timer is
 * not already pending and a retrans timeout is configured. */
304 static void __set_retrans_timer(struct l2cap_chan *chan)
306 if (!delayed_work_pending(&chan->monitor_timer) &&
307 chan->retrans_timeout) {
308 l2cap_set_timer(chan, &chan->retrans_timer,
309 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the monitor timer; the retrans timer is mutually exclusive with it
 * and is cleared first. */
313 static void __set_monitor_timer(struct l2cap_chan *chan)
315 __clear_retrans_timer(chan);
316 if (chan->monitor_timeout) {
317 l2cap_set_timer(chan, &chan->monitor_timer,
318 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of a TX queue for the skb carrying ERTM sequence number
 * 'seq' (stored in the skb control block). */
322 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
327 skb_queue_walk(head, skb) {
328 if (bt_cb(skb)->l2cap.txseq == seq)
335 /* ---- L2CAP sequence number lists ---- */
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338 * SREJ requests that are received and for frames that are to be
339 * retransmitted. These seq_list functions implement a singly-linked
340 * list in an array, where membership in the list can also be checked
341 * in constant time. Items can also be added to the tail of the list
342 * and removed from the head in constant time, without further memory
/* Allocate and clear the backing array. Power-of-two sizing lets the
 * (up to 14-bit) sequence numbers be masked into the smaller array. */
346 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
348 size_t alloc_size, i;
350 /* Allocated size is a power of 2 to map sequence numbers
351 * (which may be up to 14 bits) in to a smaller array that is
352 * sized for the negotiated ERTM transmit windows.
354 alloc_size = roundup_pow_of_two(size);
356 seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
360 seq_list->mask = alloc_size - 1;
361 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 for (i = 0; i < alloc_size; i++)
364 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array (kfree(NULL) is a no-op). */
369 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
371 kfree(seq_list->list);
374 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
377 /* Constant-time check for list membership */
378 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove and return the head of the list; the slot just popped is marked
 * CLEAR, and popping the tail sentinel resets the list to empty. */
381 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
383 u16 seq = seq_list->head;
384 u16 mask = seq_list->mask;
386 seq_list->head = seq_list->list[seq & mask];
387 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
389 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
390 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
391 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Empty the list; cheap early-out when already empty, otherwise O(size). */
397 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
401 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
404 for (i = 0; i <= seq_list->mask; i++)
405 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
407 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
408 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append at the tail in O(1); duplicates are ignored thanks to the
 * constant-time membership slot. An empty list gets head = seq, else the
 * previous tail's slot is linked to the new seq. */
411 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
413 u16 mask = seq_list->mask;
415 /* All appends happen in constant time */
417 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
420 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
421 seq_list->head = seq;
423 seq_list->list[seq_list->tail & mask] = seq;
425 seq_list->tail = seq;
426 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler fired when a channel timer expires. Picks an error
 * reason from the channel state, closes the channel under conn->chan_lock
 * and the channel lock, then drops the reference taken when the timer was
 * scheduled.
 * NOTE(review): the elided lines hide the default 'reason' (upstream uses
 * ETIMEDOUT for the remaining states) -- confirm before relying on it.
 */
429 static void l2cap_chan_timeout(struct work_struct *work)
431 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
433 struct l2cap_conn *conn = chan->conn;
436 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
438 mutex_lock(&conn->chan_lock);
439 /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
440 * this work. No need to call l2cap_chan_hold(chan) here again.
442 l2cap_chan_lock(chan);
444 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
445 reason = ECONNREFUSED;
446 else if (chan->state == BT_CONNECT &&
447 chan->sec_level != BT_SECURITY_SDP)
448 reason = ECONNREFUSED;
452 l2cap_chan_close(chan, reason);
454 chan->ops->close(chan);
456 l2cap_chan_unlock(chan);
/* Balances the hold taken by __set_chan_timer(). */
457 l2cap_chan_put(chan);
459 mutex_unlock(&conn->chan_lock);
/* Allocate and initialise a new channel: queues, lock, default lock
 * nesting, membership in the global channel list, all four delayed-work
 * timers, initial BT_OPEN state and an initial kref of 1.
 */
462 struct l2cap_chan *l2cap_chan_create(void)
464 struct l2cap_chan *chan;
466 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
470 skb_queue_head_init(&chan->tx_q);
471 skb_queue_head_init(&chan->srej_q);
472 mutex_init(&chan->lock);
474 /* Set default lock nesting level */
475 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
477 write_lock(&chan_list_lock);
478 list_add(&chan->global_l, &chan_list);
479 write_unlock(&chan_list_lock);
481 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
482 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
483 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
484 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
486 chan->state = BT_OPEN;
488 kref_init(&chan->kref);
490 /* This flag is cleared in l2cap_chan_ready() */
491 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
493 BT_DBG("chan %p", chan);
497 EXPORT_SYMBOL_GPL(l2cap_chan_create);
/* kref release callback: unlink from the global list and free.
 * NOTE(review): the kfree(chan) line is elided from this listing. */
499 static void l2cap_chan_destroy(struct kref *kref)
501 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
503 BT_DBG("chan %p", chan);
505 write_lock(&chan_list_lock);
506 list_del(&chan->global_l);
507 write_unlock(&chan_list_lock);
/* Unconditional reference acquisition. */
512 void l2cap_chan_hold(struct l2cap_chan *c)
514 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
/* Acquire a reference only if the refcount is still non-zero; returns
 * NULL when the channel is already being destroyed. */
519 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
521 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
523 if (!kref_get_unless_zero(&c->kref))
/* Drop a reference; the last put runs l2cap_chan_destroy(). */
529 void l2cap_chan_put(struct l2cap_chan *c)
531 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
533 kref_put(&c->kref, l2cap_chan_destroy);
535 EXPORT_SYMBOL_GPL(l2cap_chan_put);
/* Reset a channel to the spec-default ERTM/security/flush parameters and
 * clear all configuration state except CONF_NOT_COMPLETE, which is re-set
 * until configuration finishes. */
537 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
539 chan->fcs = L2CAP_FCS_CRC16;
540 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
541 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
542 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Remote parameters start mirrored from our own until negotiated. */
543 chan->remote_max_tx = chan->max_tx;
544 chan->remote_tx_win = chan->tx_win;
545 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
546 chan->sec_level = BT_SECURITY_LOW;
547 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
548 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
549 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
551 chan->conf_state = 0;
552 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
554 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
556 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
/* Initialise LE credit-based flow control: MPS is capped by the HCI
 * connection MTU so L2CAP PDUs never need HCI-level fragmentation, and
 * enough RX credits are issued for one full SDU. */
558 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
561 chan->sdu_last_frag = NULL;
563 chan->tx_credits = tx_credits;
564 /* Derive MPS from connection MTU to stop HCI fragmentation */
565 chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
566 /* Give enough credits for a full packet */
567 chan->rx_credits = (chan->imtu / chan->mps) + 1;
569 skb_queue_head_init(&chan->tx_q);
/* Enhanced-credit variant: same init, but enforce the spec-mandated
 * minimum MPS of 64 octets and recompute credits if it was raised. */
572 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
574 l2cap_le_flowctl_init(chan, tx_credits);
576 /* L2CAP implementations shall support a minimum MPS of 64 octets */
577 if (chan->mps < L2CAP_ECRED_MIN_MPS) {
578 chan->mps = L2CAP_ECRED_MIN_MPS;
579 chan->rx_credits = (chan->imtu / chan->mps) + 1;
/* Attach a channel to a connection: assign CIDs/MTU according to the
 * channel type, set QoS defaults, take a channel reference and (for
 * non-fixed channels, or fixed ones that asked for it) pin the hci_conn.
 * Caller must hold conn->chan_lock.
 */
583 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
585 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
586 __le16_to_cpu(chan->psm), chan->dcid);
/* Default disconnect reason until the remote tells us otherwise. */
588 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
592 switch (chan->chan_type) {
593 case L2CAP_CHAN_CONN_ORIENTED:
594 /* Alloc CID for connection-oriented socket */
595 chan->scid = l2cap_alloc_cid(conn);
596 if (conn->hcon->type == ACL_LINK)
597 chan->omtu = L2CAP_DEFAULT_MTU;
600 case L2CAP_CHAN_CONN_LESS:
601 /* Connectionless socket */
602 chan->scid = L2CAP_CID_CONN_LESS;
603 chan->dcid = L2CAP_CID_CONN_LESS;
604 chan->omtu = L2CAP_DEFAULT_MTU;
607 case L2CAP_CHAN_FIXED:
608 /* Caller will set CID and CID specific MTU values */
612 /* Raw socket can send/recv signalling messages only */
613 chan->scid = L2CAP_CID_SIGNALING;
614 chan->dcid = L2CAP_CID_SIGNALING;
615 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort QoS defaults for all channel types. */
618 chan->local_id = L2CAP_BESTEFFORT_ID;
619 chan->local_stype = L2CAP_SERV_BESTEFFORT;
620 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
621 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
622 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
623 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
625 l2cap_chan_hold(chan);
627 /* Only keep a reference for fixed channels if they requested it */
628 if (chan->chan_type != L2CAP_CHAN_FIXED ||
629 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
630 hci_conn_hold(conn->hcon);
632 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
635 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
637 mutex_lock(&conn->chan_lock);
638 __l2cap_chan_add(conn, chan);
639 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: stop the channel timer, tear down
 * the owner, unlink, drop references (channel and, where held, hci_conn),
 * clean up AMP state and purge per-mode queues/timers/seq lists.
 * NOTE(review): the guard around the conn-specific section (upstream:
 * 'if (conn)') is elided from this listing.
 */
642 void l2cap_chan_del(struct l2cap_chan *chan, int err)
644 struct l2cap_conn *conn = chan->conn;
646 __clear_chan_timer(chan);
648 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
649 state_to_string(chan->state));
651 chan->ops->teardown(chan, err);
654 struct amp_mgr *mgr = conn->hcon->amp_mgr;
655 /* Delete from channel list */
656 list_del(&chan->list);
/* Balances the hold taken in __l2cap_chan_add(). */
658 l2cap_chan_put(chan);
662 /* Reference was only held for non-fixed channels or
663 * fixed channels that explicitly requested it using the
664 * FLAG_HOLD_HCI_CONN flag.
666 if (chan->chan_type != L2CAP_CHAN_FIXED ||
667 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
668 hci_conn_drop(conn->hcon);
670 if (mgr && mgr->bredr_chan == chan)
671 mgr->bredr_chan = NULL;
/* Tear down any high-speed (AMP) logical link still attached. */
674 if (chan->hs_hchan) {
675 struct hci_chan *hs_hchan = chan->hs_hchan;
677 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
678 amp_disconnect_logical_link(hs_hchan);
/* Nothing more to free if configuration never completed. */
681 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
/* Per-mode cleanup: ERTM additionally owns timers, the SREJ queue and
 * the two sequence-number lists. */
684 switch (chan->mode) {
685 case L2CAP_MODE_BASIC:
688 case L2CAP_MODE_LE_FLOWCTL:
689 case L2CAP_MODE_EXT_FLOWCTL:
690 skb_queue_purge(&chan->tx_q);
693 case L2CAP_MODE_ERTM:
694 __clear_retrans_timer(chan);
695 __clear_monitor_timer(chan);
696 __clear_ack_timer(chan);
698 skb_queue_purge(&chan->srej_q);
700 l2cap_seq_list_free(&chan->srej_list);
701 l2cap_seq_list_free(&chan->retrans_list);
704 case L2CAP_MODE_STREAMING:
705 skb_queue_purge(&chan->tx_q);
709 EXPORT_SYMBOL_GPL(l2cap_chan_del);
/* Invoke 'func' on every channel of the connection; caller must hold
 * conn->chan_lock. */
711 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
714 struct l2cap_chan *chan;
716 list_for_each_entry(chan, &conn->chan_l, list) {
/* Public locked iteration wrapper. */
721 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
727 mutex_lock(&conn->chan_lock);
728 __l2cap_chan_list(conn, func, data);
729 mutex_unlock(&conn->chan_lock);
732 EXPORT_SYMBOL_GPL(l2cap_chan_list);
/* Worker: after an LE identity/address resolution change, propagate the
 * hci_conn's (possibly updated) destination address and type to every
 * channel on the connection. */
734 static void l2cap_conn_update_id_addr(struct work_struct *work)
736 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
737 id_addr_update_work);
738 struct hci_conn *hcon = conn->hcon;
739 struct l2cap_chan *chan;
741 mutex_lock(&conn->chan_lock);
743 list_for_each_entry(chan, &conn->chan_l, list) {
744 l2cap_chan_lock(chan);
745 bacpy(&chan->dst, &hcon->dst);
746 chan->dst_type = bdaddr_dst_type(hcon);
747 l2cap_chan_unlock(chan);
750 mutex_unlock(&conn->chan_lock);
/* Reject a pending LE credit-based connection request: authorization
 * failure when setup was deferred, otherwise bad PSM; moves the channel
 * to BT_DISCONN and answers the stored request ident. */
753 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
755 struct l2cap_conn *conn = chan->conn;
756 struct l2cap_le_conn_rsp rsp;
759 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
760 result = L2CAP_CR_LE_AUTHORIZATION;
762 result = L2CAP_CR_LE_BAD_PSM;
764 l2cap_state_change(chan, BT_DISCONN);
766 rsp.dcid = cpu_to_le16(chan->scid);
767 rsp.mtu = cpu_to_le16(chan->imtu);
768 rsp.mps = cpu_to_le16(chan->mps);
769 rsp.credits = cpu_to_le16(chan->rx_credits);
770 rsp.result = cpu_to_le16(result);
772 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Enhanced-credit (ECRED) variant of the reject above; only the result
 * field of the (zeroed) response carries information. */
776 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
778 struct l2cap_conn *conn = chan->conn;
779 struct l2cap_ecred_conn_rsp rsp;
782 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
783 result = L2CAP_CR_LE_AUTHORIZATION;
785 result = L2CAP_CR_LE_BAD_PSM;
787 l2cap_state_change(chan, BT_DISCONN);
789 memset(&rsp, 0, sizeof(rsp));
791 rsp.result = cpu_to_le16(result);
/* NOTE(review): this sends an ecred response with code L2CAP_LE_CONN_RSP;
 * an enhanced-credit reject would be expected to use L2CAP_ECRED_CONN_RSP
 * -- verify against the Core Spec / upstream history. */
793 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* BR/EDR connection reject: security block when setup was deferred,
 * otherwise bad PSM. */
797 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
799 struct l2cap_conn *conn = chan->conn;
800 struct l2cap_conn_rsp rsp;
803 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
804 result = L2CAP_CR_SEC_BLOCK;
806 result = L2CAP_CR_BAD_PSM;
808 l2cap_state_change(chan, BT_DISCONN);
810 rsp.scid = cpu_to_le16(chan->dcid);
811 rsp.dcid = cpu_to_le16(chan->scid);
812 rsp.result = cpu_to_le16(result);
813 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
815 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close the channel according to its current state: tear down when never
 * connected, send a disconnect request when fully connected, send the
 * appropriate (transport- and mode-specific) reject when the remote's
 * connect request is still pending, otherwise just delete.
 * NOTE(review): the case labels (BT_LISTEN/BT_CONNECTED/etc.) are elided
 * from this listing -- the branch-to-state mapping must be confirmed
 * against the upstream file.
 */
818 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
820 struct l2cap_conn *conn = chan->conn;
822 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
824 switch (chan->state) {
826 chan->ops->teardown(chan, 0);
831 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
832 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
833 l2cap_send_disconn_req(chan, reason);
835 l2cap_chan_del(chan, reason);
839 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
840 if (conn->hcon->type == ACL_LINK)
841 l2cap_chan_connect_reject(chan);
842 else if (conn->hcon->type == LE_LINK) {
843 switch (chan->mode) {
844 case L2CAP_MODE_LE_FLOWCTL:
845 l2cap_chan_le_connect_reject(chan);
847 case L2CAP_MODE_EXT_FLOWCTL:
848 l2cap_chan_ecred_connect_reject(chan);
854 l2cap_chan_del(chan, reason);
859 l2cap_chan_del(chan, reason);
863 chan->ops->teardown(chan, 0);
867 EXPORT_SYMBOL(l2cap_chan_close);
/* Translate the channel type, PSM and security level into the HCI
 * authentication requirement used when raising link security. SDP (and
 * 3DSP connectionless) traffic is demoted to BT_SECURITY_SDP so it never
 * triggers pairing.
 * NOTE(review): case labels for the raw-socket and default branches are
 * elided in this listing.
 */
869 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
871 switch (chan->chan_type) {
873 switch (chan->sec_level) {
874 case BT_SECURITY_HIGH:
875 case BT_SECURITY_FIPS:
876 return HCI_AT_DEDICATED_BONDING_MITM;
877 case BT_SECURITY_MEDIUM:
878 return HCI_AT_DEDICATED_BONDING;
880 return HCI_AT_NO_BONDING;
883 case L2CAP_CHAN_CONN_LESS:
884 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
885 if (chan->sec_level == BT_SECURITY_LOW)
886 chan->sec_level = BT_SECURITY_SDP;
888 if (chan->sec_level == BT_SECURITY_HIGH ||
889 chan->sec_level == BT_SECURITY_FIPS)
890 return HCI_AT_NO_BONDING_MITM;
892 return HCI_AT_NO_BONDING;
894 case L2CAP_CHAN_CONN_ORIENTED:
895 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
896 if (chan->sec_level == BT_SECURITY_LOW)
897 chan->sec_level = BT_SECURITY_SDP;
899 if (chan->sec_level == BT_SECURITY_HIGH ||
900 chan->sec_level == BT_SECURITY_FIPS)
901 return HCI_AT_NO_BONDING_MITM;
903 return HCI_AT_NO_BONDING;
908 switch (chan->sec_level) {
909 case BT_SECURITY_HIGH:
910 case BT_SECURITY_FIPS:
911 return HCI_AT_GENERAL_BONDING_MITM;
912 case BT_SECURITY_MEDIUM:
913 return HCI_AT_GENERAL_BONDING;
915 return HCI_AT_NO_BONDING;
921 /* Service level security */
/* Raise link security to the channel's requirement: SMP on LE links,
 * HCI authentication (with the auth type derived above) on BR/EDR. */
922 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
924 struct l2cap_conn *conn = chan->conn;
927 if (conn->hcon->type == LE_LINK)
928 return smp_conn_security(conn->hcon, chan->sec_level);
930 auth_type = l2cap_get_auth_type(chan);
932 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
/* Allocate the next signalling-command identifier for this connection;
 * kernel-originated idents wrap within 1..128 (129+ are reserved or used
 * by userspace tools), serialized by conn->ident_lock. */
936 static u8 l2cap_get_ident(struct l2cap_conn *conn)
940 /* Get next available identificator.
941 * 1 - 128 are used by kernel.
942 * 129 - 199 are reserved.
943 * 200 - 254 are used by utilities like l2ping, etc.
946 mutex_lock(&conn->ident_lock);
948 if (++conn->tx_ident > 128)
953 mutex_unlock(&conn->ident_lock);
/* Build a signalling command PDU and push it out on the connection's HCI
 * channel at maximum priority, using NO_FLUSH framing where the link
 * supports it. */
958 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
961 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
964 BT_DBG("code 0x%2.2x", code);
969 /* Use NO_FLUSH if supported or we have an LE link (which does
970 * not support auto-flushing packets) */
971 if (lmp_no_flush_capable(conn->hcon->hdev) ||
972 conn->hcon->type == LE_LINK)
973 flags = ACL_START_NO_FLUSH;
977 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
978 skb->priority = HCI_PRIO_MAX;
980 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any state other than
 * stable or wait-prepare). */
983 static bool __chan_is_moving(struct l2cap_chan *chan)
985 return chan->move_state != L2CAP_MOVE_STABLE &&
986 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data skb for the channel: route via the high-speed (AMP)
 * channel when one is attached and no move is in flight, otherwise over
 * the ACL link with flushability chosen from link type and channel flags. */
989 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
991 struct hci_conn *hcon = chan->conn->hcon;
994 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
997 if (chan->hs_hcon && !__chan_is_moving(chan)) {
999 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
1006 /* Use NO_FLUSH for LE links (where this is the only option) or
1007 * if the BR/EDR link supports it and flushing has not been
1008 * explicitly requested (through FLAG_FLUSHABLE).
1010 if (hcon->type == LE_LINK ||
1011 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1012 lmp_no_flush_capable(hcon->hdev)))
1013 flags = ACL_START_NO_FLUSH;
1017 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1018 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit ERTM enhanced control field into l2cap_ctrl: common
 * reqseq/final bits, then either S-frame (poll/super) or I-frame
 * (sar/txseq) bits depending on the frame-type bit. */
1021 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1023 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1024 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1026 if (enh & L2CAP_CTRL_FRAME_TYPE) {
1028 control->sframe = 1;
1029 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1030 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1036 control->sframe = 0;
1037 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1038 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode the 32-bit extended control field; same structure as the
 * enhanced form but with the wider EXT_CTRL masks/shifts. */
1045 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1047 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1048 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1050 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1052 control->sframe = 1;
1053 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1054 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1060 control->sframe = 0;
1061 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1062 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Strip and decode the control field at the front of an inbound frame,
 * choosing extended vs enhanced format from FLAG_EXT_CTRL; the decoded
 * fields land in the skb's control block. */
1069 static inline void __unpack_control(struct l2cap_chan *chan,
1070 struct sk_buff *skb)
1072 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1073 __unpack_extended_control(get_unaligned_le32(skb->data),
1074 &bt_cb(skb)->l2cap);
1075 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1077 __unpack_enhanced_control(get_unaligned_le16(skb->data),
1078 &bt_cb(skb)->l2cap);
1079 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Inverse of __unpack_extended_control: encode l2cap_ctrl into the
 * 32-bit wire format. */
1083 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1087 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1088 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1090 if (control->sframe) {
1091 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1092 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1093 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1095 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1096 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Inverse of __unpack_enhanced_control: encode into the 16-bit format. */
1102 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1106 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1107 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1109 if (control->sframe) {
1110 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1111 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1112 packed |= L2CAP_CTRL_FRAME_TYPE;
1114 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1115 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the (extended or enhanced) control field into an outbound frame
 * immediately after the basic L2CAP header. */
1121 static inline void __pack_control(struct l2cap_chan *chan,
1122 struct l2cap_ctrl *control,
1123 struct sk_buff *skb)
1125 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1126 put_unaligned_le32(__pack_extended_control(control),
1127 skb->data + L2CAP_HDR_SIZE);
1129 put_unaligned_le16(__pack_enhanced_control(control),
1130 skb->data + L2CAP_HDR_SIZE);
/* Size of the ERTM header: larger when the extended control field is in
 * use on this channel. */
1134 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1136 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1137 return L2CAP_EXT_HDR_SIZE;
1139 return L2CAP_ENH_HDR_SIZE;
/* Build a supervisory (S-) frame PDU: basic header, pre-packed control
 * field (16 or 32 bit per FLAG_EXT_CTRL) and, when CRC16 FCS is
 * negotiated, a trailing checksum over the whole frame. */
1142 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1145 struct sk_buff *skb;
1146 struct l2cap_hdr *lh;
1147 int hlen = __ertm_hdr_size(chan);
1149 if (chan->fcs == L2CAP_FCS_CRC16)
1150 hlen += L2CAP_FCS_SIZE;
1152 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1155 return ERR_PTR(-ENOMEM);
1157 lh = skb_put(skb, L2CAP_HDR_SIZE);
1158 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1159 lh->cid = cpu_to_le16(chan->dcid);
1161 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1162 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1164 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1166 if (chan->fcs == L2CAP_FCS_CRC16) {
1167 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1168 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1171 skb->priority = HCI_PRIO_MAX;
/* Send an S-frame, maintaining protocol state as a side effect: set the
 * F-bit when owed, track RNR-sent, and (for non-SREJ frames) record the
 * acked sequence and cancel the pending ack timer. Skipped entirely
 * while a channel move is in progress. */
1175 static void l2cap_send_sframe(struct l2cap_chan *chan,
1176 struct l2cap_ctrl *control)
1178 struct sk_buff *skb;
1181 BT_DBG("chan %p, control %p", chan, control);
1183 if (!control->sframe)
1186 if (__chan_is_moving(chan))
1189 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1193 if (control->super == L2CAP_SUPER_RR)
1194 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1195 else if (control->super == L2CAP_SUPER_RNR)
1196 set_bit(CONN_RNR_SENT, &chan->conn_state);
1198 if (control->super != L2CAP_SUPER_SREJ) {
1199 chan->last_acked_seq = control->reqseq;
1200 __clear_ack_timer(chan);
1203 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1204 control->final, control->poll, control->super);
1206 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1207 control_field = __pack_extended_control(control);
1209 control_field = __pack_enhanced_control(control);
1211 skb = l2cap_create_sframe_pdu(chan, control_field);
1213 l2cap_do_send(chan, skb);
/* Convenience: send RR (or RNR while locally busy) acknowledging the
 * current buffer_seq, optionally with the poll bit set. */
1216 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1218 struct l2cap_ctrl control;
1220 BT_DBG("chan %p, poll %d", chan, poll);
1222 memset(&control, 0, sizeof(control));
1224 control.poll = poll;
1226 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1227 control.super = L2CAP_SUPER_RNR;
1229 control.super = L2CAP_SUPER_RR;
1231 control.reqseq = chan->buffer_seq;
1232 l2cap_send_sframe(chan, &control);
/* True when no connect is pending for this channel; non-conn-oriented
 * channels never have one. */
1235 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1237 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1240 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may use an AMP (high-speed) controller:
 * both sides must advertise the A2MP fixed channel, at least one non-BR
 * AMP controller must be up, and the channel policy must allow it. */
1243 static bool __amp_capable(struct l2cap_chan *chan)
1245 struct l2cap_conn *conn = chan->conn;
1246 struct hci_dev *hdev;
1247 bool amp_available = false;
1249 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1252 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1255 read_lock(&hci_dev_list_lock);
1256 list_for_each_entry(hdev, &hci_dev_list, list) {
1257 if (hdev->amp_type != AMP_TYPE_BREDR &&
1258 test_bit(HCI_UP, &hdev->flags)) {
1259 amp_available = true;
1263 read_unlock(&hci_dev_list_lock);
1265 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1266 return amp_available;
/* Validate the channel's extended flow spec parameters.
 * NOTE(review): the body is almost entirely elided in this listing. */
1271 static bool l2cap_check_efs(struct l2cap_chan *chan)
1273 /* Check EFS parameters */
/* Send an L2CAP Connection Request for the channel and mark the connect
 * as pending until the response arrives. */
1277 void l2cap_send_conn_req(struct l2cap_chan *chan)
1279 struct l2cap_conn *conn = chan->conn;
1280 struct l2cap_conn_req req;
1282 req.scid = cpu_to_le16(chan->scid);
1283 req.psm = chan->psm;
1285 chan->ident = l2cap_get_ident(conn);
1287 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1289 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* AMP variant: Create Channel Request targeting a specific controller id. */
1292 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1294 struct l2cap_create_chan_req req;
1295 req.scid = cpu_to_le16(chan->scid);
1296 req.psm = chan->psm;
1297 req.amp_id = amp_id;
1299 chan->ident = l2cap_get_ident(chan->conn);
1301 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all timers, reset retry
 * accounting on queued frames, rewind the expected sequence to the
 * buffered one, flush SREJ state and park TX/RX state machines in the
 * move-specific states; the peer is treated as busy until the move ends.
 * No-op for non-ERTM modes. */
1305 static void l2cap_move_setup(struct l2cap_chan *chan)
1307 struct sk_buff *skb;
1309 BT_DBG("chan %p", chan);
1311 if (chan->mode != L2CAP_MODE_ERTM)
1314 __clear_retrans_timer(chan);
1315 __clear_monitor_timer(chan);
1316 __clear_ack_timer(chan);
1318 chan->retry_count = 0;
1319 skb_queue_walk(&chan->tx_q, skb) {
1320 if (bt_cb(skb)->l2cap.retries)
1321 bt_cb(skb)->l2cap.retries = 1;
1326 chan->expected_tx_seq = chan->buffer_seq;
1328 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1329 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1330 l2cap_seq_list_clear(&chan->retrans_list);
1331 l2cap_seq_list_clear(&chan->srej_list);
1332 skb_queue_purge(&chan->srej_q);
1334 chan->tx_state = L2CAP_TX_STATE_XMIT;
1335 chan->rx_state = L2CAP_RX_STATE_MOVE;
1337 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return to the stable move state and, for ERTM,
 * resynchronise -- the initiator polls the peer (waits for F-bit), the
 * responder waits for the poll (P-bit). */
1340 static void l2cap_move_done(struct l2cap_chan *chan)
1342 u8 move_role = chan->move_role;
1343 BT_DBG("chan %p", chan);
1345 chan->move_state = L2CAP_MOVE_STABLE;
1346 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1348 if (chan->mode != L2CAP_MODE_ERTM)
1351 switch (move_role) {
1352 case L2CAP_MOVE_ROLE_INITIATOR:
1353 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1354 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1356 case L2CAP_MOVE_ROLE_RESPONDER:
1357 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Transition a channel to BT_CONNECTED: clear configuration state and
 * the channel timer, suspend credit-based channels that start with no TX
 * credits, then notify the owner via ops->ready().
 * NOTE(review): two 'state == BT_CONNECTED' checks appear because elided
 * lines (the early return and the IPSP 6LoWPAN hooks between them) are
 * missing from this listing. */
1362 static void l2cap_chan_ready(struct l2cap_chan *chan)
1364 /* The channel may have already been flagged as connected in
1365 * case of receiving data before the L2CAP info req/rsp
1366 * procedure is complete.
1369 if (chan->state == BT_CONNECTED)
1372 if (chan->state == BT_CONNECTED) {
1373 if (chan->psm == L2CAP_PSM_IPSP) {
1374 struct l2cap_conn *conn = chan->conn;
1376 if (conn->hcon->out)
1378 else if (conn->hcon->type != LE_LINK)
1386 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1387 chan->conf_state = 0;
1388 __clear_chan_timer(chan);
1390 switch (chan->mode) {
1391 case L2CAP_MODE_LE_FLOWCTL:
1392 case L2CAP_MODE_EXT_FLOWCTL:
/* No credits yet: hold back the owner's TX path until credits arrive. */
1393 if (!chan->tx_credits)
1394 chan->ops->suspend(chan);
1398 chan->state = BT_CONNECTED;
1400 chan->ops->ready(chan);
/* Initiate an LE credit-based connection: advertise an MTU matching the
 * link MTU, initialise flow control with zero TX credits (peer grants
 * them in the response) and send the LE Connection Request. The
 * REQ_SENT flag guards against sending twice. */
1403 static void l2cap_le_connect(struct l2cap_chan *chan)
1405 struct l2cap_conn *conn = chan->conn;
1406 struct l2cap_le_conn_req req;
1408 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1412 chan->imtu = chan->conn->mtu;
1414 l2cap_le_flowctl_init(chan, 0);
1416 req.psm = chan->psm;
1417 req.scid = cpu_to_le16(chan->scid);
1418 req.mtu = cpu_to_le16(chan->imtu);
1419 req.mps = cpu_to_le16(chan->mps);
1420 req.credits = cpu_to_le16(chan->rx_credits);
1422 chan->ident = l2cap_get_ident(conn);
1424 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1428 struct l2cap_ecred_conn_data {
1430 struct l2cap_ecred_conn_req req;
1433 struct l2cap_chan *chan;
1438 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1440 struct l2cap_ecred_conn_data *conn = data;
1443 if (chan == conn->chan)
1446 if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1449 pid = chan->ops->get_peer_pid(chan);
1451 /* Only add deferred channels with the same PID/PSM */
1452 if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1453 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1456 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1459 l2cap_ecred_init(chan, 0);
1461 /* Set the same ident so we can match on the rsp */
1462 chan->ident = conn->chan->ident;
1464 /* Include all channels deferred */
1465 conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1470 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1472 struct l2cap_conn *conn = chan->conn;
1473 struct l2cap_ecred_conn_data data;
1475 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1478 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1481 l2cap_ecred_init(chan, 0);
1483 memset(&data, 0, sizeof(data));
1484 data.pdu.req.psm = chan->psm;
1485 data.pdu.req.mtu = cpu_to_le16(chan->imtu);
1486 data.pdu.req.mps = cpu_to_le16(chan->mps);
1487 data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1488 data.pdu.scid[0] = cpu_to_le16(chan->scid);
1490 chan->ident = l2cap_get_ident(conn);
1491 data.pid = chan->ops->get_peer_pid(chan);
1495 data.pid = chan->ops->get_peer_pid(chan);
1497 __l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1499 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1500 sizeof(data.pdu.req) + data.count * sizeof(__le16),
1504 static void l2cap_le_start(struct l2cap_chan *chan)
1506 struct l2cap_conn *conn = chan->conn;
1508 if (!smp_conn_security(conn->hcon, chan->sec_level))
1512 l2cap_chan_ready(chan);
1516 if (chan->state == BT_CONNECT) {
1517 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1518 l2cap_ecred_connect(chan);
1520 l2cap_le_connect(chan);
1524 static void l2cap_start_connection(struct l2cap_chan *chan)
1526 if (__amp_capable(chan)) {
1527 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1528 a2mp_discover_amp(chan);
1529 } else if (chan->conn->hcon->type == LE_LINK) {
1530 l2cap_le_start(chan);
1532 l2cap_send_conn_req(chan);
1536 static void l2cap_request_info(struct l2cap_conn *conn)
1538 struct l2cap_info_req req;
1540 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1543 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1545 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1546 conn->info_ident = l2cap_get_ident(conn);
1548 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1550 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1554 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1556 /* The minimum encryption key size needs to be enforced by the
1557 * host stack before establishing any L2CAP connections. The
1558 * specification in theory allows a minimum of 1, but to align
1559 * BR/EDR and LE transports, a minimum of 7 is chosen.
1561 * This check might also be called for unencrypted connections
1562 * that have no key size requirements. Ensure that the link is
1563 * actually encrypted before enforcing a key size.
1565 int min_key_size = hcon->hdev->min_enc_key_size;
1567 /* On FIPS security level, key size must be 16 bytes */
1568 if (hcon->sec_level == BT_SECURITY_FIPS)
1571 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1572 hcon->enc_key_size >= min_key_size);
1575 static void l2cap_do_start(struct l2cap_chan *chan)
1577 struct l2cap_conn *conn = chan->conn;
1579 if (conn->hcon->type == LE_LINK) {
1580 l2cap_le_start(chan);
1584 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1585 l2cap_request_info(conn);
1589 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1592 if (!l2cap_chan_check_security(chan, true) ||
1593 !__l2cap_no_conn_pending(chan))
1596 if (l2cap_check_enc_key_size(conn->hcon))
1597 l2cap_start_connection(chan);
1599 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1602 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1604 u32 local_feat_mask = l2cap_feat_mask;
1606 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1609 case L2CAP_MODE_ERTM:
1610 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1611 case L2CAP_MODE_STREAMING:
1612 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1618 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1620 struct l2cap_conn *conn = chan->conn;
1621 struct l2cap_disconn_req req;
1626 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1627 __clear_retrans_timer(chan);
1628 __clear_monitor_timer(chan);
1629 __clear_ack_timer(chan);
1632 if (chan->scid == L2CAP_CID_A2MP) {
1633 l2cap_state_change(chan, BT_DISCONN);
1637 req.dcid = cpu_to_le16(chan->dcid);
1638 req.scid = cpu_to_le16(chan->scid);
1639 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1642 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1645 /* ---- L2CAP connections ---- */
1646 static void l2cap_conn_start(struct l2cap_conn *conn)
1648 struct l2cap_chan *chan, *tmp;
1650 BT_DBG("conn %p", conn);
1652 mutex_lock(&conn->chan_lock);
1654 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1655 l2cap_chan_lock(chan);
1657 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1658 l2cap_chan_ready(chan);
1659 l2cap_chan_unlock(chan);
1663 if (chan->state == BT_CONNECT) {
1664 if (!l2cap_chan_check_security(chan, true) ||
1665 !__l2cap_no_conn_pending(chan)) {
1666 l2cap_chan_unlock(chan);
1670 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1671 && test_bit(CONF_STATE2_DEVICE,
1672 &chan->conf_state)) {
1673 l2cap_chan_close(chan, ECONNRESET);
1674 l2cap_chan_unlock(chan);
1678 if (l2cap_check_enc_key_size(conn->hcon))
1679 l2cap_start_connection(chan);
1681 l2cap_chan_close(chan, ECONNREFUSED);
1683 } else if (chan->state == BT_CONNECT2) {
1684 struct l2cap_conn_rsp rsp;
1686 rsp.scid = cpu_to_le16(chan->dcid);
1687 rsp.dcid = cpu_to_le16(chan->scid);
1689 if (l2cap_chan_check_security(chan, false)) {
1690 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1691 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1692 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1693 chan->ops->defer(chan);
1696 l2cap_state_change(chan, BT_CONFIG);
1697 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1698 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1701 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1702 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1705 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1708 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1709 rsp.result != L2CAP_CR_SUCCESS) {
1710 l2cap_chan_unlock(chan);
1714 set_bit(CONF_REQ_SENT, &chan->conf_state);
1715 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1716 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1717 chan->num_conf_req++;
1720 l2cap_chan_unlock(chan);
1723 mutex_unlock(&conn->chan_lock);
1726 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1728 struct hci_conn *hcon = conn->hcon;
1729 struct hci_dev *hdev = hcon->hdev;
1731 BT_DBG("%s conn %p", hdev->name, conn);
1733 /* For outgoing pairing which doesn't necessarily have an
1734 * associated socket (e.g. mgmt_pair_device).
1737 smp_conn_security(hcon, hcon->pending_sec_level);
1739 /* For LE peripheral connections, make sure the connection interval
1740 * is in the range of the minimum and maximum interval that has
1741 * been configured for this connection. If not, then trigger
1742 * the connection update procedure.
1744 if (hcon->role == HCI_ROLE_SLAVE &&
1745 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1746 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1747 struct l2cap_conn_param_update_req req;
1749 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1750 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1751 req.latency = cpu_to_le16(hcon->le_conn_latency);
1752 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1754 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1755 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1759 static void l2cap_conn_ready(struct l2cap_conn *conn)
1761 struct l2cap_chan *chan;
1762 struct hci_conn *hcon = conn->hcon;
1764 BT_DBG("conn %p", conn);
1766 if (hcon->type == ACL_LINK)
1767 l2cap_request_info(conn);
1769 mutex_lock(&conn->chan_lock);
1771 list_for_each_entry(chan, &conn->chan_l, list) {
1773 l2cap_chan_lock(chan);
1775 if (chan->scid == L2CAP_CID_A2MP) {
1776 l2cap_chan_unlock(chan);
1780 if (hcon->type == LE_LINK) {
1781 l2cap_le_start(chan);
1782 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1783 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1784 l2cap_chan_ready(chan);
1785 } else if (chan->state == BT_CONNECT) {
1786 l2cap_do_start(chan);
1789 l2cap_chan_unlock(chan);
1792 mutex_unlock(&conn->chan_lock);
1794 if (hcon->type == LE_LINK)
1795 l2cap_le_conn_ready(conn);
1797 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1800 /* Notify sockets that we cannot guaranty reliability anymore */
1801 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1803 struct l2cap_chan *chan;
1805 BT_DBG("conn %p", conn);
1807 mutex_lock(&conn->chan_lock);
1809 list_for_each_entry(chan, &conn->chan_l, list) {
1810 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1811 l2cap_chan_set_err(chan, err);
1814 mutex_unlock(&conn->chan_lock);
1817 static void l2cap_info_timeout(struct work_struct *work)
1819 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1822 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1823 conn->info_ident = 0;
1825 l2cap_conn_start(conn);
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object is unregistered either explicitly, or implicitly when
 * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
1841 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1843 struct hci_dev *hdev = conn->hcon->hdev;
1846 /* We need to check whether l2cap_conn is registered. If it is not, we
1847 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1848 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1849 * relies on the parent hci_conn object to be locked. This itself relies
1850 * on the hci_dev object to be locked. So we must lock the hci device
1855 if (!list_empty(&user->list)) {
1860 /* conn->hchan is NULL after l2cap_conn_del() was called */
1866 ret = user->probe(conn, user);
1870 list_add(&user->list, &conn->users);
1874 hci_dev_unlock(hdev);
1877 EXPORT_SYMBOL(l2cap_register_user);
1879 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1881 struct hci_dev *hdev = conn->hcon->hdev;
1885 if (list_empty(&user->list))
1888 list_del_init(&user->list);
1889 user->remove(conn, user);
1892 hci_dev_unlock(hdev);
1894 EXPORT_SYMBOL(l2cap_unregister_user);
1896 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1898 struct l2cap_user *user;
1900 while (!list_empty(&conn->users)) {
1901 user = list_first_entry(&conn->users, struct l2cap_user, list);
1902 list_del_init(&user->list);
1903 user->remove(conn, user);
1907 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1909 struct l2cap_conn *conn = hcon->l2cap_data;
1910 struct l2cap_chan *chan, *l;
1915 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1917 kfree_skb(conn->rx_skb);
1919 skb_queue_purge(&conn->pending_rx);
1921 /* We can not call flush_work(&conn->pending_rx_work) here since we
1922 * might block if we are running on a worker from the same workqueue
1923 * pending_rx_work is waiting on.
1925 if (work_pending(&conn->pending_rx_work))
1926 cancel_work_sync(&conn->pending_rx_work);
1928 if (work_pending(&conn->id_addr_update_work))
1929 cancel_work_sync(&conn->id_addr_update_work);
1931 l2cap_unregister_all_users(conn);
1933 /* Force the connection to be immediately dropped */
1934 hcon->disc_timeout = 0;
1936 mutex_lock(&conn->chan_lock);
1939 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1940 l2cap_chan_hold(chan);
1941 l2cap_chan_lock(chan);
1943 l2cap_chan_del(chan, err);
1945 chan->ops->close(chan);
1947 l2cap_chan_unlock(chan);
1948 l2cap_chan_put(chan);
1951 mutex_unlock(&conn->chan_lock);
1953 hci_chan_del(conn->hchan);
1955 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1956 cancel_delayed_work_sync(&conn->info_timer);
1958 hcon->l2cap_data = NULL;
1960 l2cap_conn_put(conn);
1963 static void l2cap_conn_free(struct kref *ref)
1965 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1967 hci_conn_put(conn->hcon);
1971 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1973 kref_get(&conn->ref);
1976 EXPORT_SYMBOL(l2cap_conn_get);
1978 void l2cap_conn_put(struct l2cap_conn *conn)
1980 kref_put(&conn->ref, l2cap_conn_free);
1982 EXPORT_SYMBOL(l2cap_conn_put);
1984 /* ---- Socket interface ---- */
1986 /* Find socket with psm and source / destination bdaddr.
1987 * Returns closest match.
1989 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1994 struct l2cap_chan *c, *tmp, *c1 = NULL;
1996 read_lock(&chan_list_lock);
1998 list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
1999 if (state && c->state != state)
2002 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
2005 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
2008 if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
2009 int src_match, dst_match;
2010 int src_any, dst_any;
2013 src_match = !bacmp(&c->src, src);
2014 dst_match = !bacmp(&c->dst, dst);
2015 if (src_match && dst_match) {
2016 if (!l2cap_chan_hold_unless_zero(c))
2019 read_unlock(&chan_list_lock);
2024 src_any = !bacmp(&c->src, BDADDR_ANY);
2025 dst_any = !bacmp(&c->dst, BDADDR_ANY);
2026 if ((src_match && dst_any) || (src_any && dst_match) ||
2027 (src_any && dst_any))
2033 c1 = l2cap_chan_hold_unless_zero(c1);
2035 read_unlock(&chan_list_lock);
2040 static void l2cap_monitor_timeout(struct work_struct *work)
2042 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2043 monitor_timer.work);
2045 BT_DBG("chan %p", chan);
2047 l2cap_chan_lock(chan);
2050 l2cap_chan_unlock(chan);
2051 l2cap_chan_put(chan);
2055 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2057 l2cap_chan_unlock(chan);
2058 l2cap_chan_put(chan);
2061 static void l2cap_retrans_timeout(struct work_struct *work)
2063 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2064 retrans_timer.work);
2066 BT_DBG("chan %p", chan);
2068 l2cap_chan_lock(chan);
2071 l2cap_chan_unlock(chan);
2072 l2cap_chan_put(chan);
2076 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2077 l2cap_chan_unlock(chan);
2078 l2cap_chan_put(chan);
2081 static void l2cap_streaming_send(struct l2cap_chan *chan,
2082 struct sk_buff_head *skbs)
2084 struct sk_buff *skb;
2085 struct l2cap_ctrl *control;
2087 BT_DBG("chan %p, skbs %p", chan, skbs);
2089 if (__chan_is_moving(chan))
2092 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2094 while (!skb_queue_empty(&chan->tx_q)) {
2096 skb = skb_dequeue(&chan->tx_q);
2098 bt_cb(skb)->l2cap.retries = 1;
2099 control = &bt_cb(skb)->l2cap;
2101 control->reqseq = 0;
2102 control->txseq = chan->next_tx_seq;
2104 __pack_control(chan, control, skb);
2106 if (chan->fcs == L2CAP_FCS_CRC16) {
2107 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2108 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2111 l2cap_do_send(chan, skb);
2113 BT_DBG("Sent txseq %u", control->txseq);
2115 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2116 chan->frames_sent++;
2120 static int l2cap_ertm_send(struct l2cap_chan *chan)
2122 struct sk_buff *skb, *tx_skb;
2123 struct l2cap_ctrl *control;
2126 BT_DBG("chan %p", chan);
2128 if (chan->state != BT_CONNECTED)
2131 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2134 if (__chan_is_moving(chan))
2137 while (chan->tx_send_head &&
2138 chan->unacked_frames < chan->remote_tx_win &&
2139 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2141 skb = chan->tx_send_head;
2143 bt_cb(skb)->l2cap.retries = 1;
2144 control = &bt_cb(skb)->l2cap;
2146 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2149 control->reqseq = chan->buffer_seq;
2150 chan->last_acked_seq = chan->buffer_seq;
2151 control->txseq = chan->next_tx_seq;
2153 __pack_control(chan, control, skb);
2155 if (chan->fcs == L2CAP_FCS_CRC16) {
2156 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2157 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2160 /* Clone after data has been modified. Data is assumed to be
2161 read-only (for locking purposes) on cloned sk_buffs.
2163 tx_skb = skb_clone(skb, GFP_KERNEL);
2168 __set_retrans_timer(chan);
2170 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2171 chan->unacked_frames++;
2172 chan->frames_sent++;
2175 if (skb_queue_is_last(&chan->tx_q, skb))
2176 chan->tx_send_head = NULL;
2178 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2180 l2cap_do_send(chan, tx_skb);
2181 BT_DBG("Sent txseq %u", control->txseq);
2184 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2185 chan->unacked_frames, skb_queue_len(&chan->tx_q));
2190 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2192 struct l2cap_ctrl control;
2193 struct sk_buff *skb;
2194 struct sk_buff *tx_skb;
2197 BT_DBG("chan %p", chan);
2199 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2202 if (__chan_is_moving(chan))
2205 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2206 seq = l2cap_seq_list_pop(&chan->retrans_list);
2208 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2210 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2215 bt_cb(skb)->l2cap.retries++;
2216 control = bt_cb(skb)->l2cap;
2218 if (chan->max_tx != 0 &&
2219 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2220 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2221 l2cap_send_disconn_req(chan, ECONNRESET);
2222 l2cap_seq_list_clear(&chan->retrans_list);
2226 control.reqseq = chan->buffer_seq;
2227 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2232 if (skb_cloned(skb)) {
2233 /* Cloned sk_buffs are read-only, so we need a
2236 tx_skb = skb_copy(skb, GFP_KERNEL);
2238 tx_skb = skb_clone(skb, GFP_KERNEL);
2242 l2cap_seq_list_clear(&chan->retrans_list);
2246 /* Update skb contents */
2247 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2248 put_unaligned_le32(__pack_extended_control(&control),
2249 tx_skb->data + L2CAP_HDR_SIZE);
2251 put_unaligned_le16(__pack_enhanced_control(&control),
2252 tx_skb->data + L2CAP_HDR_SIZE);
2256 if (chan->fcs == L2CAP_FCS_CRC16) {
2257 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2258 tx_skb->len - L2CAP_FCS_SIZE);
2259 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2263 l2cap_do_send(chan, tx_skb);
2265 BT_DBG("Resent txseq %d", control.txseq);
2267 chan->last_acked_seq = chan->buffer_seq;
2271 static void l2cap_retransmit(struct l2cap_chan *chan,
2272 struct l2cap_ctrl *control)
2274 BT_DBG("chan %p, control %p", chan, control);
2276 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2277 l2cap_ertm_resend(chan);
2280 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2281 struct l2cap_ctrl *control)
2283 struct sk_buff *skb;
2285 BT_DBG("chan %p, control %p", chan, control);
2288 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2290 l2cap_seq_list_clear(&chan->retrans_list);
2292 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2295 if (chan->unacked_frames) {
2296 skb_queue_walk(&chan->tx_q, skb) {
2297 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2298 skb == chan->tx_send_head)
2302 skb_queue_walk_from(&chan->tx_q, skb) {
2303 if (skb == chan->tx_send_head)
2306 l2cap_seq_list_append(&chan->retrans_list,
2307 bt_cb(skb)->l2cap.txseq);
2310 l2cap_ertm_resend(chan);
2314 static void l2cap_send_ack(struct l2cap_chan *chan)
2316 struct l2cap_ctrl control;
2317 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2318 chan->last_acked_seq);
2321 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2322 chan, chan->last_acked_seq, chan->buffer_seq);
2324 memset(&control, 0, sizeof(control));
2327 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2328 chan->rx_state == L2CAP_RX_STATE_RECV) {
2329 __clear_ack_timer(chan);
2330 control.super = L2CAP_SUPER_RNR;
2331 control.reqseq = chan->buffer_seq;
2332 l2cap_send_sframe(chan, &control);
2334 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2335 l2cap_ertm_send(chan);
2336 /* If any i-frames were sent, they included an ack */
2337 if (chan->buffer_seq == chan->last_acked_seq)
2341 /* Ack now if the window is 3/4ths full.
2342 * Calculate without mul or div
2344 threshold = chan->ack_win;
2345 threshold += threshold << 1;
2348 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2351 if (frames_to_ack >= threshold) {
2352 __clear_ack_timer(chan);
2353 control.super = L2CAP_SUPER_RR;
2354 control.reqseq = chan->buffer_seq;
2355 l2cap_send_sframe(chan, &control);
2360 __set_ack_timer(chan);
2364 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2365 struct msghdr *msg, int len,
2366 int count, struct sk_buff *skb)
2368 struct l2cap_conn *conn = chan->conn;
2369 struct sk_buff **frag;
2372 if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2378 /* Continuation fragments (no L2CAP header) */
2379 frag = &skb_shinfo(skb)->frag_list;
2381 struct sk_buff *tmp;
2383 count = min_t(unsigned int, conn->mtu, len);
2385 tmp = chan->ops->alloc_skb(chan, 0, count,
2386 msg->msg_flags & MSG_DONTWAIT);
2388 return PTR_ERR(tmp);
2392 if (!copy_from_iter_full(skb_put(*frag, count), count,
2399 skb->len += (*frag)->len;
2400 skb->data_len += (*frag)->len;
2402 frag = &(*frag)->next;
2408 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2409 struct msghdr *msg, size_t len)
2411 struct l2cap_conn *conn = chan->conn;
2412 struct sk_buff *skb;
2413 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2414 struct l2cap_hdr *lh;
2416 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2417 __le16_to_cpu(chan->psm), len);
2419 count = min_t(unsigned int, (conn->mtu - hlen), len);
2421 skb = chan->ops->alloc_skb(chan, hlen, count,
2422 msg->msg_flags & MSG_DONTWAIT);
2426 /* Create L2CAP header */
2427 lh = skb_put(skb, L2CAP_HDR_SIZE);
2428 lh->cid = cpu_to_le16(chan->dcid);
2429 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2430 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2432 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2433 if (unlikely(err < 0)) {
2435 return ERR_PTR(err);
2440 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2441 struct msghdr *msg, size_t len)
2443 struct l2cap_conn *conn = chan->conn;
2444 struct sk_buff *skb;
2446 struct l2cap_hdr *lh;
2448 BT_DBG("chan %p len %zu", chan, len);
2450 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2452 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2453 msg->msg_flags & MSG_DONTWAIT);
2457 /* Create L2CAP header */
2458 lh = skb_put(skb, L2CAP_HDR_SIZE);
2459 lh->cid = cpu_to_le16(chan->dcid);
2460 lh->len = cpu_to_le16(len);
2462 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2463 if (unlikely(err < 0)) {
2465 return ERR_PTR(err);
2470 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2471 struct msghdr *msg, size_t len,
2474 struct l2cap_conn *conn = chan->conn;
2475 struct sk_buff *skb;
2476 int err, count, hlen;
2477 struct l2cap_hdr *lh;
2479 BT_DBG("chan %p len %zu", chan, len);
2482 return ERR_PTR(-ENOTCONN);
2484 hlen = __ertm_hdr_size(chan);
2487 hlen += L2CAP_SDULEN_SIZE;
2489 if (chan->fcs == L2CAP_FCS_CRC16)
2490 hlen += L2CAP_FCS_SIZE;
2492 count = min_t(unsigned int, (conn->mtu - hlen), len);
2494 skb = chan->ops->alloc_skb(chan, hlen, count,
2495 msg->msg_flags & MSG_DONTWAIT);
2499 /* Create L2CAP header */
2500 lh = skb_put(skb, L2CAP_HDR_SIZE);
2501 lh->cid = cpu_to_le16(chan->dcid);
2502 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2504 /* Control header is populated later */
2505 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2506 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2508 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2511 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2513 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2514 if (unlikely(err < 0)) {
2516 return ERR_PTR(err);
2519 bt_cb(skb)->l2cap.fcs = chan->fcs;
2520 bt_cb(skb)->l2cap.retries = 0;
2524 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2525 struct sk_buff_head *seg_queue,
2526 struct msghdr *msg, size_t len)
2528 struct sk_buff *skb;
2533 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2535 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2536 * so fragmented skbs are not used. The HCI layer's handling
2537 * of fragmented skbs is not compatible with ERTM's queueing.
2540 /* PDU size is derived from the HCI MTU */
2541 pdu_len = chan->conn->mtu;
2543 /* Constrain PDU size for BR/EDR connections */
2545 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2547 /* Adjust for largest possible L2CAP overhead. */
2549 pdu_len -= L2CAP_FCS_SIZE;
2551 pdu_len -= __ertm_hdr_size(chan);
2553 /* Remote device may have requested smaller PDUs */
2554 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2556 if (len <= pdu_len) {
2557 sar = L2CAP_SAR_UNSEGMENTED;
2561 sar = L2CAP_SAR_START;
2566 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2569 __skb_queue_purge(seg_queue);
2570 return PTR_ERR(skb);
2573 bt_cb(skb)->l2cap.sar = sar;
2574 __skb_queue_tail(seg_queue, skb);
2580 if (len <= pdu_len) {
2581 sar = L2CAP_SAR_END;
2584 sar = L2CAP_SAR_CONTINUE;
2591 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2593 size_t len, u16 sdulen)
2595 struct l2cap_conn *conn = chan->conn;
2596 struct sk_buff *skb;
2597 int err, count, hlen;
2598 struct l2cap_hdr *lh;
2600 BT_DBG("chan %p len %zu", chan, len);
2603 return ERR_PTR(-ENOTCONN);
2605 hlen = L2CAP_HDR_SIZE;
2608 hlen += L2CAP_SDULEN_SIZE;
2610 count = min_t(unsigned int, (conn->mtu - hlen), len);
2612 skb = chan->ops->alloc_skb(chan, hlen, count,
2613 msg->msg_flags & MSG_DONTWAIT);
2617 /* Create L2CAP header */
2618 lh = skb_put(skb, L2CAP_HDR_SIZE);
2619 lh->cid = cpu_to_le16(chan->dcid);
2620 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2623 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2625 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2626 if (unlikely(err < 0)) {
2628 return ERR_PTR(err);
2634 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2635 struct sk_buff_head *seg_queue,
2636 struct msghdr *msg, size_t len)
2638 struct sk_buff *skb;
2642 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2645 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2651 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2653 __skb_queue_purge(seg_queue);
2654 return PTR_ERR(skb);
2657 __skb_queue_tail(seg_queue, skb);
2663 pdu_len += L2CAP_SDULEN_SIZE;
2670 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2674 BT_DBG("chan %p", chan);
2676 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2677 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2682 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2683 skb_queue_len(&chan->tx_q));
2686 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2688 struct sk_buff *skb;
2690 struct sk_buff_head seg_queue;
2695 /* Connectionless channel */
2696 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2697 skb = l2cap_create_connless_pdu(chan, msg, len);
2699 return PTR_ERR(skb);
2701 /* Channel lock is released before requesting new skb and then
2702 * reacquired thus we need to recheck channel state.
2704 if (chan->state != BT_CONNECTED) {
2709 l2cap_do_send(chan, skb);
2713 switch (chan->mode) {
2714 case L2CAP_MODE_LE_FLOWCTL:
2715 case L2CAP_MODE_EXT_FLOWCTL:
2716 /* Check outgoing MTU */
2717 if (len > chan->omtu)
2720 __skb_queue_head_init(&seg_queue);
2722 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2724 if (chan->state != BT_CONNECTED) {
2725 __skb_queue_purge(&seg_queue);
2732 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2734 l2cap_le_flowctl_send(chan);
2736 if (!chan->tx_credits)
2737 chan->ops->suspend(chan);
2743 case L2CAP_MODE_BASIC:
2744 /* Check outgoing MTU */
2745 if (len > chan->omtu)
2748 /* Create a basic PDU */
2749 skb = l2cap_create_basic_pdu(chan, msg, len);
2751 return PTR_ERR(skb);
2753 /* Channel lock is released before requesting new skb and then
2754 * reacquired thus we need to recheck channel state.
2756 if (chan->state != BT_CONNECTED) {
2761 l2cap_do_send(chan, skb);
2765 case L2CAP_MODE_ERTM:
2766 case L2CAP_MODE_STREAMING:
2767 /* Check outgoing MTU */
2768 if (len > chan->omtu) {
2773 __skb_queue_head_init(&seg_queue);
2775 /* Do segmentation before calling in to the state machine,
2776 * since it's possible to block while waiting for memory
2779 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2781 /* The channel could have been closed while segmenting,
2782 * check that it is still connected.
2784 if (chan->state != BT_CONNECTED) {
2785 __skb_queue_purge(&seg_queue);
2792 if (chan->mode == L2CAP_MODE_ERTM)
2793 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2795 l2cap_streaming_send(chan, &seg_queue);
2799 /* If the skbs were not queued for sending, they'll still be in
2800 * seg_queue and need to be purged.
2802 __skb_queue_purge(&seg_queue);
2806 BT_DBG("bad state %1.1x", chan->mode);
2812 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2814 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2816 struct l2cap_ctrl control;
2819 BT_DBG("chan %p, txseq %u", chan, txseq);
2821 memset(&control, 0, sizeof(control));
2823 control.super = L2CAP_SUPER_SREJ;
2825 for (seq = chan->expected_tx_seq; seq != txseq;
2826 seq = __next_seq(chan, seq)) {
2827 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2828 control.reqseq = seq;
2829 l2cap_send_sframe(chan, &control);
2830 l2cap_seq_list_append(&chan->srej_list, seq);
2834 chan->expected_tx_seq = __next_seq(chan, txseq);
2837 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2839 struct l2cap_ctrl control;
2841 BT_DBG("chan %p", chan);
2843 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2846 memset(&control, 0, sizeof(control));
2848 control.super = L2CAP_SUPER_SREJ;
2849 control.reqseq = chan->srej_list.tail;
2850 l2cap_send_sframe(chan, &control);
/* Walk srej_list once, re-sending an SREJ for each pending sequence number
 * and re-appending it, stopping if txseq (or an empty list) is popped.
 * Capturing the initial head bounds the traversal to a single pass even
 * though entries are appended back while iterating.
 * NOTE(review): listing elides interior lines (local declarations for
 * `initial_head`/`seq`, the `do {` opener).
 */
2853 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2855 struct l2cap_ctrl control;
2859 BT_DBG("chan %p, txseq %u", chan, txseq);
2861 memset(&control, 0, sizeof(control));
2863 control.super = L2CAP_SUPER_SREJ;
2865 /* Capture initial list head to allow only one pass through the list. */
2866 initial_head = chan->srej_list.head;
2869 seq = l2cap_seq_list_pop(&chan->srej_list);
2870 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2873 control.reqseq = seq;
2874 l2cap_send_sframe(chan, &control);
2875 l2cap_seq_list_append(&chan->srej_list, seq);
2876 } while (chan->srej_list.head != initial_head);
/* Process an acknowledgement (ReqSeq) from the peer: drop every acked frame
 * from tx_q between expected_ack_seq and reqseq, decrementing
 * unacked_frames, and stop the retransmission timer once nothing remains
 * unacked. Early-exit when there is nothing to ack.
 * NOTE(review): listing elides interior lines (e.g. the `ackseq`
 * declaration and the NULL check presumably guarding acked_skb — confirm
 * against the full source).
 */
2879 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2881 struct sk_buff *acked_skb;
2884 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2886 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2889 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2890 chan->expected_ack_seq, chan->unacked_frames);
2892 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2893 ackseq = __next_seq(chan, ackseq)) {
2895 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2897 skb_unlink(acked_skb, &chan->tx_q);
2898 kfree_skb(acked_skb);
2899 chan->unacked_frames--;
2903 chan->expected_ack_seq = reqseq;
2905 if (chan->unacked_frames == 0)
2906 __clear_retrans_timer(chan);
2908 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abort the SREJ_SENT receive state: rewind expected_tx_seq to buffer_seq,
 * discard the pending SREJ bookkeeping (srej_list) and any out-of-order
 * frames buffered in srej_q, and fall back to the normal RECV state.
 */
2911 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2913 BT_DBG("chan %p", chan);
2915 chan->expected_tx_seq = chan->buffer_seq;
2916 l2cap_seq_list_clear(&chan->srej_list);
2917 skb_queue_purge(&chan->srej_q);
2918 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM transmit-side state machine handler for the XMIT state.
 * Dispatches on `event`: queues/sends new data, enters/exits LOCAL_BUSY,
 * processes acks, and transitions to WAIT_F after sending a poll
 * (explicit poll or retransmission timeout).
 * NOTE(review): listing elides interior lines (the `switch (event)` opener,
 * `break`s, `else` arms and closing braces); code below is the visible
 * subset only.
 */
2921 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2922 struct l2cap_ctrl *control,
2923 struct sk_buff_head *skbs, u8 event)
2925 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2929 case L2CAP_EV_DATA_REQUEST:
2930 if (chan->tx_send_head == NULL)
2931 chan->tx_send_head = skb_peek(skbs);
2933 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2934 l2cap_ertm_send(chan);
2936 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2937 BT_DBG("Enter LOCAL_BUSY");
2938 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2940 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2941 /* The SREJ_SENT state must be aborted if we are to
2942 * enter the LOCAL_BUSY state.
2944 l2cap_abort_rx_srej_sent(chan);
2947 l2cap_send_ack(chan);
2950 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2951 BT_DBG("Exit LOCAL_BUSY");
2952 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2954 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2955 struct l2cap_ctrl local_control;
/* We previously sent RNR; poll the peer with RR(P=1) and
 * wait for the final bit in WAIT_F.
 */
2957 memset(&local_control, 0, sizeof(local_control));
2958 local_control.sframe = 1;
2959 local_control.super = L2CAP_SUPER_RR;
2960 local_control.poll = 1;
2961 local_control.reqseq = chan->buffer_seq;
2962 l2cap_send_sframe(chan, &local_control);
2964 chan->retry_count = 1;
2965 __set_monitor_timer(chan);
2966 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2969 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2970 l2cap_process_reqseq(chan, control->reqseq);
2972 case L2CAP_EV_EXPLICIT_POLL:
2973 l2cap_send_rr_or_rnr(chan, 1);
2974 chan->retry_count = 1;
2975 __set_monitor_timer(chan);
2976 __clear_ack_timer(chan);
2977 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2979 case L2CAP_EV_RETRANS_TO:
2980 l2cap_send_rr_or_rnr(chan, 1);
2981 chan->retry_count = 1;
2982 __set_monitor_timer(chan);
2983 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2985 case L2CAP_EV_RECV_FBIT:
2986 /* Nothing to process */
/* ERTM transmit-side state machine handler for the WAIT_F state (a poll
 * has been sent; waiting for an S/I-frame with the Final bit). New data is
 * queued but not transmitted; receipt of the F-bit returns to XMIT;
 * monitor timeouts re-poll up to max_tx times before disconnecting.
 * NOTE(review): listing elides interior lines (the `switch (event)`
 * opener, `break`s and closing braces); code below is the visible subset.
 */
2993 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2994 struct l2cap_ctrl *control,
2995 struct sk_buff_head *skbs, u8 event)
2997 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
3001 case L2CAP_EV_DATA_REQUEST:
3002 if (chan->tx_send_head == NULL)
3003 chan->tx_send_head = skb_peek(skbs);
3004 /* Queue data, but don't send. */
3005 skb_queue_splice_tail_init(skbs, &chan->tx_q);
3007 case L2CAP_EV_LOCAL_BUSY_DETECTED:
3008 BT_DBG("Enter LOCAL_BUSY");
3009 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3011 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
3012 /* The SREJ_SENT state must be aborted if we are to
3013 * enter the LOCAL_BUSY state.
3015 l2cap_abort_rx_srej_sent(chan);
3018 l2cap_send_ack(chan);
3021 case L2CAP_EV_LOCAL_BUSY_CLEAR:
3022 BT_DBG("Exit LOCAL_BUSY");
3023 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3025 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
3026 struct l2cap_ctrl local_control;
/* RNR was sent earlier; poll the peer again with RR(P=1) */
3027 memset(&local_control, 0, sizeof(local_control));
3028 local_control.sframe = 1;
3029 local_control.super = L2CAP_SUPER_RR;
3030 local_control.poll = 1;
3031 local_control.reqseq = chan->buffer_seq;
3032 l2cap_send_sframe(chan, &local_control);
3034 chan->retry_count = 1;
3035 __set_monitor_timer(chan);
3036 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
3039 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
3040 l2cap_process_reqseq(chan, control->reqseq);
3043 case L2CAP_EV_RECV_FBIT:
3044 if (control && control->final) {
3045 __clear_monitor_timer(chan);
3046 if (chan->unacked_frames > 0)
3047 __set_retrans_timer(chan);
3048 chan->retry_count = 0;
3049 chan->tx_state = L2CAP_TX_STATE_XMIT;
3050 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3053 case L2CAP_EV_EXPLICIT_POLL:
/* Already polling — ignore */
3056 case L2CAP_EV_MONITOR_TO:
3057 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3058 l2cap_send_rr_or_rnr(chan, 1);
3059 __set_monitor_timer(chan);
3060 chan->retry_count++;
/* Retry budget exhausted — tear the channel down */
3062 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Top-level ERTM transmit-side event dispatcher: routes the event to the
 * handler for the channel's current tx_state (XMIT or WAIT_F).
 * NOTE(review): listing elides the `break`s and closing braces.
 */
3070 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3071 struct sk_buff_head *skbs, u8 event)
3073 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3074 chan, control, skbs, event, chan->tx_state);
3076 switch (chan->tx_state) {
3077 case L2CAP_TX_STATE_XMIT:
3078 l2cap_tx_state_xmit(chan, control, skbs, event);
3080 case L2CAP_TX_STATE_WAIT_F:
3081 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received frame's ReqSeq/F-bit into the transmit state machine. */
3089 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3090 struct l2cap_ctrl *control)
3092 BT_DBG("chan %p, control %p", chan, control);
3093 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only the F-bit of a received frame into the transmit state machine. */
3096 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3097 struct l2cap_ctrl *control)
3099 BT_DBG("chan %p, control %p", chan, control);
3100 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
/* Clone an incoming frame to every raw-type channel on this connection,
 * skipping the originating channel, under chan_lock.
 * NOTE(review): listing elides interior lines (`continue`s after the
 * guards, the NULL check on skb_clone, and the error-path handling of
 * ops->recv — confirm against the full source).
 */
3103 /* Copy frame to all raw sockets on that connection */
3104 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3106 struct sk_buff *nskb;
3107 struct l2cap_chan *chan;
3109 BT_DBG("conn %p", conn);
3111 mutex_lock(&conn->chan_lock);
3113 list_for_each_entry(chan, &conn->chan_l, list) {
3114 if (chan->chan_type != L2CAP_CHAN_RAW)
3117 /* Don't send frame to the channel it came from */
3118 if (bt_cb(skb)->l2cap.chan == chan)
3121 nskb = skb_clone(skb, GFP_KERNEL);
3124 if (chan->ops->recv(chan, nskb))
3128 mutex_unlock(&conn->chan_lock);
/* Allocate and fill a signalling-command skb: L2CAP header (signalling CID
 * chosen by link type) + command header + payload, spilling payload that
 * exceeds the connection MTU into frag_list continuation skbs.
 * NOTE(review): listing elides interior lines (NULL checks after
 * bt_skb_alloc, the cmd->code/ident assignments, the fragment loop
 * structure and the error-unwind path) — confirm against the full source.
 */
3131 /* ---- L2CAP signalling commands ---- */
3132 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3133 u8 ident, u16 dlen, void *data)
3135 struct sk_buff *skb, **frag;
3136 struct l2cap_cmd_hdr *cmd;
3137 struct l2cap_hdr *lh;
3140 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3141 conn, code, ident, dlen);
3143 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3146 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3147 count = min_t(unsigned int, conn->mtu, len);
3149 skb = bt_skb_alloc(count, GFP_KERNEL);
3153 lh = skb_put(skb, L2CAP_HDR_SIZE);
3154 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3156 if (conn->hcon->type == LE_LINK)
3157 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3159 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3161 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3164 cmd->len = cpu_to_le16(dlen);
3167 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3168 skb_put_data(skb, data, count);
3174 /* Continuation fragments (no L2CAP header) */
3175 frag = &skb_shinfo(skb)->frag_list;
3177 count = min_t(unsigned int, conn->mtu, len);
3179 *frag = bt_skb_alloc(count, GFP_KERNEL);
3183 skb_put_data(*frag, data, count);
3188 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: return its total encoded length
 * and report type/len/val, where val is the decoded 1/2/4-byte value or,
 * for larger options, a pointer to the raw bytes.
 * NOTE(review): listing elides the switch-on-opt->len structure and
 * pointer advancement; code below is the visible subset only.
 */
3198 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3201 struct l2cap_conf_opt *opt = *ptr;
3204 len = L2CAP_CONF_OPT_SIZE + opt->len;
3212 *val = *((u8 *) opt->val);
3216 *val = get_unaligned_le16(opt->val);
3220 *val = get_unaligned_le32(opt->val);
3224 *val = (unsigned long) opt->val;
3228 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/val) at *ptr and advance the
 * pointer; `size` is the remaining buffer space and the option is dropped
 * if it would not fit. 1/2/4-byte values are stored little-endian; longer
 * values are memcpy'd from the pointer passed in `val`.
 * NOTE(review): listing elides the opt->type/opt->len assignments and the
 * switch-on-len structure; code below is the visible subset only.
 */
3232 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3234 struct l2cap_conf_opt *opt = *ptr;
3236 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3238 if (size < L2CAP_CONF_OPT_SIZE + len)
3246 *((u8 *) opt->val) = val;
3250 put_unaligned_le16(val, opt->val);
3254 put_unaligned_le32(val, opt->val);
3258 memcpy(opt->val, (void *) val, len);
3262 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification (EFS) option from the channel's
 * local QoS parameters — full local values for ERTM, best-effort defaults
 * for streaming — and append it to the config buffer.
 * NOTE(review): listing elides interior lines (streaming-mode id/acc_lat/
 * flush_to fields, the default case and `break`s).
 */
3265 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3267 struct l2cap_conf_efs efs;
3269 switch (chan->mode) {
3270 case L2CAP_MODE_ERTM:
3271 efs.id = chan->local_id;
3272 efs.stype = chan->local_stype;
3273 efs.msdu = cpu_to_le16(chan->local_msdu);
3274 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3275 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3276 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3279 case L2CAP_MODE_STREAMING:
3281 efs.stype = L2CAP_SERV_BESTEFFORT;
3282 efs.msdu = cpu_to_le16(chan->local_msdu);
3283 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3292 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3293 (unsigned long) &efs, size);
/* Delayed-work handler for the ERTM ack timer: if frames have been
 * received since the last ack (buffer_seq advanced past last_acked_seq),
 * send an RR/RNR acknowledgement. Drops the channel reference taken when
 * the work was scheduled.
 * NOTE(review): listing elides the `frames_to_ack` declaration and the
 * conditional guarding the send.
 */
3296 static void l2cap_ack_timeout(struct work_struct *work)
3298 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3302 BT_DBG("chan %p", chan);
3304 l2cap_chan_lock(chan);
3306 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3307 chan->last_acked_seq);
3310 l2cap_send_rr_or_rnr(chan, 0);
3312 l2cap_chan_unlock(chan);
3313 l2cap_chan_put(chan);
/* Initialise per-channel ERTM/streaming state: zero all sequence counters,
 * reset AMP move state, and — for ERTM only — set the initial rx/tx
 * machine states and allocate the SREJ and retransmission sequence lists
 * (freeing srej_list if the second allocation fails).
 * NOTE(review): listing elides interior lines (`int err` declaration,
 * early `return 0` for non-ERTM modes, error-check branches and final
 * return).
 */
3316 int l2cap_ertm_init(struct l2cap_chan *chan)
3320 chan->next_tx_seq = 0;
3321 chan->expected_tx_seq = 0;
3322 chan->expected_ack_seq = 0;
3323 chan->unacked_frames = 0;
3324 chan->buffer_seq = 0;
3325 chan->frames_sent = 0;
3326 chan->last_acked_seq = 0;
3328 chan->sdu_last_frag = NULL;
3331 skb_queue_head_init(&chan->tx_q);
3333 chan->local_amp_id = AMP_ID_BREDR;
3334 chan->move_id = AMP_ID_BREDR;
3335 chan->move_state = L2CAP_MOVE_STABLE;
3336 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3338 if (chan->mode != L2CAP_MODE_ERTM)
3341 chan->rx_state = L2CAP_RX_STATE_RECV;
3342 chan->tx_state = L2CAP_TX_STATE_XMIT;
3344 skb_queue_head_init(&chan->srej_q);
3346 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3350 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3352 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to use: keep ERTM/streaming if the remote's
 * feature mask supports it, otherwise fall back to basic mode.
 * NOTE(review): listing elides the `switch (mode)` opener and the
 * `return mode` for the supported case.
 */
3357 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3360 case L2CAP_MODE_STREAMING:
3361 case L2CAP_MODE_ERTM:
3362 if (l2cap_mode_supported(mode, remote_feat_mask))
3366 return L2CAP_MODE_BASIC;
/* Extended window size is usable only when A2MP fixed channel is enabled
 * locally and the remote advertises the extended-window feature.
 */
3370 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3372 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3373 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
/* Extended flow spec is usable only when A2MP fixed channel is enabled
 * locally and the remote advertises the extended-flow feature.
 */
3376 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3378 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3379 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
/* Fill rfc->retrans_timeout / rfc->monitor_timeout. For an AMP link the
 * timeouts are derived from the controller's best-effort flush timeout
 * (converted to ms, scaled by 3 plus 500 ms margin, clamped to 16 bits);
 * otherwise the L2CAP defaults are used.
 * NOTE(review): listing elides the clamp assignment under the 0xffff
 * check and the `else` keyword before the defaults.
 */
3382 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3383 struct l2cap_conf_rfc *rfc)
3385 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3386 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3388 /* Class 1 devices have must have ERTM timeouts
3389 * exceeding the Link Supervision Timeout. The
3390 * default Link Supervision Timeout for AMP
3391 * controllers is 10 seconds.
3393 * Class 1 devices use 0xffffffff for their
3394 * best-effort flush timeout, so the clamping logic
3395 * will result in a timeout that meets the above
3396 * requirement. ERTM timeouts are 16-bit values, so
3397 * the maximum timeout is 65.535 seconds.
3400 /* Convert timeout to milliseconds and round */
3401 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3403 /* This is the recommended formula for class 2 devices
3404 * that start ERTM timers when packets are sent to the
3407 ertm_to = 3 * ertm_to + 500;
3409 if (ertm_to > 0xffff)
3412 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3413 rfc->monitor_timeout = rfc->retrans_timeout;
3415 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3416 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose the transmit window: if the requested window exceeds the default
 * and extended window size is supported, switch to the extended control
 * field; otherwise clamp to the default window. ack_win mirrors tx_win.
 * NOTE(review): listing elides the `} else {` between the two arms.
 */
3420 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3422 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3423 __l2cap_ews_supported(chan->conn)) {
3424 /* use extended control field */
3425 set_bit(FLAG_EXT_CTRL, &chan->flags);
3426 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3428 chan->tx_win = min_t(u16, chan->tx_win,
3429 L2CAP_DEFAULT_TX_WINDOW);
3430 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3432 chan->ack_win = chan->tx_win;
/* Auto-select the incoming MTU from the largest EDR packet type enabled on
 * the ACL link, checking packet types from smallest (2-DH1) to largest
 * (3-DH5). The HCI_* pkt_type bits are "packet type NOT allowed" flags,
 * hence the negated tests.
 * NOTE(review): listing elides the `return`s and the chan->imtu
 * assignments inside each step (e.g. imtu = 54/83/367/552/679/1021 —
 * confirm against the full source).
 */
3435 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3437 struct hci_conn *conn = chan->conn->hcon;
3439 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3441 /* The 2-DH1 packet has between 2 and 56 information bytes
3442 * (including the 2-byte payload header)
3444 if (!(conn->pkt_type & HCI_2DH1))
3447 /* The 3-DH1 packet has between 2 and 85 information bytes
3448 * (including the 2-byte payload header)
3450 if (!(conn->pkt_type & HCI_3DH1))
3453 /* The 2-DH3 packet has between 2 and 369 information bytes
3454 * (including the 2-byte payload header)
3456 if (!(conn->pkt_type & HCI_2DH3))
3459 /* The 3-DH3 packet has between 2 and 554 information bytes
3460 * (including the 2-byte payload header)
3462 if (!(conn->pkt_type & HCI_3DH3))
3465 /* The 2-DH5 packet has between 2 and 681 information bytes
3466 * (including the 2-byte payload header)
3468 if (!(conn->pkt_type & HCI_2DH5))
3471 /* The 3-DH5 packet has between 2 and 1023 information bytes
3472 * (including the 2-byte payload header)
3474 if (!(conn->pkt_type & HCI_3DH5))
/* Build an outgoing Configuration Request into `data` (bounded by
 * data_size): MTU option when non-default, then mode-specific options —
 * RFC (basic/ERTM/streaming), EFS, extended window size, and FCS —
 * finishing with the request header fields. Returns the encoded length
 * (ptr - data, elided from this listing).
 * NOTE(review): listing elides interior lines (`u16 size` declaration,
 * `break`s, `goto done` labels and the final return) — confirm against
 * the full source.
 */
3478 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3480 struct l2cap_conf_req *req = data;
3481 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3482 void *ptr = req->data;
3483 void *endptr = data + data_size;
3486 BT_DBG("chan %p", chan);
3488 if (chan->num_conf_req || chan->num_conf_rsp)
3491 switch (chan->mode) {
3492 case L2CAP_MODE_STREAMING:
3493 case L2CAP_MODE_ERTM:
3494 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3497 if (__l2cap_efs_supported(chan->conn))
3498 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3502 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3507 if (chan->imtu != L2CAP_DEFAULT_MTU) {
3509 l2cap_mtu_auto(chan);
3510 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3514 switch (chan->mode) {
3515 case L2CAP_MODE_BASIC:
/* Only send an explicit basic-mode RFC when the peer knows
 * about ERTM/streaming at all.
 */
3519 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3520 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3523 rfc.mode = L2CAP_MODE_BASIC;
3525 rfc.max_transmit = 0;
3526 rfc.retrans_timeout = 0;
3527 rfc.monitor_timeout = 0;
3528 rfc.max_pdu_size = 0;
3530 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3531 (unsigned long) &rfc, endptr - ptr);
3534 case L2CAP_MODE_ERTM:
3535 rfc.mode = L2CAP_MODE_ERTM;
3536 rfc.max_transmit = chan->max_tx;
3538 __l2cap_set_ertm_timeouts(chan, &rfc);
3540 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3541 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3543 rfc.max_pdu_size = cpu_to_le16(size);
3545 l2cap_txwin_setup(chan);
3547 rfc.txwin_size = min_t(u16, chan->tx_win,
3548 L2CAP_DEFAULT_TX_WINDOW);
3550 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3551 (unsigned long) &rfc, endptr - ptr);
3553 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3554 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3556 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3557 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3558 chan->tx_win, endptr - ptr);
3560 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3561 if (chan->fcs == L2CAP_FCS_NONE ||
3562 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3563 chan->fcs = L2CAP_FCS_NONE;
3564 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3565 chan->fcs, endptr - ptr);
3569 case L2CAP_MODE_STREAMING:
3570 l2cap_txwin_setup(chan);
3571 rfc.mode = L2CAP_MODE_STREAMING;
3573 rfc.max_transmit = 0;
3574 rfc.retrans_timeout = 0;
3575 rfc.monitor_timeout = 0;
3577 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3578 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3580 rfc.max_pdu_size = cpu_to_le16(size);
3582 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3583 (unsigned long) &rfc, endptr - ptr);
3585 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3586 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3588 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3589 if (chan->fcs == L2CAP_FCS_NONE ||
3590 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3591 chan->fcs = L2CAP_FCS_NONE;
3592 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3593 chan->fcs, endptr - ptr);
3598 req->dcid = cpu_to_le16(chan->dcid);
3599 req->flags = cpu_to_le16(0);
/* Parse the peer's Configuration Request (buffered in chan->conf_req) and
 * build our Configuration Response into `data`. Walks the option list
 * (MTU, flush timeout, QoS, RFC, FCS, EFS, EWS), rejecting unknown
 * non-hint options; then negotiates the channel mode, validates
 * MTU/EFS, fills mode-dependent response options, and writes the response
 * header. Returns the encoded response length (elided from this listing).
 * NOTE(review): listing elides many interior lines (olen validity checks,
 * `break`s, `else` arms, `u16 size`/`unsigned long val` declarations,
 * the `done:` label and final return) — confirm against the full source.
 */
3604 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3606 struct l2cap_conf_rsp *rsp = data;
3607 void *ptr = rsp->data;
3608 void *endptr = data + data_size;
3609 void *req = chan->conf_req;
3610 int len = chan->conf_len;
3611 int type, hint, olen;
3613 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3614 struct l2cap_conf_efs efs;
3616 u16 mtu = L2CAP_DEFAULT_MTU;
3617 u16 result = L2CAP_CONF_SUCCESS;
3620 BT_DBG("chan %p", chan);
3622 while (len >= L2CAP_CONF_OPT_SIZE) {
3623 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* High bit marks the option as a hint: unknown hints are
 * skipped, unknown non-hints are reported back as UNKNOWN.
 */
3627 hint = type & L2CAP_CONF_HINT;
3628 type &= L2CAP_CONF_MASK;
3631 case L2CAP_CONF_MTU:
3637 case L2CAP_CONF_FLUSH_TO:
3640 chan->flush_to = val;
3643 case L2CAP_CONF_QOS:
3646 case L2CAP_CONF_RFC:
3647 if (olen != sizeof(rfc))
3649 memcpy(&rfc, (void *) val, olen);
3652 case L2CAP_CONF_FCS:
3655 if (val == L2CAP_FCS_NONE)
3656 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3659 case L2CAP_CONF_EFS:
3660 if (olen != sizeof(efs))
3663 memcpy(&efs, (void *) val, olen);
3666 case L2CAP_CONF_EWS:
3669 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3670 return -ECONNREFUSED;
3671 set_bit(FLAG_EXT_CTRL, &chan->flags);
3672 set_bit(CONF_EWS_RECV, &chan->conf_state);
3673 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3674 chan->remote_tx_win = val;
3680 result = L2CAP_CONF_UNKNOWN;
3681 l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
3686 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3689 switch (chan->mode) {
3690 case L2CAP_MODE_STREAMING:
3691 case L2CAP_MODE_ERTM:
3692 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3693 chan->mode = l2cap_select_mode(rfc.mode,
3694 chan->conn->feat_mask);
3699 if (__l2cap_efs_supported(chan->conn))
3700 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3702 return -ECONNREFUSED;
3705 if (chan->mode != rfc.mode)
3706 return -ECONNREFUSED;
3712 if (chan->mode != rfc.mode) {
3713 result = L2CAP_CONF_UNACCEPT;
3714 rfc.mode = chan->mode;
/* A second mode mismatch is fatal */
3716 if (chan->num_conf_rsp == 1)
3717 return -ECONNREFUSED;
3719 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3720 (unsigned long) &rfc, endptr - ptr);
3723 if (result == L2CAP_CONF_SUCCESS) {
3724 /* Configure output options and let the other side know
3725 * which ones we don't like. */
3727 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3728 result = L2CAP_CONF_UNACCEPT;
3731 set_bit(CONF_MTU_DONE, &chan->conf_state);
3733 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3736 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3737 efs.stype != L2CAP_SERV_NOTRAFIC &&
3738 efs.stype != chan->local_stype) {
3740 result = L2CAP_CONF_UNACCEPT;
3742 if (chan->num_conf_req >= 1)
3743 return -ECONNREFUSED;
3745 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3747 (unsigned long) &efs, endptr - ptr);
3749 /* Send PENDING Conf Rsp */
3750 result = L2CAP_CONF_PENDING;
3751 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3756 case L2CAP_MODE_BASIC:
3757 chan->fcs = L2CAP_FCS_NONE;
3758 set_bit(CONF_MODE_DONE, &chan->conf_state);
3761 case L2CAP_MODE_ERTM:
3762 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3763 chan->remote_tx_win = rfc.txwin_size;
3765 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3767 chan->remote_max_tx = rfc.max_transmit;
3769 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3770 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3771 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3772 rfc.max_pdu_size = cpu_to_le16(size);
3773 chan->remote_mps = size;
3775 __l2cap_set_ertm_timeouts(chan, &rfc);
3777 set_bit(CONF_MODE_DONE, &chan->conf_state);
3779 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3780 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3783 test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3784 chan->remote_id = efs.id;
3785 chan->remote_stype = efs.stype;
3786 chan->remote_msdu = le16_to_cpu(efs.msdu);
3787 chan->remote_flush_to =
3788 le32_to_cpu(efs.flush_to);
3789 chan->remote_acc_lat =
3790 le32_to_cpu(efs.acc_lat);
3791 chan->remote_sdu_itime =
3792 le32_to_cpu(efs.sdu_itime);
3793 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3795 (unsigned long) &efs, endptr - ptr);
3799 case L2CAP_MODE_STREAMING:
3800 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3801 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3802 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3803 rfc.max_pdu_size = cpu_to_le16(size);
3804 chan->remote_mps = size;
3806 set_bit(CONF_MODE_DONE, &chan->conf_state);
3808 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3809 (unsigned long) &rfc, endptr - ptr);
3814 result = L2CAP_CONF_UNACCEPT;
3816 memset(&rfc, 0, sizeof(rfc));
3817 rfc.mode = chan->mode;
3820 if (result == L2CAP_CONF_SUCCESS)
3821 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3823 rsp->scid = cpu_to_le16(chan->dcid);
3824 rsp->result = cpu_to_le16(result);
3825 rsp->flags = cpu_to_le16(0);
/* Parse the peer's Configuration Response and build our follow-up request
 * into `data`. Echoes/adjusts each option (MTU with minimum enforcement,
 * flush timeout, RFC with mode-lock check, EWS, EFS with service-type
 * validation, FCS), then — on SUCCESS/PENDING — commits the negotiated
 * ERTM/streaming parameters to the channel. Returns the encoded request
 * length (elided from this listing).
 * NOTE(review): listing elides interior lines (type/olen/val
 * declarations, olen checks, `break`s, and the final return).
 */
3830 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3831 void *data, size_t size, u16 *result)
3833 struct l2cap_conf_req *req = data;
3834 void *ptr = req->data;
3835 void *endptr = data + size;
3838 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3839 struct l2cap_conf_efs efs;
3841 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3843 while (len >= L2CAP_CONF_OPT_SIZE) {
3844 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3849 case L2CAP_CONF_MTU:
3852 if (val < L2CAP_DEFAULT_MIN_MTU) {
3853 *result = L2CAP_CONF_UNACCEPT;
3854 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3857 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3861 case L2CAP_CONF_FLUSH_TO:
3864 chan->flush_to = val;
3865 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3866 chan->flush_to, endptr - ptr);
3869 case L2CAP_CONF_RFC:
3870 if (olen != sizeof(rfc))
3872 memcpy(&rfc, (void *)val, olen);
/* A STATE2 device must not be talked out of its mode */
3873 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3874 rfc.mode != chan->mode)
3875 return -ECONNREFUSED;
3877 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3878 (unsigned long) &rfc, endptr - ptr);
3881 case L2CAP_CONF_EWS:
3884 chan->ack_win = min_t(u16, val, chan->ack_win);
3885 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3886 chan->tx_win, endptr - ptr);
3889 case L2CAP_CONF_EFS:
3890 if (olen != sizeof(efs))
3892 memcpy(&efs, (void *)val, olen);
3893 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3894 efs.stype != L2CAP_SERV_NOTRAFIC &&
3895 efs.stype != chan->local_stype)
3896 return -ECONNREFUSED;
3897 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3898 (unsigned long) &efs, endptr - ptr);
3901 case L2CAP_CONF_FCS:
3904 if (*result == L2CAP_CONF_PENDING)
3905 if (val == L2CAP_FCS_NONE)
3906 set_bit(CONF_RECV_NO_FCS,
3912 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3913 return -ECONNREFUSED;
3915 chan->mode = rfc.mode;
3917 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3919 case L2CAP_MODE_ERTM:
3920 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3921 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3922 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3923 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3924 chan->ack_win = min_t(u16, chan->ack_win,
3927 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3928 chan->local_msdu = le16_to_cpu(efs.msdu);
3929 chan->local_sdu_itime =
3930 le32_to_cpu(efs.sdu_itime);
3931 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3932 chan->local_flush_to =
3933 le32_to_cpu(efs.flush_to);
3937 case L2CAP_MODE_STREAMING:
3938 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3942 req->dcid = cpu_to_le16(chan->dcid);
3943 req->flags = cpu_to_le16(0);
/* Build a bare Configuration Response (no options) with the given result
 * and flags. Returns the encoded length (return elided from this listing).
 */
3948 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3949 u16 result, u16 flags)
3951 struct l2cap_conf_rsp *rsp = data;
3952 void *ptr = rsp->data;
3954 BT_DBG("chan %p", chan);
3956 rsp->scid = cpu_to_le16(chan->dcid);
3957 rsp->result = cpu_to_le16(result);
3958 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE credit-based connection response for a channel
 * whose setup was previously deferred (e.g. DEFER_SETUP), advertising our
 * scid, MTU, MPS and initial rx credits with a SUCCESS result.
 */
3963 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3965 struct l2cap_le_conn_rsp rsp;
3966 struct l2cap_conn *conn = chan->conn;
3968 BT_DBG("chan %p", chan);
3970 rsp.dcid = cpu_to_le16(chan->scid);
3971 rsp.mtu = cpu_to_le16(chan->imtu);
3972 rsp.mps = cpu_to_le16(chan->mps);
3973 rsp.credits = cpu_to_le16(chan->rx_credits);
3974 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3976 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred enhanced-credit-based (ECRED) connection response:
 * one response covering every channel on the connection that shares the
 * same pending ident, listing each channel's scid in pdu.dcid[]. Each
 * matched channel's ident is reset so only one response goes out.
 * NOTE(review): listing elides interior lines (the pdu struct wrapping
 * rsp + dcid[], the `i` counter declaration, the ident reset statement
 * and loop guards) — confirm against the full source.
 */
3980 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3983 struct l2cap_ecred_conn_rsp rsp;
3986 struct l2cap_conn *conn = chan->conn;
3987 u16 ident = chan->ident;
3993 BT_DBG("chan %p ident %d", chan, ident);
3995 pdu.rsp.mtu = cpu_to_le16(chan->imtu);
3996 pdu.rsp.mps = cpu_to_le16(chan->mps);
3997 pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3998 pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
4000 mutex_lock(&conn->chan_lock);
4002 list_for_each_entry(chan, &conn->chan_l, list) {
4003 if (chan->ident != ident)
4006 /* Reset ident so only one response is sent */
4009 /* Include all channels pending with the same ident */
4010 pdu.dcid[i++] = cpu_to_le16(chan->scid);
4013 mutex_unlock(&conn->chan_lock);
4015 l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
4016 sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
/* Send the deferred BR/EDR connection response (CONN_RSP, or
 * CREATE_CHAN_RSP — the selecting condition is elided in this listing)
 * with SUCCESS, then kick off configuration by sending the first
 * Configuration Request if one has not been sent yet.
 * NOTE(review): listing elides interior lines (the `u8 buf[128]` and
 * rsp_code declarations, the condition choosing the rsp_code, and the
 * early return after CONF_REQ_SENT).
 */
4019 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4021 struct l2cap_conn_rsp rsp;
4022 struct l2cap_conn *conn = chan->conn;
4026 rsp.scid = cpu_to_le16(chan->dcid);
4027 rsp.dcid = cpu_to_le16(chan->scid);
4028 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4029 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4032 rsp_code = L2CAP_CREATE_CHAN_RSP;
4034 rsp_code = L2CAP_CONN_RSP;
4036 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4038 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
4040 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4043 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4044 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4045 chan->num_conf_req++;
/* Extract the final RFC (and extended-window) parameters from a peer's
 * configuration response and commit them to the channel, using sane
 * defaults if the remote omitted the options. Only meaningful for
 * ERTM/streaming channels.
 * NOTE(review): listing elides interior lines (type/olen/val
 * declarations, olen checks, `break`s, the EWS value capture into
 * txwin_ext, the `switch (rfc.mode)` opener and the non-ext ack_win
 * clamp argument) — confirm against the full source.
 */
4048 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4052 /* Use sane default values in case a misbehaving remote device
4053 * did not send an RFC or extended window size option.
4055 u16 txwin_ext = chan->ack_win;
4056 struct l2cap_conf_rfc rfc = {
4058 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4059 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4060 .max_pdu_size = cpu_to_le16(chan->imtu),
4061 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4064 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
4066 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4069 while (len >= L2CAP_CONF_OPT_SIZE) {
4070 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4075 case L2CAP_CONF_RFC:
4076 if (olen != sizeof(rfc))
4078 memcpy(&rfc, (void *)val, olen);
4080 case L2CAP_CONF_EWS:
4089 case L2CAP_MODE_ERTM:
4090 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4091 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4092 chan->mps = le16_to_cpu(rfc.max_pdu_size);
4093 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4094 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4096 chan->ack_win = min_t(u16, chan->ack_win,
4099 case L2CAP_MODE_STREAMING:
4100 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject for our pending information request: if the
 * remote rejected it as "not understood", cancel the info timer, mark
 * feature discovery done and proceed with connection start anyway.
 * NOTE(review): listing elides the early `return 0` statements after the
 * guards and the final return.
 */
4104 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4105 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4108 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4110 if (cmd_len < sizeof(*rej))
4113 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4116 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4117 cmd->ident == conn->info_ident) {
4118 cancel_delayed_work(&conn->info_timer);
4120 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4121 conn->info_ident = 0;
4123 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for the
 * PSM, enforce link security (except SDP), validate the source CID range
 * and uniqueness, create the new channel, choose a result/status based on
 * security and DEFER_SETUP state, send the Connection Response, and — if
 * the feature exchange is still pending — fire an Information Request.
 * On immediate success, also sends the first Configuration Request.
 * NOTE(review): listing elides interior lines (goto labels such as
 * response/unlock/sendresp, the dcid assignment from the new channel's
 * scid, the `u8 buf[128]` declaration and several `}`) — confirm against
 * the full source.
 */
4129 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4130 struct l2cap_cmd_hdr *cmd,
4131 u8 *data, u8 rsp_code, u8 amp_id)
4133 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4134 struct l2cap_conn_rsp rsp;
4135 struct l2cap_chan *chan = NULL, *pchan;
4136 int result, status = L2CAP_CS_NO_INFO;
4138 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4139 __le16 psm = req->psm;
4141 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4143 /* Check if we have socket listening on psm */
4144 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4145 &conn->hcon->dst, ACL_LINK);
4147 result = L2CAP_CR_BAD_PSM;
4151 mutex_lock(&conn->chan_lock);
4152 l2cap_chan_lock(pchan);
4154 /* Check if the ACL is secure enough (if not SDP) */
4155 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4156 !hci_conn_check_link_mode(conn->hcon)) {
4157 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4158 result = L2CAP_CR_SEC_BLOCK;
4162 result = L2CAP_CR_NO_MEM;
4164 /* Check for valid dynamic CID range (as per Erratum 3253) */
4165 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4166 result = L2CAP_CR_INVALID_SCID;
4170 /* Check if we already have channel with that dcid */
4171 if (__l2cap_get_chan_by_dcid(conn, scid)) {
4172 result = L2CAP_CR_SCID_IN_USE;
4176 chan = pchan->ops->new_connection(pchan);
4180 /* For certain devices (ex: HID mouse), support for authentication,
4181 * pairing and bonding is optional. For such devices, inorder to avoid
4182 * the ACL alive for too long after L2CAP disconnection, reset the ACL
4183 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4185 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4187 bacpy(&chan->src, &conn->hcon->src);
4188 bacpy(&chan->dst, &conn->hcon->dst);
4189 chan->src_type = bdaddr_src_type(conn->hcon);
4190 chan->dst_type = bdaddr_dst_type(conn->hcon);
4193 chan->local_amp_id = amp_id;
4195 __l2cap_chan_add(conn, chan);
4199 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4201 chan->ident = cmd->ident;
4203 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4204 if (l2cap_chan_check_security(chan, false)) {
4205 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4206 l2cap_state_change(chan, BT_CONNECT2);
4207 result = L2CAP_CR_PEND;
4208 status = L2CAP_CS_AUTHOR_PEND;
4209 chan->ops->defer(chan);
4211 /* Force pending result for AMP controllers.
4212 * The connection will succeed after the
4213 * physical link is up.
4215 if (amp_id == AMP_ID_BREDR) {
4216 l2cap_state_change(chan, BT_CONFIG);
4217 result = L2CAP_CR_SUCCESS;
4219 l2cap_state_change(chan, BT_CONNECT2);
4220 result = L2CAP_CR_PEND;
4222 status = L2CAP_CS_NO_INFO;
4225 l2cap_state_change(chan, BT_CONNECT2);
4226 result = L2CAP_CR_PEND;
4227 status = L2CAP_CS_AUTHEN_PEND;
4230 l2cap_state_change(chan, BT_CONNECT2);
4231 result = L2CAP_CR_PEND;
4232 status = L2CAP_CS_NO_INFO;
4236 l2cap_chan_unlock(pchan);
4237 mutex_unlock(&conn->chan_lock);
4238 l2cap_chan_put(pchan);
4241 rsp.scid = cpu_to_le16(scid);
4242 rsp.dcid = cpu_to_le16(dcid);
4243 rsp.result = cpu_to_le16(result);
4244 rsp.status = cpu_to_le16(status);
4245 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4247 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4248 struct l2cap_info_req info;
4249 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4251 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4252 conn->info_ident = l2cap_get_ident(conn);
4254 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4256 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4257 sizeof(info), &info);
4260 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4261 result == L2CAP_CR_SUCCESS) {
4263 set_bit(CONF_REQ_SENT, &chan->conf_state);
4264 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4265 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4266 chan->num_conf_req++;
/* Handle an incoming L2CAP Connect Request (BR/EDR).
 * Validates the command length, notifies the management interface about
 * the remote device exactly once per ACL link, then delegates channel
 * creation to l2cap_connect() with a standard L2CAP_CONN_RSP code and
 * amp_id 0 (plain BR/EDR).
 * NOTE(review): this is an elided extract — the truncation return path,
 * hci_dev_lock() and the function's return value are not visible here.
 */
4272 static int l2cap_connect_req(struct l2cap_conn *conn,
4273 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4275 struct hci_dev *hdev = conn->hcon->hdev;
4276 struct hci_conn *hcon = conn->hcon;
/* Reject a truncated request before touching the payload. */
4278 if (cmd_len < sizeof(struct l2cap_conn_req))
/* test_and_set_bit() guarantees mgmt_device_connected() fires only the
 * first time a channel is opened on this ACL link.
 */
4282 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4283 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4284 mgmt_device_connected(hdev, hcon, NULL, 0);
4285 hci_dev_unlock(hdev);
/* amp_id == 0: respond with a regular Connect Response. */
4287 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connect Response or Create Channel Response from the remote.
 * Looks the channel up by source CID (or, failing that, by the pending
 * command identifier), takes a reference with
 * l2cap_chan_hold_unless_zero(), and advances the channel state machine:
 * success moves the channel to BT_CONFIG and kicks off configuration;
 * a refused/failed result deletes the channel with ECONNREFUSED.
 * Runs under conn->chan_lock; the channel itself is locked while its
 * state is changed.
 */
4291 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4292 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4295 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4296 u16 scid, dcid, result, status;
4297 struct l2cap_chan *chan;
/* Guard against a truncated response PDU. */
4301 if (cmd_len < sizeof(*rsp))
4304 scid = __le16_to_cpu(rsp->scid);
4305 dcid = __le16_to_cpu(rsp->dcid);
4306 result = __le16_to_cpu(rsp->result);
4307 status = __le16_to_cpu(rsp->status);
4309 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4310 dcid, scid, result, status);
4312 mutex_lock(&conn->chan_lock);
/* Prefer lookup by our source CID; fall back to the request ident
 * (scid may be 0 in a rejection response).
 */
4315 chan = __l2cap_get_chan_by_scid(conn, scid);
4321 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
/* Take a reference only if the channel is not already being freed. */
4328 chan = l2cap_chan_hold_unless_zero(chan);
4336 l2cap_chan_lock(chan);
4339 case L2CAP_CR_SUCCESS:
4340 l2cap_state_change(chan, BT_CONFIG);
4343 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Only the first successful response triggers our Config Request. */
4345 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4348 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4349 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4350 chan->num_conf_req++;
/* Pending result: remember we are still waiting for the final answer. */
4354 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result code: tear the channel down. */
4358 l2cap_chan_del(chan, ECONNREFUSED);
4362 l2cap_chan_unlock(chan);
4363 l2cap_chan_put(chan);
4366 mutex_unlock(&conn->chan_lock);
/* set_default_fcs() - choose the channel's frame-check-sequence setting.
 * FCS applies only to ERTM and streaming modes; otherwise it is forced
 * off.  In ERTM/streaming it defaults to CRC16 unless the peer asked to
 * disable it during configuration (CONF_RECV_NO_FCS).
 */
4371 static inline void set_default_fcs(struct l2cap_chan *chan)
4373 /* FCS is enabled only in ERTM or streaming mode, if one or both
4376 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4377 chan->fcs = L2CAP_FCS_NONE;
4378 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4379 chan->fcs = L2CAP_FCS_CRC16;
/* l2cap_send_efs_conf_rsp() - send a successful Configure Response.
 * Used on the EFS/AMP path once local configuration can complete:
 * clears the local "config pending" flag, marks our output side done,
 * and transmits a CONF_RSP built into the caller-supplied buffer.
 */
4382 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4383 u8 ident, u16 flags)
4385 struct l2cap_conn *conn = chan->conn;
4387 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4390 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4391 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4393 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4394 l2cap_build_conf_rsp(chan, data,
4395 L2CAP_CONF_SUCCESS, flags), data);
/* cmd_reject_invalid_cid() - send a Command Reject with reason
 * "invalid CID", echoing the offending source/destination CID pair back
 * to the peer, as required by the L2CAP signaling rules.
 */
4398 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4401 struct l2cap_cmd_rej_cid rej;
4403 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4404 rej.scid = __cpu_to_le16(scid);
4405 rej.dcid = __cpu_to_le16(dcid);
4407 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configure Request.
 * Configuration options may arrive fragmented across several requests
 * (CONTINUATION flag); fragments are accumulated in chan->conf_req and
 * only parsed once the final fragment arrives.  Oversized accumulated
 * config is rejected outright.  When both sides have finished
 * configuration (INPUT and OUTPUT done), the channel is initialized
 * (ERTM if applicable) and marked ready.
 */
4410 static inline int l2cap_config_req(struct l2cap_conn *conn,
4411 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4414 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4417 struct l2cap_chan *chan;
4420 if (cmd_len < sizeof(*req))
4423 dcid = __le16_to_cpu(req->dcid);
4424 flags = __le16_to_cpu(req->flags);
4426 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
/* The peer addresses us by our source CID. Unknown CID => reject. */
4428 chan = l2cap_get_chan_by_scid(conn, dcid);
4430 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config is only legal while connecting/configuring (or for
 * reconfiguration on a connected channel).
 */
4434 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4435 chan->state != BT_CONNECTED) {
4436 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4441 /* Reject if config buffer is too small. */
4442 len = cmd_len - sizeof(*req);
4443 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4444 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4445 l2cap_build_conf_rsp(chan, rsp,
4446 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request buffer. */
4451 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4452 chan->conf_len += len;
4454 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4455 /* Incomplete config. Send empty response. */
4456 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4457 l2cap_build_conf_rsp(chan, rsp,
4458 L2CAP_CONF_SUCCESS, flags), rsp);
4462 /* Complete config. */
4463 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
/* Parse failure is fatal for the channel. */
4465 l2cap_send_disconn_req(chan, ECONNRESET);
4469 chan->ident = cmd->ident;
4470 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4471 if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
4472 chan->num_conf_rsp++;
4474 /* Reset config buffer. */
4477 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4480 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4481 set_default_fcs(chan);
/* Both directions configured: bring ERTM/streaming machinery up
 * before declaring the channel ready.
 */
4483 if (chan->mode == L2CAP_MODE_ERTM ||
4484 chan->mode == L2CAP_MODE_STREAMING)
4485 err = l2cap_ertm_init(chan);
4488 l2cap_send_disconn_req(chan, -err);
4490 l2cap_chan_ready(chan);
/* If we have not yet sent our own Config Request, do so now. */
4495 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4497 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4498 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4499 chan->num_conf_req++;
4502 /* Got Conf Rsp PENDING from remote side and assume we sent
4503 Conf Rsp PENDING in the code above */
4504 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4505 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4507 /* check compatibility */
4509 /* Send rsp for BR/EDR channel */
4511 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4513 chan->ident = cmd->ident;
4517 l2cap_chan_unlock(chan);
4518 l2cap_chan_put(chan);
/* Handle an incoming Configure Response.
 * Dispatches on the result code: SUCCESS records the agreed RFC
 * options; PENDING may trigger an EFS response or AMP logical-link
 * creation; UNKNOWN/UNACCEPT re-negotiates by parsing the peer's
 * counter-proposal and sending a fresh Config Request (bounded by
 * L2CAP_CONF_MAX_CONF_RSP); anything else aborts with a Disconnect
 * Request.  When both directions are done, ERTM is initialized and the
 * channel is marked ready.
 */
4522 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4523 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4526 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4527 u16 scid, flags, result;
4528 struct l2cap_chan *chan;
/* Remaining bytes after the fixed header = option payload length. */
4529 int len = cmd_len - sizeof(*rsp);
4532 if (cmd_len < sizeof(*rsp))
4535 scid = __le16_to_cpu(rsp->scid);
4536 flags = __le16_to_cpu(rsp->flags);
4537 result = __le16_to_cpu(rsp->result);
4539 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4542 chan = l2cap_get_chan_by_scid(conn, scid);
4547 case L2CAP_CONF_SUCCESS:
4548 l2cap_conf_rfc_get(chan, rsp->data, len);
4549 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4552 case L2CAP_CONF_PENDING:
4553 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4555 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4558 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4559 buf, sizeof(buf), &result);
/* Unparsable pending response: give up on the channel. */
4561 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR channel answers immediately; an AMP channel with EFS
 * must first bring up the logical link.
 */
4565 if (!chan->hs_hcon) {
4566 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4569 if (l2cap_check_efs(chan)) {
4570 amp_create_logical_link(chan);
4571 chan->ident = cmd->ident;
4577 case L2CAP_CONF_UNKNOWN:
4578 case L2CAP_CONF_UNACCEPT:
/* Retry negotiation only while under the response cap. */
4579 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4582 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4583 l2cap_send_disconn_req(chan, ECONNRESET);
4587 /* throw out any old stored conf requests */
4588 result = L2CAP_CONF_SUCCESS;
4589 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4590 req, sizeof(req), &result);
4592 l2cap_send_disconn_req(chan, ECONNRESET);
4596 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4597 L2CAP_CONF_REQ, len, req);
4598 chan->num_conf_req++;
4599 if (result != L2CAP_CONF_SUCCESS)
/* Default: unrecoverable result code — report and disconnect. */
4606 l2cap_chan_set_err(chan, ECONNRESET);
4608 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4609 l2cap_send_disconn_req(chan, ECONNRESET);
/* More fragments to come: wait for the final response. */
4613 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4616 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4618 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4619 set_default_fcs(chan);
4621 if (chan->mode == L2CAP_MODE_ERTM ||
4622 chan->mode == L2CAP_MODE_STREAMING)
4623 err = l2cap_ertm_init(chan);
4626 l2cap_send_disconn_req(chan, -err);
4628 l2cap_chan_ready(chan);
4632 l2cap_chan_unlock(chan);
4633 l2cap_chan_put(chan);
/* Handle an incoming Disconnect Request.
 * Looks the channel up by our source CID (the peer's dcid), answers
 * with a Disconnect Response echoing the CID pair, then shuts down and
 * deletes the channel.  A reference is held across the teardown so the
 * channel outlives l2cap_chan_del().
 */
4637 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4638 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4641 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4642 struct l2cap_disconn_rsp rsp;
4644 struct l2cap_chan *chan;
/* Disconnect Request has a fixed size — anything else is malformed. */
4646 if (cmd_len != sizeof(*req))
4649 scid = __le16_to_cpu(req->scid);
4650 dcid = __le16_to_cpu(req->dcid);
4652 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4654 mutex_lock(&conn->chan_lock);
4656 chan = __l2cap_get_chan_by_scid(conn, dcid);
4658 mutex_unlock(&conn->chan_lock);
4659 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
/* Hold + lock: keep the channel alive while we delete it below. */
4663 l2cap_chan_hold(chan);
4664 l2cap_chan_lock(chan);
4666 rsp.dcid = cpu_to_le16(chan->scid);
4667 rsp.scid = cpu_to_le16(chan->dcid);
4668 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4670 chan->ops->set_shutdown(chan);
4672 l2cap_chan_del(chan, ECONNRESET);
4674 chan->ops->close(chan);
4676 l2cap_chan_unlock(chan);
4677 l2cap_chan_put(chan);
4679 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnect Response.
 * Completes a disconnect we initiated: only a channel already in
 * BT_DISCONN is torn down; responses for channels in any other state
 * are ignored.
 */
4684 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4685 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4688 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4690 struct l2cap_chan *chan;
4692 if (cmd_len != sizeof(*rsp))
4695 scid = __le16_to_cpu(rsp->scid);
4696 dcid = __le16_to_cpu(rsp->dcid);
4698 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4700 mutex_lock(&conn->chan_lock);
4702 chan = __l2cap_get_chan_by_scid(conn, scid);
4704 mutex_unlock(&conn->chan_lock);
4708 l2cap_chan_hold(chan);
4709 l2cap_chan_lock(chan);
/* Stale/unsolicited response: we never asked to disconnect. */
4711 if (chan->state != BT_DISCONN) {
4712 l2cap_chan_unlock(chan);
4713 l2cap_chan_put(chan);
4714 mutex_unlock(&conn->chan_lock);
/* err 0: this is a clean, locally-initiated disconnect completing. */
4718 l2cap_chan_del(chan, 0);
4720 chan->ops->close(chan);
4722 l2cap_chan_unlock(chan);
4723 l2cap_chan_put(chan);
4725 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request.
 * Supports the feature-mask and fixed-channels queries; any other type
 * gets an L2CAP_IR_NOTSUPP response.  The feature mask starts from the
 * module-wide l2cap_feat_mask and is extended with ERTM/streaming and,
 * when A2MP is locally enabled, extended flow spec + extended window.
 */
4730 static inline int l2cap_information_req(struct l2cap_conn *conn,
4731 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4734 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4737 if (cmd_len != sizeof(*req))
4740 type = __le16_to_cpu(req->type);
4742 BT_DBG("type 0x%4.4x", type);
4744 if (type == L2CAP_IT_FEAT_MASK) {
4746 u32 feat_mask = l2cap_feat_mask;
4747 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4748 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4749 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4751 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended features are only advertised with local A2MP support. */
4753 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4754 feat_mask |= L2CAP_FEAT_EXT_FLOW
4755 | L2CAP_FEAT_EXT_WINDOW;
/* Feature mask lives unaligned in the response payload. */
4757 put_unaligned_le32(feat_mask, rsp->data);
4758 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4760 } else if (type == L2CAP_IT_FIXED_CHAN) {
4762 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4764 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4765 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel bitmap: first octet is ours, remaining 7 zero. */
4766 rsp->data[0] = conn->local_fixed_chan;
4767 memset(rsp->data + 1, 0, 7);
4768 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4771 struct l2cap_info_rsp rsp;
4772 rsp.type = cpu_to_le16(type);
4773 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4774 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming Information Response.
 * Info req/rsp are connection-level (not bound to a channel), so the
 * ident and the REQ_DONE state are checked explicitly to drop stale or
 * duplicate responses.  A feature-mask answer may chain a follow-up
 * fixed-channels query; once the exchange finishes, pending channels
 * are started via l2cap_conn_start().
 */
4781 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4782 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4785 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4788 if (cmd_len < sizeof(*rsp))
4791 type = __le16_to_cpu(rsp->type);
4792 result = __le16_to_cpu(rsp->result);
4794 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4796 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4797 if (cmd->ident != conn->info_ident ||
4798 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4801 cancel_delayed_work(&conn->info_timer);
4803 if (result != L2CAP_IR_SUCCESS) {
4804 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4805 conn->info_ident = 0;
4807 l2cap_conn_start(conn);
4813 case L2CAP_IT_FEAT_MASK:
4814 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask which ones before starting. */
4816 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4817 struct l2cap_info_req req;
4818 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4820 conn->info_ident = l2cap_get_ident(conn);
4822 l2cap_send_cmd(conn, conn->info_ident,
4823 L2CAP_INFO_REQ, sizeof(req), &req);
4825 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4826 conn->info_ident = 0;
4828 l2cap_conn_start(conn);
4832 case L2CAP_IT_FIXED_CHAN:
4833 conn->remote_fixed_chan = rsp->data[0];
4834 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4835 conn->info_ident = 0;
4837 l2cap_conn_start(conn);
/* Handle an incoming Create Channel Request (AMP).
 * amp_id 0 (AMP_ID_BREDR) degenerates to a normal BR/EDR connect; a
 * non-zero amp_id is validated against a registered, powered-up AMP
 * controller.  On success the BR/EDR channel is associated with the
 * AMP manager and the existing high-speed hci_conn, FCS is disabled,
 * and the connection MTU is switched to the AMP controller's block
 * MTU.  Invalid controllers are answered with L2CAP_CR_BAD_AMP.
 */
4844 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4845 struct l2cap_cmd_hdr *cmd,
4846 u16 cmd_len, void *data)
4848 struct l2cap_create_chan_req *req = data;
4849 struct l2cap_create_chan_rsp rsp;
4850 struct l2cap_chan *chan;
4851 struct hci_dev *hdev;
4854 if (cmd_len != sizeof(*req))
/* A2MP must be locally enabled for Create Channel to be legal. */
4857 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4860 psm = le16_to_cpu(req->psm);
4861 scid = le16_to_cpu(req->scid);
4863 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4865 /* For controller id 0 make BR/EDR connection */
4866 if (req->amp_id == AMP_ID_BREDR) {
4867 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4872 /* Validate AMP controller id */
4873 hdev = hci_dev_get(req->amp_id);
4877 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4882 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4885 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4886 struct hci_conn *hs_hcon;
/* The AMP physical link must already exist toward this peer. */
4888 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4892 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4897 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4899 mgr->bredr_chan = chan;
4900 chan->hs_hcon = hs_hcon;
/* No FCS on AMP data paths. */
4901 chan->fcs = L2CAP_FCS_NONE;
4902 conn->mtu = hdev->block_mtu;
/* Error path: report a bad AMP controller id to the requester. */
4911 rsp.scid = cpu_to_le16(scid);
4912 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4913 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4915 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* l2cap_send_move_chan_req() - start a channel move to the given AMP
 * controller.  Allocates a fresh signaling ident (also stored on the
 * channel to match the response) and arms the move timer.
 */
4921 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4923 struct l2cap_move_chan_req req;
4926 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4928 ident = l2cap_get_ident(chan->conn);
4929 chan->ident = ident;
4931 req.icid = cpu_to_le16(chan->scid);
4932 req.dest_amp_id = dest_amp_id;
4934 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4937 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* l2cap_send_move_chan_rsp() - answer a peer-initiated move with the
 * given result, reusing the ident saved from the request.
 */
4940 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4942 struct l2cap_move_chan_rsp rsp;
4944 BT_DBG("chan %p, result 0x%4.4x", chan, result);
/* The responder identifies the channel by its destination CID. */
4946 rsp.icid = cpu_to_le16(chan->dcid);
4947 rsp.result = cpu_to_le16(result);
4949 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* l2cap_send_move_chan_cfm() - initiator's Move Confirm; uses a new
 * ident and re-arms the move timer while waiting for the confirm rsp.
 */
4953 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4955 struct l2cap_move_chan_cfm cfm;
4957 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4959 chan->ident = l2cap_get_ident(chan->conn);
4961 cfm.icid = cpu_to_le16(chan->scid);
4962 cfm.result = cpu_to_le16(result);
4964 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4967 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* l2cap_send_move_chan_cfm_icid() - send an UNCONFIRMED Move Confirm
 * for an icid with no matching channel (best-effort, per spec).
 */
4970 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4972 struct l2cap_move_chan_cfm cfm;
4974 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4976 cfm.icid = cpu_to_le16(icid);
4977 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4979 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* l2cap_send_move_chan_cfm_rsp() - acknowledge a Move Confirm,
 * echoing the icid back with the sender's ident.
 */
4983 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4986 struct l2cap_move_chan_cfm_rsp rsp;
4988 BT_DBG("icid 0x%4.4x", icid);
4990 rsp.icid = cpu_to_le16(icid);
4991 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* __release_logical_link() - drop the channel's references to the AMP
 * logical link.  Actual link release is still a placeholder here.
 */
4994 static void __release_logical_link(struct l2cap_chan *chan)
4996 chan->hs_hchan = NULL;
4997 chan->hs_hcon = NULL;
4999 /* Placeholder - release the logical link */
/* l2cap_logical_fail() - logical-link setup failed.
 * During channel creation (state != BT_CONNECTED) the channel is simply
 * disconnected; during a move, the reaction depends on whether we were
 * the move responder or initiator.
 */
5002 static void l2cap_logical_fail(struct l2cap_chan *chan)
5004 /* Logical link setup failed */
5005 if (chan->state != BT_CONNECTED) {
5006 /* Create channel failure, disconnect */
5007 l2cap_send_disconn_req(chan, ECONNRESET);
5011 switch (chan->move_role) {
5012 case L2CAP_MOVE_ROLE_RESPONDER:
5013 l2cap_move_done(chan);
5014 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
5016 case L2CAP_MOVE_ROLE_INITIATOR:
5017 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
5018 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
5019 /* Remote has only sent pending or
5020 * success responses, clean up
5022 l2cap_move_done(chan);
5025 /* Other amp move states imply that the move
5026 * has already aborted
5028 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* l2cap_logical_finish_create() - logical link came up while the
 * channel was still being created: attach the hci_chan, send the
 * deferred EFS Configure Response and, if input config is already
 * done, finish ERTM init and mark the channel ready.
 */
5033 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
5034 struct hci_chan *hchan)
5036 struct l2cap_conf_rsp rsp;
5038 chan->hs_hchan = hchan;
5039 chan->hs_hcon->l2cap_data = chan->conn;
5041 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
5043 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
5046 set_default_fcs(chan);
5048 err = l2cap_ertm_init(chan);
5050 l2cap_send_disconn_req(chan, -err);
5052 l2cap_chan_ready(chan);
/* l2cap_logical_finish_move() - logical link came up mid-move:
 * advance the move state machine according to role and local-busy
 * state; unexpected states release the link and settle the move.
 */
5056 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5057 struct hci_chan *hchan)
5059 chan->hs_hcon = hchan->conn;
5060 chan->hs_hcon->l2cap_data = chan->conn;
5062 BT_DBG("move_state %d", chan->move_state);
5064 switch (chan->move_state) {
5065 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5066 /* Move confirm will be sent after a success
5067 * response is received
5069 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5071 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
5072 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5073 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5074 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5075 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5076 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5077 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5078 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5079 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5083 /* Move was not in expected state, free the channel */
5084 __release_logical_link(chan);
5086 chan->move_state = L2CAP_MOVE_STABLE;
5090 /* Call with chan locked */
/* l2cap_logical_cfm() - completion callback for AMP logical-link
 * setup.  On failure, runs the failure path and releases the link; on
 * success, dispatches to the create path (channel not yet connected,
 * and not on BR/EDR) or the move path (channel already connected).
 */
5091 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5094 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5097 l2cap_logical_fail(chan);
5098 __release_logical_link(chan);
5102 if (chan->state != BT_CONNECTED) {
5103 /* Ignore logical link if channel is on BR/EDR */
5104 if (chan->local_amp_id != AMP_ID_BREDR)
5105 l2cap_logical_finish_create(chan, hchan);
5107 l2cap_logical_finish_move(chan, hchan);
/* l2cap_move_start() - begin moving this channel to/from an AMP.
 * From BR/EDR the move only starts if the channel policy prefers AMP
 * (physical link setup is still a placeholder); from an AMP the move
 * back to BR/EDR (dest id 0) is requested immediately.
 */
5111 void l2cap_move_start(struct l2cap_chan *chan)
5113 BT_DBG("chan %p", chan);
5115 if (chan->local_amp_id == AMP_ID_BREDR) {
5116 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5118 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5119 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5120 /* Placeholder - start physical link setup */
5122 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5123 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5125 l2cap_move_setup(chan);
/* dest_amp_id 0 == move back to BR/EDR. */
5126 l2cap_send_move_chan_req(chan, 0);
/* l2cap_do_create() - physical link result for a channel being created
 * on an AMP.  Outgoing channels either proceed with a Create Channel
 * Request or fall back to a plain BR/EDR Connect Request; incoming
 * channels get a Create Channel Response (success or NO_MEM) and, on
 * success, start configuration.
 */
5130 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5131 u8 local_amp_id, u8 remote_amp_id)
5133 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5134 local_amp_id, remote_amp_id);
/* AMP channels never use FCS. */
5136 chan->fcs = L2CAP_FCS_NONE;
5138 /* Outgoing channel on AMP */
5139 if (chan->state == BT_CONNECT) {
5140 if (result == L2CAP_CR_SUCCESS) {
5141 chan->local_amp_id = local_amp_id;
5142 l2cap_send_create_chan_req(chan, remote_amp_id);
5144 /* Revert to BR/EDR connect */
5145 l2cap_send_conn_req(chan);
5151 /* Incoming channel on AMP */
5152 if (__l2cap_no_conn_pending(chan)) {
5153 struct l2cap_conn_rsp rsp;
5155 rsp.scid = cpu_to_le16(chan->dcid);
5156 rsp.dcid = cpu_to_le16(chan->scid);
5158 if (result == L2CAP_CR_SUCCESS) {
5159 /* Send successful response */
5160 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5161 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5163 /* Send negative response */
5164 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5165 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5168 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
5171 if (result == L2CAP_CR_SUCCESS) {
5172 l2cap_state_change(chan, BT_CONFIG);
5173 set_bit(CONF_REQ_SENT, &chan->conf_state);
5174 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5176 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5177 chan->num_conf_req++;
/* l2cap_do_move_initiate() - physical link ready, we initiated the
 * move: record the target controller and send the Move Channel Req.
 */
5182 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5185 l2cap_move_setup(chan);
5186 chan->move_id = local_amp_id;
5187 chan->move_state = L2CAP_MOVE_WAIT_RSP;
5189 l2cap_send_move_chan_req(chan, remote_amp_id);
/* l2cap_do_move_respond() - physical link ready, peer initiated the
 * move: if the logical link is already connected, attach it and answer
 * success; otherwise wait for the logical-link confirmation or reply
 * NOT_ALLOWED when no link is available.
 * NOTE(review): hchan acquisition is still a placeholder in this code.
 */
5192 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5194 struct hci_chan *hchan = NULL;
5196 /* Placeholder - get hci_chan for logical link */
5199 if (hchan->state == BT_CONNECTED) {
5200 /* Logical link is ready to go */
5201 chan->hs_hcon = hchan->conn;
5202 chan->hs_hcon->l2cap_data = chan->conn;
5203 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5204 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5206 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5208 /* Wait for logical link to be ready */
5209 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5212 /* Logical link not available */
5213 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* l2cap_do_move_cancel() - abort a move.  Responders still owe the
 * peer a response (BAD_ID for -EINVAL, otherwise NOT_ALLOWED); the
 * move state is reset and ERTM transmission restarted.
 */
5217 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5219 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5221 if (result == -EINVAL)
5222 rsp_result = L2CAP_MR_BAD_ID;
5224 rsp_result = L2CAP_MR_NOT_ALLOWED;
5226 l2cap_send_move_chan_rsp(chan, rsp_result);
5229 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5230 chan->move_state = L2CAP_MOVE_STABLE;
5232 /* Restart data transmission */
5233 l2cap_ertm_send(chan);
5236 /* Invoke with locked chan */
/* __l2cap_physical_cfm() - AMP physical-link setup completed.
 * Ignores channels already closing; dispatches to the create path when
 * the channel is not yet connected, cancels the move on failure, and
 * otherwise continues the move according to our role (initiator /
 * responder / none).
 */
5237 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5239 u8 local_amp_id = chan->local_amp_id;
5240 u8 remote_amp_id = chan->remote_amp_id;
5242 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5243 chan, result, local_amp_id, remote_amp_id);
/* Channel is going away; nothing to do. */
5245 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5248 if (chan->state != BT_CONNECTED) {
5249 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5250 } else if (result != L2CAP_MR_SUCCESS) {
5251 l2cap_do_move_cancel(chan, result);
5253 switch (chan->move_role) {
5254 case L2CAP_MOVE_ROLE_INITIATOR:
5255 l2cap_do_move_initiate(chan, local_amp_id,
5258 case L2CAP_MOVE_ROLE_RESPONDER:
5259 l2cap_do_move_respond(chan, result);
5262 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request.
 * Validates that the move is permitted (dynamic CID, not BREDR-only
 * policy, ERTM/streaming mode, sane destination controller), detects
 * move collisions (the larger bd_addr wins), then becomes move
 * responder: moves to BR/EDR complete immediately (or wait on local
 * busy), moves to an AMP answer PEND while the physical link is
 * prepared.  Always replies with a Move Channel Response.
 */
5268 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5269 struct l2cap_cmd_hdr *cmd,
5270 u16 cmd_len, void *data)
5272 struct l2cap_move_chan_req *req = data;
5273 struct l2cap_move_chan_rsp rsp;
5274 struct l2cap_chan *chan;
5276 u16 result = L2CAP_MR_NOT_ALLOWED;
5278 if (cmd_len != sizeof(*req))
5281 icid = le16_to_cpu(req->icid);
5283 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
/* Moves only make sense when A2MP is enabled locally. */
5285 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5288 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown icid: answer NOT_ALLOWED without a channel. */
5290 rsp.icid = cpu_to_le16(icid);
5291 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5292 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5297 chan->ident = cmd->ident;
/* Fixed channels, BR/EDR-only policy, and basic-mode channels
 * must not be moved.
 */
5299 if (chan->scid < L2CAP_CID_DYN_START ||
5300 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5301 (chan->mode != L2CAP_MODE_ERTM &&
5302 chan->mode != L2CAP_MODE_STREAMING)) {
5303 result = L2CAP_MR_NOT_ALLOWED;
5304 goto send_move_response;
5307 if (chan->local_amp_id == req->dest_amp_id) {
5308 result = L2CAP_MR_SAME_ID;
5309 goto send_move_response;
/* Destination AMP controller must exist, be an AMP, and be up. */
5312 if (req->dest_amp_id != AMP_ID_BREDR) {
5313 struct hci_dev *hdev;
5314 hdev = hci_dev_get(req->dest_amp_id);
5315 if (!hdev || hdev->dev_type != HCI_AMP ||
5316 !test_bit(HCI_UP, &hdev->flags)) {
5320 result = L2CAP_MR_BAD_ID;
5321 goto send_move_response;
5326 /* Detect a move collision. Only send a collision response
5327 * if this side has "lost", otherwise proceed with the move.
5328 * The winner has the larger bd_addr.
5330 if ((__chan_is_moving(chan) ||
5331 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5332 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5333 result = L2CAP_MR_COLLISION;
5334 goto send_move_response;
5337 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5338 l2cap_move_setup(chan);
5339 chan->move_id = req->dest_amp_id;
5341 if (req->dest_amp_id == AMP_ID_BREDR) {
5342 /* Moving to BR/EDR */
5343 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5344 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5345 result = L2CAP_MR_PEND;
5347 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5348 result = L2CAP_MR_SUCCESS;
5351 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5352 /* Placeholder - uncomment when amp functions are available */
5353 /*amp_accept_physical(chan, req->dest_amp_id);*/
5354 result = L2CAP_MR_PEND;
5358 l2cap_send_move_chan_rsp(chan, result);
5360 l2cap_chan_unlock(chan);
5361 l2cap_chan_put(chan);
/* l2cap_move_continue() - process a SUCCESS/PEND Move Channel Response
 * as the move initiator.  Advances the move state machine; PEND
 * re-arms the (extended) move timer.  Unexpected states mean the move
 * failed: revert to the current controller and send UNCONFIRMED.
 * If the channel cannot be found, an UNCONFIRMED confirm is sent for
 * the bare icid as the spec requires.
 */
5366 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5368 struct l2cap_chan *chan;
5369 struct hci_chan *hchan = NULL;
5371 chan = l2cap_get_chan_by_scid(conn, icid);
5373 l2cap_send_move_chan_cfm_icid(conn, icid);
5377 __clear_chan_timer(chan);
/* Peer answered "pending": give it the extended response timeout. */
5378 if (result == L2CAP_MR_PEND)
5379 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5381 switch (chan->move_state) {
5382 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5383 /* Move confirm will be sent when logical link
5386 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5388 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5389 if (result == L2CAP_MR_PEND) {
5391 } else if (test_bit(CONN_LOCAL_BUSY,
5392 &chan->conn_state)) {
5393 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5395 /* Logical link is up or moving to BR/EDR,
5398 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5399 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5402 case L2CAP_MOVE_WAIT_RSP:
5404 if (result == L2CAP_MR_SUCCESS) {
5405 /* Remote is ready, send confirm immediately
5406 * after logical link is ready
5408 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5410 /* Both logical link and move success
5411 * are required to confirm
5413 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5416 /* Placeholder - get hci_chan for logical link */
5418 /* Logical link not available */
5419 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5423 /* If the logical link is not yet connected, do not
5424 * send confirmation.
5426 if (hchan->state != BT_CONNECTED)
5429 /* Logical link is already ready to go */
5431 chan->hs_hcon = hchan->conn;
5432 chan->hs_hcon->l2cap_data = chan->conn;
5434 if (result == L2CAP_MR_SUCCESS) {
5435 /* Can confirm now */
5436 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5438 /* Now only need move success
5441 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5444 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5447 /* Any other amp move state means the move failed. */
5448 chan->move_id = chan->local_amp_id;
5449 l2cap_move_done(chan);
5450 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5453 l2cap_chan_unlock(chan);
5454 l2cap_chan_put(chan);
/* l2cap_move_fail() - Move Channel Response carried a failure result.
 * COLLISION flips the initiator into responder (the peer won the
 * tiebreak); every other failure cancels the move and reverts to the
 * current controller.  Either way an UNCONFIRMED confirm is sent.
 */
5457 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5460 struct l2cap_chan *chan;
5462 chan = l2cap_get_chan_by_ident(conn, ident);
5464 /* Could not locate channel, icid is best guess */
5465 l2cap_send_move_chan_cfm_icid(conn, icid);
5469 __clear_chan_timer(chan);
5471 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5472 if (result == L2CAP_MR_COLLISION) {
5473 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5475 /* Cleanup - cancel move */
5476 chan->move_id = chan->local_amp_id;
5477 l2cap_move_done(chan);
5481 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5483 l2cap_chan_unlock(chan);
5484 l2cap_chan_put(chan);
/* Handle an incoming Move Channel Response: SUCCESS/PEND continues the
 * move, anything else routes to the failure handler.
 */
5487 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5488 struct l2cap_cmd_hdr *cmd,
5489 u16 cmd_len, void *data)
5491 struct l2cap_move_chan_rsp *rsp = data;
5494 if (cmd_len != sizeof(*rsp))
5497 icid = le16_to_cpu(rsp->icid);
5498 result = le16_to_cpu(rsp->result);
5500 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5502 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5503 l2cap_move_continue(conn, icid, result);
5505 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Confirm (we are the responder).
 * CONFIRMED commits the move (switching local_amp_id, releasing the
 * logical link if we moved back to BR/EDR); UNCONFIRMED reverts.
 * Per spec, a Confirm Response is sent even when the icid is unknown.
 */
5510 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5511 struct l2cap_cmd_hdr *cmd,
5512 u16 cmd_len, void *data)
5514 struct l2cap_move_chan_cfm *cfm = data;
5515 struct l2cap_chan *chan;
5518 if (cmd_len != sizeof(*cfm))
5521 icid = le16_to_cpu(cfm->icid);
5522 result = le16_to_cpu(cfm->result);
5524 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5526 chan = l2cap_get_chan_by_dcid(conn, icid);
5528 /* Spec requires a response even if the icid was not found */
5529 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5533 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5534 if (result == L2CAP_MC_CONFIRMED) {
5535 chan->local_amp_id = chan->move_id;
5536 if (chan->local_amp_id == AMP_ID_BREDR)
5537 __release_logical_link(chan);
/* Unconfirmed: stay on the current controller. */
5539 chan->move_id = chan->local_amp_id;
5542 l2cap_move_done(chan);
5545 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5547 l2cap_chan_unlock(chan);
5548 l2cap_chan_put(chan);
/* Handle an incoming Move Channel Confirm Response (we are the
 * initiator): finalize the move, commit the new controller id, and
 * release the old logical link when landing back on BR/EDR.
 */
5553 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5554 struct l2cap_cmd_hdr *cmd,
5555 u16 cmd_len, void *data)
5557 struct l2cap_move_chan_cfm_rsp *rsp = data;
5558 struct l2cap_chan *chan;
5561 if (cmd_len != sizeof(*rsp))
5564 icid = le16_to_cpu(rsp->icid);
5566 BT_DBG("icid 0x%4.4x", icid);
5568 chan = l2cap_get_chan_by_scid(conn, icid);
5572 __clear_chan_timer(chan);
5574 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5575 chan->local_amp_id = chan->move_id;
5577 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5578 __release_logical_link(chan);
5580 l2cap_move_done(chan);
5583 l2cap_chan_unlock(chan);
5584 l2cap_chan_put(chan);
/* Handle an LE Connection Parameter Update Request.
 * Only valid when we are the LE central (HCI_ROLE_MASTER).  Parameters
 * are validated by hci_check_conn_params(); the response (accepted or
 * rejected) is sent first, then accepted parameters are applied via
 * hci_le_conn_update() and the store hint forwarded to mgmt.
 */
5589 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5590 struct l2cap_cmd_hdr *cmd,
5591 u16 cmd_len, u8 *data)
5593 struct hci_conn *hcon = conn->hcon;
5594 struct l2cap_conn_param_update_req *req;
5595 struct l2cap_conn_param_update_rsp rsp;
5596 u16 min, max, latency, to_multiplier;
/* Peripherals must not receive this request — reject early. */
5599 if (hcon->role != HCI_ROLE_MASTER)
5602 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5605 req = (struct l2cap_conn_param_update_req *) data;
5606 min = __le16_to_cpu(req->min);
5607 max = __le16_to_cpu(req->max);
5608 latency = __le16_to_cpu(req->latency);
5609 to_multiplier = __le16_to_cpu(req->to_multiplier);
5611 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5612 min, max, latency, to_multiplier);
5614 memset(&rsp, 0, sizeof(rsp));
5616 err = hci_check_conn_params(min, max, latency, to_multiplier);
5618 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5620 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5622 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the new parameters to the controller and let mgmt
 * decide whether to persist them.
 */
5628 store_hint = hci_le_conn_update(hcon, min, max, latency,
5630 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5631 store_hint, min, max, latency,
/* Handle an LE Credit Based Connection Response.
 * Sanity-checks the peer's parameters (MTU/MPS >= 23, dcid inside the
 * LE dynamic range), finds the pending channel by command ident, and
 * on success stores the remote parameters and marks the channel ready.
 * Authentication/encryption errors trigger a security upgrade and a
 * retried connect (unless we already have MITM-level security); all
 * other results delete the channel with ECONNREFUSED.
 */
5639 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5640 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5643 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5644 struct hci_conn *hcon = conn->hcon;
5645 u16 dcid, mtu, mps, credits, result;
5646 struct l2cap_chan *chan;
5649 if (cmd_len < sizeof(*rsp))
5652 dcid = __le16_to_cpu(rsp->dcid);
5653 mtu = __le16_to_cpu(rsp->mtu);
5654 mps = __le16_to_cpu(rsp->mps);
5655 credits = __le16_to_cpu(rsp->credits);
5656 result = __le16_to_cpu(rsp->result);
/* 23 is the minimum LE MTU/MPS; dcid must be in the LE dynamic
 * CID range for a successful response.
 */
5658 if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5659 dcid < L2CAP_CID_DYN_START ||
5660 dcid > L2CAP_CID_LE_DYN_END))
5663 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5664 dcid, mtu, mps, credits, result);
5666 mutex_lock(&conn->chan_lock);
/* The pending channel is tracked by the request's ident. */
5668 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5676 l2cap_chan_lock(chan);
5679 case L2CAP_CR_LE_SUCCESS:
/* Duplicate dcid would alias another channel — refuse it. */
5680 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5688 chan->remote_mps = mps;
5689 chan->tx_credits = credits;
5690 l2cap_chan_ready(chan);
5693 case L2CAP_CR_LE_AUTHENTICATION:
5694 case L2CAP_CR_LE_ENCRYPTION:
5695 /* If we already have MITM protection we can't do
5698 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5699 l2cap_chan_del(chan, ECONNREFUSED);
/* Bump security one level and retry the connect later. */
5703 sec_level = hcon->sec_level + 1;
5704 if (chan->sec_level < sec_level)
5705 chan->sec_level = sec_level;
5707 /* We'll need to send a new Connect Request */
5708 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5710 smp_conn_security(hcon, chan->sec_level);
5714 l2cap_chan_del(chan, ECONNREFUSED);
5718 l2cap_chan_unlock(chan);
5721 mutex_unlock(&conn->chan_lock);
/* Dispatch a single BR/EDR signaling command to its handler.
 * Returns 0 or a negative error; unknown opcodes are logged (the caller
 * then emits a Command Reject).
 * NOTE(review): break statements between cases are elided from this
 * sampled view.
 */
5726 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5727 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5732 switch (cmd->code) {
5733 case L2CAP_COMMAND_REJ:
5734 l2cap_command_rej(conn, cmd, cmd_len, data);
5737 case L2CAP_CONN_REQ:
5738 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5741 case L2CAP_CONN_RSP:
5742 case L2CAP_CREATE_CHAN_RSP:
5743 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5746 case L2CAP_CONF_REQ:
5747 err = l2cap_config_req(conn, cmd, cmd_len, data);
5750 case L2CAP_CONF_RSP:
5751 l2cap_config_rsp(conn, cmd, cmd_len, data);
5754 case L2CAP_DISCONN_REQ:
5755 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5758 case L2CAP_DISCONN_RSP:
5759 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
/* Echo requests are answered directly by mirroring the payload. */
5762 case L2CAP_ECHO_REQ:
5763 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5766 case L2CAP_ECHO_RSP:
5769 case L2CAP_INFO_REQ:
5770 err = l2cap_information_req(conn, cmd, cmd_len, data);
5773 case L2CAP_INFO_RSP:
5774 l2cap_information_rsp(conn, cmd, cmd_len, data);
/* AMP channel create/move signaling (high-speed channels). */
5777 case L2CAP_CREATE_CHAN_REQ:
5778 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5781 case L2CAP_MOVE_CHAN_REQ:
5782 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5785 case L2CAP_MOVE_CHAN_RSP:
5786 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5789 case L2CAP_MOVE_CHAN_CFM:
5790 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5793 case L2CAP_MOVE_CHAN_CFM_RSP:
5794 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5798 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an incoming LE Credit Based Connection Request.
 * Validates PSM/SCID/MTU/MPS, finds a listening channel for the PSM,
 * checks security, creates the new channel and replies with an LE
 * Connect Response (unless setup is deferred to userspace).
 * NOTE(review): original line numbers skip — some statements (gotos,
 * returns, parts of the response fill-in) are elided from this view.
 */
5806 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5807 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5810 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5811 struct l2cap_le_conn_rsp rsp;
5812 struct l2cap_chan *chan, *pchan;
5813 u16 dcid, scid, credits, mtu, mps;
5817 if (cmd_len != sizeof(*req))
5820 scid = __le16_to_cpu(req->scid);
5821 mtu = __le16_to_cpu(req->mtu);
5822 mps = __le16_to_cpu(req->mps);
/* Spec minimum for LE CoC MTU and MPS is 23 octets. */
5827 if (mtu < 23 || mps < 23)
5830 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5833 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5836 * Valid range: 0x0001-0x00ff
5838 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5840 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5841 result = L2CAP_CR_LE_BAD_PSM;
5846 /* Check if we have socket listening on psm */
5847 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5848 &conn->hcon->dst, LE_LINK);
5850 result = L2CAP_CR_LE_BAD_PSM;
5855 mutex_lock(&conn->chan_lock);
5856 l2cap_chan_lock(pchan);
/* Reject if the link's security doesn't meet the listener's needs. */
5858 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5860 result = L2CAP_CR_LE_AUTHENTICATION;
5862 goto response_unlock;
5865 /* Check for valid dynamic CID range */
5866 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5867 result = L2CAP_CR_LE_INVALID_SCID;
5869 goto response_unlock;
5872 /* Check if we already have channel with that dcid */
5873 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5874 result = L2CAP_CR_LE_SCID_IN_USE;
5876 goto response_unlock;
5879 chan = pchan->ops->new_connection(pchan);
5881 result = L2CAP_CR_LE_NO_MEM;
5882 goto response_unlock;
5885 bacpy(&chan->src, &conn->hcon->src);
5886 bacpy(&chan->dst, &conn->hcon->dst);
5887 chan->src_type = bdaddr_src_type(conn->hcon);
5888 chan->dst_type = bdaddr_dst_type(conn->hcon);
5892 chan->remote_mps = mps;
5894 __l2cap_chan_add(conn, chan);
/* Seed the credit-based flow control with the peer's initial credits. */
5896 l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5899 credits = chan->rx_credits;
5901 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5903 chan->ident = cmd->ident;
5905 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5906 l2cap_state_change(chan, BT_CONNECT2);
5907 /* The following result value is actually not defined
5908 * for LE CoC but we use it to let the function know
5909 * that it should bail out after doing its cleanup
5910 * instead of sending a response.
5912 result = L2CAP_CR_PEND;
5913 chan->ops->defer(chan);
5915 l2cap_chan_ready(chan);
5916 result = L2CAP_CR_LE_SUCCESS;
5920 l2cap_chan_unlock(pchan);
5921 mutex_unlock(&conn->chan_lock);
5922 l2cap_chan_put(pchan);
/* Deferred setup: the response will be sent later by userspace. */
5924 if (result == L2CAP_CR_PEND)
5929 rsp.mtu = cpu_to_le16(chan->imtu);
5930 rsp.mps = cpu_to_le16(chan->mps);
5936 rsp.dcid = cpu_to_le16(dcid);
5937 rsp.credits = cpu_to_le16(credits);
5938 rsp.result = cpu_to_le16(result);
5940 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the peer's newly granted
 * credits to tx_credits and resume transmission. A grant that would
 * overflow LE_FLOWCTL_MAX_CREDITS is a protocol error and disconnects
 * the channel.
 * NOTE(review): original line numbers skip — some statements are elided
 * from this view.
 */
5945 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5946 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5949 struct l2cap_le_credits *pkt;
5950 struct l2cap_chan *chan;
5951 u16 cid, credits, max_credits;
5953 if (cmd_len != sizeof(*pkt))
5956 pkt = (struct l2cap_le_credits *) data;
5957 cid = __le16_to_cpu(pkt->cid);
5958 credits = __le16_to_cpu(pkt->credits);
5960 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5962 chan = l2cap_get_chan_by_dcid(conn, cid);
/* Peer must never push the credit count past the protocol maximum. */
5966 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5967 if (credits > max_credits) {
5968 BT_ERR("LE credits overflow");
5969 l2cap_send_disconn_req(chan, ECONNRESET);
5971 /* Return 0 so that we don't trigger an unnecessary
5972 * command reject packet.
5977 chan->tx_credits += credits;
5979 /* Resume sending */
5980 l2cap_le_flowctl_send(chan);
5982 if (chan->tx_credits)
5983 chan->ops->resume(chan);
5986 l2cap_chan_unlock(chan);
5987 l2cap_chan_put(chan);
/* Handle an Enhanced Credit Based (ECRED) Connection Request, which may
 * open up to L2CAP_ECRED_MAX_CID channels at once. Validates lengths,
 * PSM and per-SCID constraints, creates one channel per valid SCID and
 * sends a single aggregate response carrying all allocated DCIDs.
 * NOTE(review): original line numbers skip — some statements (gotos,
 * declarations of pdu/num_scid/len/i, unlock paths) are elided from
 * this view.
 */
5992 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5993 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5996 struct l2cap_ecred_conn_req *req = (void *) data;
5998 struct l2cap_ecred_conn_rsp rsp;
5999 __le16 dcid[L2CAP_ECRED_MAX_CID];
6001 struct l2cap_chan *chan, *pchan;
/* The variable part of the PDU must be a whole number of 16-bit SCIDs. */
6011 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6012 result = L2CAP_CR_LE_INVALID_PARAMS;
6016 cmd_len -= sizeof(*req);
6017 num_scid = cmd_len / sizeof(u16);
6019 if (num_scid > ARRAY_SIZE(pdu.dcid)) {
6020 result = L2CAP_CR_LE_INVALID_PARAMS;
6024 mtu = __le16_to_cpu(req->mtu);
6025 mps = __le16_to_cpu(req->mps);
6027 if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
6028 result = L2CAP_CR_LE_UNACCEPT_PARAMS;
6034 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
6037 * Valid range: 0x0001-0x00ff
6039 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
6041 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
6042 result = L2CAP_CR_LE_BAD_PSM;
6046 BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
6048 memset(&pdu, 0, sizeof(pdu));
6050 /* Check if we have socket listening on psm */
6051 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
6052 &conn->hcon->dst, LE_LINK);
6054 result = L2CAP_CR_LE_BAD_PSM;
6058 mutex_lock(&conn->chan_lock);
6059 l2cap_chan_lock(pchan);
6061 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
6063 result = L2CAP_CR_LE_AUTHENTICATION;
6067 result = L2CAP_CR_LE_SUCCESS;
/* Allocate one channel per requested SCID; a dcid of 0 in the response
 * marks a refused channel while the others can still succeed.
 */
6069 for (i = 0; i < num_scid; i++) {
6070 u16 scid = __le16_to_cpu(req->scid[i]);
6072 BT_DBG("scid[%d] 0x%4.4x", i, scid);
6074 pdu.dcid[i] = 0x0000;
6075 len += sizeof(*pdu.dcid);
6077 /* Check for valid dynamic CID range */
6078 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
6079 result = L2CAP_CR_LE_INVALID_SCID;
6083 /* Check if we already have channel with that dcid */
6084 if (__l2cap_get_chan_by_dcid(conn, scid)) {
6085 result = L2CAP_CR_LE_SCID_IN_USE;
6089 chan = pchan->ops->new_connection(pchan);
6091 result = L2CAP_CR_LE_NO_MEM;
6095 bacpy(&chan->src, &conn->hcon->src);
6096 bacpy(&chan->dst, &conn->hcon->dst);
6097 chan->src_type = bdaddr_src_type(conn->hcon);
6098 chan->dst_type = bdaddr_dst_type(conn->hcon);
6102 chan->remote_mps = mps;
6104 __l2cap_chan_add(conn, chan);
6106 l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
/* Fill the shared rsp fields once, from the first created channel. */
6109 if (!pdu.rsp.credits) {
6110 pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6111 pdu.rsp.mps = cpu_to_le16(chan->mps);
6112 pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6115 pdu.dcid[i] = cpu_to_le16(chan->scid);
6117 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6119 chan->ident = cmd->ident;
6121 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6122 l2cap_state_change(chan, BT_CONNECT2);
6124 chan->ops->defer(chan);
6126 l2cap_chan_ready(chan);
6131 l2cap_chan_unlock(pchan);
6132 mutex_unlock(&conn->chan_lock);
6133 l2cap_chan_put(pchan);
6136 pdu.rsp.result = cpu_to_le16(result);
6141 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6142 sizeof(pdu.rsp) + len, &pdu);
/* Handle an Enhanced Credit Based Connection Response for a multi-channel
 * request we sent. Walks all channels pending on cmd->ident, consumes one
 * dcid from the response per channel, and readies, retries (security) or
 * deletes each channel according to the result code.
 * NOTE(review): original line numbers skip — some statements (dcid/i
 * declarations, continue/break, success-path assignments) are elided
 * from this view.
 */
6147 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6148 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6151 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6152 struct hci_conn *hcon = conn->hcon;
6153 u16 mtu, mps, credits, result;
6154 struct l2cap_chan *chan, *tmp;
6155 int err = 0, sec_level;
6158 if (cmd_len < sizeof(*rsp))
6161 mtu = __le16_to_cpu(rsp->mtu);
6162 mps = __le16_to_cpu(rsp->mps);
6163 credits = __le16_to_cpu(rsp->credits);
6164 result = __le16_to_cpu(rsp->result);
6166 BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6169 mutex_lock(&conn->chan_lock);
/* cmd_len now counts only the trailing dcid array. */
6171 cmd_len -= sizeof(*rsp);
6173 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
/* Only channels still pending on this request's ident are ours. */
6176 if (chan->ident != cmd->ident ||
6177 chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6178 chan->state == BT_CONNECTED)
6181 l2cap_chan_lock(chan);
6183 /* Check that there is a dcid for each pending channel */
6184 if (cmd_len < sizeof(dcid)) {
6185 l2cap_chan_del(chan, ECONNREFUSED);
6186 l2cap_chan_unlock(chan);
6190 dcid = __le16_to_cpu(rsp->dcid[i++]);
6191 cmd_len -= sizeof(u16);
6193 BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6195 /* Check if dcid is already in use */
6196 if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6197 /* If a device receives a
6198 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6199 * already-assigned Destination CID, then both the
6200 * original channel and the new channel shall be
6201 * immediately discarded and not used.
6203 l2cap_chan_del(chan, ECONNREFUSED);
6204 l2cap_chan_unlock(chan);
6205 chan = __l2cap_get_chan_by_dcid(conn, dcid);
6206 l2cap_chan_lock(chan);
6207 l2cap_chan_del(chan, ECONNRESET);
6208 l2cap_chan_unlock(chan);
6213 case L2CAP_CR_LE_AUTHENTICATION:
6214 case L2CAP_CR_LE_ENCRYPTION:
6215 /* If we already have MITM protection we can't do
6218 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6219 l2cap_chan_del(chan, ECONNREFUSED);
/* Raise security one step and retry via SMP pairing. */
6223 sec_level = hcon->sec_level + 1;
6224 if (chan->sec_level < sec_level)
6225 chan->sec_level = sec_level;
6227 /* We'll need to send a new Connect Request */
6228 clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6230 smp_conn_security(hcon, chan->sec_level);
6233 case L2CAP_CR_LE_BAD_PSM:
6234 l2cap_chan_del(chan, ECONNREFUSED);
6238 /* If dcid was not set it means channels was refused */
6240 l2cap_chan_del(chan, ECONNREFUSED);
6247 chan->remote_mps = mps;
6248 chan->tx_credits = credits;
6249 l2cap_chan_ready(chan);
6253 l2cap_chan_unlock(chan);
6256 mutex_unlock(&conn->chan_lock);
/* Handle an Enhanced Credit Based Reconfigure Request: the peer changes
 * MTU/MPS for a set of its channels. Validates the PDU length and the
 * new MTU/MPS, rejects any MTU decrease, updates remote_mps per channel
 * and replies with an ECRED Reconfigure Response.
 *
 * FIX(review): the length check previously read
 *     cmd_len - sizeof(*req) % sizeof(u16)
 * but '%' binds tighter than '-', so it computed
 * cmd_len - (sizeof(*req) % sizeof(u16)) == cmd_len - 0, i.e. the
 * intended "variable part is a whole number of u16 SCIDs" check never
 * worked. Parenthesized to match the equivalent (correct) check in
 * l2cap_ecred_conn_req.
 *
 * NOTE(review): original line numbers skip — some statements (gotos,
 * declarations of num_scid/i/scid, respond label) are elided from this
 * view.
 */
6261 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6262 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6265 struct l2cap_ecred_reconf_req *req = (void *) data;
6266 struct l2cap_ecred_reconf_rsp rsp;
6267 u16 mtu, mps, result;
6268 struct l2cap_chan *chan;
/* Reject short PDUs and trailing bytes that aren't whole u16 SCIDs. */
6274 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6275 result = L2CAP_CR_LE_INVALID_PARAMS;
6279 mtu = __le16_to_cpu(req->mtu);
6280 mps = __le16_to_cpu(req->mps);
6282 BT_DBG("mtu %u mps %u", mtu, mps);
6284 if (mtu < L2CAP_ECRED_MIN_MTU) {
6285 result = L2CAP_RECONF_INVALID_MTU;
6289 if (mps < L2CAP_ECRED_MIN_MPS) {
6290 result = L2CAP_RECONF_INVALID_MPS;
6294 cmd_len -= sizeof(*req);
6295 num_scid = cmd_len / sizeof(u16);
6296 result = L2CAP_RECONF_SUCCESS;
6298 for (i = 0; i < num_scid; i++) {
6301 scid = __le16_to_cpu(req->scid[i]);
6305 chan = __l2cap_get_chan_by_dcid(conn, scid);
6309 /* If the MTU value is decreased for any of the included
6310 * channels, then the receiver shall disconnect all
6311 * included channels.
6313 if (chan->omtu > mtu) {
6314 BT_ERR("chan %p decreased MTU %u -> %u", chan,
6316 result = L2CAP_RECONF_INVALID_MTU;
6320 chan->remote_mps = mps;
6324 rsp.result = cpu_to_le16(result);
6326 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
/* Handle an Enhanced Credit Based Reconfigure Response for a request we
 * sent: on a non-success result, delete every channel still pending on
 * this command's ident.
 *
 * FIX(review): BT_DBG was handed rsp->result, a __le16 — wrong value on
 * big-endian hosts and a sparse endianness warning. Print the
 * host-order 'result' that was just converted on the line above.
 *
 * NOTE(review): original line numbers skip — some statements (result
 * declaration, success early-return, locking) are elided from this
 * view.
 */
6332 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6333 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6336 struct l2cap_chan *chan, *tmp;
6337 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6340 if (cmd_len < sizeof(*rsp))
6343 result = __le16_to_cpu(rsp->result);
6345 BT_DBG("result 0x%4.4x", result);
6350 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6351 if (chan->ident != cmd->ident)
6354 l2cap_chan_del(chan, ECONNRESET);
/* Handle an LE Command Reject from the peer: find the channel waiting on
 * the rejected command's ident and tear it down with ECONNREFUSED.
 * NOTE(review): original line numbers skip — some statements (returns,
 * unlock label) are elided from this view.
 */
6360 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6361 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6364 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6365 struct l2cap_chan *chan;
6367 if (cmd_len < sizeof(*rej))
6370 mutex_lock(&conn->chan_lock);
6372 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6376 l2cap_chan_lock(chan);
6377 l2cap_chan_del(chan, ECONNREFUSED);
6378 l2cap_chan_unlock(chan);
6381 mutex_unlock(&conn->chan_lock);
/* Dispatch a single LE signaling command to its handler. Unknown opcodes
 * are logged; the caller emits a Command Reject on error.
 * NOTE(review): break statements between cases are elided from this
 * sampled view.
 */
6385 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6386 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6391 switch (cmd->code) {
6392 case L2CAP_COMMAND_REJ:
6393 l2cap_le_command_rej(conn, cmd, cmd_len, data);
6396 case L2CAP_CONN_PARAM_UPDATE_REQ:
6397 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6400 case L2CAP_CONN_PARAM_UPDATE_RSP:
6403 case L2CAP_LE_CONN_RSP:
6404 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6407 case L2CAP_LE_CONN_REQ:
6408 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6411 case L2CAP_LE_CREDITS:
6412 err = l2cap_le_credits(conn, cmd, cmd_len, data);
6415 case L2CAP_ECRED_CONN_REQ:
6416 err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6419 case L2CAP_ECRED_CONN_RSP:
6420 err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6423 case L2CAP_ECRED_RECONF_REQ:
6424 err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6427 case L2CAP_ECRED_RECONF_RSP:
6428 err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6431 case L2CAP_DISCONN_REQ:
6432 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6435 case L2CAP_DISCONN_RSP:
6436 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6440 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the LE signaling channel: validate the link
 * type and command header, dispatch exactly one command (LE signaling
 * carries one command per PDU), and send a Command Reject on failure.
 * NOTE(review): original line numbers skip — some statements (drop
 * label, kfree_skb) are elided from this view.
 */
6448 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6449 struct sk_buff *skb)
6451 struct hci_conn *hcon = conn->hcon;
6452 struct l2cap_cmd_hdr *cmd;
6456 if (hcon->type != LE_LINK)
6459 if (skb->len < L2CAP_CMD_HDR_SIZE)
6462 cmd = (void *) skb->data;
6463 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6465 len = le16_to_cpu(cmd->len);
6467 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* Header length must match the remaining payload; ident 0 is invalid. */
6469 if (len != skb->len || !cmd->ident) {
6470 BT_DBG("corrupted command");
6474 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6476 struct l2cap_cmd_rej_unk rej;
6478 BT_ERR("Wrong link type (%d)", err);
6480 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6481 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an skb received on the BR/EDR signaling channel. Unlike LE, a
 * BR/EDR signaling PDU may carry multiple commands, so this loops while
 * a full command header remains, rejecting malformed commands.
 * NOTE(review): original line numbers skip — some statements (skb_pull
 * of the payload, break on corruption, kfree_skb) are elided from this
 * view.
 */
6489 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6490 struct sk_buff *skb)
6492 struct hci_conn *hcon = conn->hcon;
6493 struct l2cap_cmd_hdr *cmd;
/* Mirror raw signaling traffic to any raw sockets first. */
6496 l2cap_raw_recv(conn, skb);
6498 if (hcon->type != ACL_LINK)
6501 while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6504 cmd = (void *) skb->data;
6505 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6507 len = le16_to_cpu(cmd->len);
6509 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
/* Claimed length must fit in what's left; ident 0 is invalid. */
6512 if (len > skb->len || !cmd->ident) {
6513 BT_DBG("corrupted command");
6517 err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6519 struct l2cap_cmd_rej_unk rej;
6521 BT_ERR("Wrong link type (%d)", err);
6523 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6524 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 FCS trailer of an ERTM/streaming frame when the
 * channel uses L2CAP_FCS_CRC16. The skb is trimmed so its length no
 * longer covers the FCS; the received FCS is then read from just past
 * the trimmed end and compared against a CRC computed over the L2CAP
 * header (still in front of skb->data) plus the payload.
 * NOTE(review): original line numbers skip — error/success returns are
 * elided from this view.
 */
6535 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
6537 u16 our_fcs, rcv_fcs;
/* Extended control field means a larger frame header to cover. */
6540 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6541 hdr_size = L2CAP_EXT_HDR_SIZE;
6543 hdr_size = L2CAP_ENH_HDR_SIZE;
6545 if (chan->fcs == L2CAP_FCS_CRC16) {
6546 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6547 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6548 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6550 if (our_fcs != rcv_fcs)
/* Send the appropriate frame carrying the F-bit for ERTM: an RNR when
 * locally busy, otherwise pending I-frames, falling back to an RR if no
 * I-frame or S-frame carried the F-bit.
 */
6556 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6558 struct l2cap_ctrl control;
6560 BT_DBG("chan %p", chan);
6562 memset(&control, 0, sizeof(control));
6565 control.reqseq = chan->buffer_seq;
6566 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6568 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6569 control.super = L2CAP_SUPER_RNR;
6570 l2cap_send_sframe(chan, &control);
/* Peer came out of busy with frames still unacked: restart retransmit
 * timer so those frames aren't lost.
 */
6573 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6574 chan->unacked_frames > 0)
6575 __set_retrans_timer(chan);
6577 /* Send pending iframes */
6578 l2cap_ertm_send(chan);
6580 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6581 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6582 /* F-bit wasn't sent in an s-frame or i-frame yet, so
6585 control.super = L2CAP_SUPER_RR;
6586 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list (first fragment goes directly into
 * the shinfo frag_list, later ones chain off *last_frag) and account its
 * length/truesize into the head skb.
 * NOTE(review): original line numbers skip — the else branch separating
 * the two linking cases is elided from this view.
 */
6590 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6591 struct sk_buff **last_frag)
6593 /* skb->len reflects data in skb as well as all fragments
6594 * skb->data_len reflects only data in fragments
6596 if (!skb_has_frag_list(skb))
6597 skb_shinfo(skb)->frag_list = new_frag;
6599 new_frag->next = NULL;
6601 (*last_frag)->next = new_frag;
6602 *last_frag = new_frag;
6604 skb->len += new_frag->len;
6605 skb->data_len += new_frag->len;
6606 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM segments according to control->sar:
 * unsegmented frames go straight to the channel's recv op; START frames
 * record the SDU length and begin chan->sdu; CONTINUE/END frames are
 * appended via append_skb_frag, and a completed SDU is delivered.
 * NOTE(review): original line numbers skip — several guard checks,
 * gotos and the error-cleanup path are elided from this view.
 */
6609 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6610 struct l2cap_ctrl *control)
6614 switch (control->sar) {
6615 case L2CAP_SAR_UNSEGMENTED:
6619 err = chan->ops->recv(chan, skb);
6622 case L2CAP_SAR_START:
/* The first two payload bytes of a START frame carry the SDU length. */
6626 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
6629 chan->sdu_len = get_unaligned_le16(skb->data);
6630 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* SDU larger than our MTU is a protocol violation. */
6632 if (chan->sdu_len > chan->imtu) {
6637 if (skb->len >= chan->sdu_len)
6641 chan->sdu_last_frag = skb;
6647 case L2CAP_SAR_CONTINUE:
6651 append_skb_frag(chan->sdu, skb,
6652 &chan->sdu_last_frag);
/* A CONTINUE segment must not reach the announced SDU length. */
6655 if (chan->sdu->len >= chan->sdu_len)
6665 append_skb_frag(chan->sdu, skb,
6666 &chan->sdu_last_frag);
/* END segment: total length must match exactly before delivery. */
6669 if (chan->sdu->len != chan->sdu_len)
6672 err = chan->ops->recv(chan, chan->sdu);
6675 /* Reassembly complete */
6677 chan->sdu_last_frag = NULL;
/* Error path: drop the partially reassembled SDU. */
6685 kfree_skb(chan->sdu);
6687 chan->sdu_last_frag = NULL;
/* Re-segment queued outbound data after a channel move changed the MTU.
 * NOTE(review): the body is elided from this sampled view (original
 * line numbers skip) — confirm against the full source.
 */
6694 static int l2cap_resegment(struct l2cap_chan *chan)
/* Signal local-busy entry/exit to the ERTM tx state machine. No-op for
 * non-ERTM channels, which have no local-busy concept.
 */
6700 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6704 if (chan->mode != L2CAP_MODE_ERTM)
6707 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6708 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver consecutively sequenced I-frames to the
 * reassembler until a gap (or local busy) is hit; if the queue empties,
 * return to the plain RECV state and ack what was consumed.
 * NOTE(review): original line numbers skip — loop break on a missing
 * seq and the error/return paths are elided from this view.
 */
6711 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6714 /* Pass sequential frames to l2cap_reassemble_sdu()
6715 * until a gap is encountered.
6718 BT_DBG("chan %p", chan);
6720 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6721 struct sk_buff *skb;
6722 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6723 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6725 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6730 skb_unlink(skb, &chan->srej_q);
6731 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6732 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6737 if (skb_queue_empty(&chan->srej_q)) {
6738 chan->rx_state = L2CAP_RX_STATE_RECV;
6739 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame, honoring P/F bit semantics and the CONN_SREJ_ACT guard that
 * prevents retransmitting the same frame twice for one SREJ exchange.
 * NOTE(review): original line numbers skip — several returns and else
 * branches are elided from this view.
 */
6745 static void l2cap_handle_srej(struct l2cap_chan *chan,
6746 struct l2cap_ctrl *control)
6748 struct sk_buff *skb;
6750 BT_DBG("chan %p, control %p", chan, control);
/* An SREJ for the next unused seq can't reference a sent frame. */
6752 if (control->reqseq == chan->next_tx_seq) {
6753 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6754 l2cap_send_disconn_req(chan, ECONNRESET);
6758 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6761 BT_DBG("Seq %d not available for retransmission",
/* Give up the channel once a frame exhausts its retry budget. */
6766 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6767 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6768 l2cap_send_disconn_req(chan, ECONNRESET);
6772 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6774 if (control->poll) {
6775 l2cap_pass_to_tx(chan, control);
6777 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6778 l2cap_retransmit(chan, control);
6779 l2cap_ertm_send(chan);
6781 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6782 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6783 chan->srej_save_reqseq = control->reqseq;
6786 l2cap_pass_to_tx_fbit(chan, control);
6788 if (control->final) {
/* Only retransmit if this final SREJ isn't the echo of one we
 * already acted on (same saved reqseq with SREJ_ACT set).
 */
6789 if (chan->srej_save_reqseq != control->reqseq ||
6790 !test_and_clear_bit(CONN_SREJ_ACT,
6792 l2cap_retransmit(chan, control);
6794 l2cap_retransmit(chan, control);
6795 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6796 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6797 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit everything from reqseq
 * onward, guarded by the retry limit and the CONN_REJ_ACT bit that
 * suppresses duplicate retransmission for a final REJ.
 * NOTE(review): original line numbers skip — returns are elided from
 * this view.
 */
6803 static void l2cap_handle_rej(struct l2cap_chan *chan,
6804 struct l2cap_ctrl *control)
6806 struct sk_buff *skb;
6808 BT_DBG("chan %p, control %p", chan, control);
/* A REJ for the next unused seq can't reference a sent frame. */
6810 if (control->reqseq == chan->next_tx_seq) {
6811 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6812 l2cap_send_disconn_req(chan, ECONNRESET);
6816 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6818 if (chan->max_tx && skb &&
6819 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6820 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6821 l2cap_send_disconn_req(chan, ECONNRESET);
6825 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6827 l2cap_pass_to_tx(chan, control);
6829 if (control->final) {
/* Skip retransmission if we already handled this REJ exchange. */
6830 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6831 l2cap_retransmit_all(chan, control);
6833 l2cap_retransmit_all(chan, control);
6834 l2cap_ertm_send(chan);
6835 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6836 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify a received I-frame's txseq relative to the rx window:
 * EXPECTED / UNEXPECTED (gap) / DUPLICATE / INVALID(_IGNORE), with
 * additional SREJ-specific classes while in the SREJ_SENT state.
 * Returns one of the L2CAP_TXSEQ_* codes consumed by the rx state
 * machines.
 */
6840 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6842 BT_DBG("chan %p, txseq %d", chan, txseq);
6844 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6845 chan->expected_tx_seq);
6847 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6848 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6850 /* See notes below regarding "double poll" and
6853 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6854 BT_DBG("Invalid/Ignore - after SREJ");
6855 return L2CAP_TXSEQ_INVALID_IGNORE;
6857 BT_DBG("Invalid - in window after SREJ sent");
6858 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list is the retransmission we asked for next. */
6862 if (chan->srej_list.head == txseq) {
6863 BT_DBG("Expected SREJ");
6864 return L2CAP_TXSEQ_EXPECTED_SREJ;
6867 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6868 BT_DBG("Duplicate SREJ - txseq already stored");
6869 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6872 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6873 BT_DBG("Unexpected SREJ - not requested");
6874 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6878 if (chan->expected_tx_seq == txseq) {
6879 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6881 BT_DBG("Invalid - txseq outside tx window")
6885 return L2CAP_TXSEQ_EXPECTED;
/* txseq before expected_tx_seq (mod window) is a replay. */
6889 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6890 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6891 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6892 return L2CAP_TXSEQ_DUPLICATE;
6895 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6896 /* A source of invalid packets is a "double poll" condition,
6897 * where delays cause us to send multiple poll packets. If
6898 * the remote stack receives and processes both polls,
6899 * sequence numbers can wrap around in such a way that a
6900 * resent frame has a sequence number that looks like new data
6901 * with a sequence gap. This would trigger an erroneous SREJ
6904 * Fortunately, this is impossible with a tx window that's
6905 * less than half of the maximum sequence number, which allows
6906 * invalid frames to be safely ignored.
6908 * With tx window sizes greater than half of the tx window
6909 * maximum, the frame is invalid and cannot be ignored. This
6910 * causes a disconnect.
6913 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6914 BT_DBG("Invalid/Ignore - txseq outside tx window");
6915 return L2CAP_TXSEQ_INVALID_IGNORE;
6917 BT_DBG("Invalid - txseq outside tx window");
6918 return L2CAP_TXSEQ_INVALID;
6921 BT_DBG("Unexpected - txseq indicates missing frames");
6922 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM rx state machine, RECV state: process one received event
 * (I-frame, RR, RNR, REJ, SREJ). Expected I-frames are reassembled; a
 * sequence gap queues the frame and transitions to SREJ_SENT. Frames
 * not taken ownership of (skb_in_use stays false) are freed at the end.
 * NOTE(review): original line numbers skip — several assignments
 * (skb_in_use = true, txseq bookkeeping), breaks and the final return
 * are elided from this view.
 */
6926 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6927 struct l2cap_ctrl *control,
6928 struct sk_buff *skb, u8 event)
6930 struct l2cap_ctrl local_control;
6932 bool skb_in_use = false;
6934 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6938 case L2CAP_EV_RECV_IFRAME:
6939 switch (l2cap_classify_txseq(chan, control->txseq)) {
6940 case L2CAP_TXSEQ_EXPECTED:
6941 l2cap_pass_to_tx(chan, control);
6943 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6944 BT_DBG("Busy, discarding expected seq %d",
6949 chan->expected_tx_seq = __next_seq(chan,
6952 chan->buffer_seq = chan->expected_tx_seq;
6955 /* l2cap_reassemble_sdu may free skb, hence invalidate
6956 * control, so make a copy in advance to use it after
6957 * l2cap_reassemble_sdu returns and to avoid the race
6958 * condition, for example:
6960 * The current thread calls:
6961 * l2cap_reassemble_sdu
6962 * chan->ops->recv == l2cap_sock_recv_cb
6963 * __sock_queue_rcv_skb
6964 * Another thread calls:
6968 * Then the current thread tries to access control, but
6969 * it was freed by skb_free_datagram.
6971 local_control = *control;
6972 err = l2cap_reassemble_sdu(chan, skb, control);
6976 if (local_control.final) {
6977 if (!test_and_clear_bit(CONN_REJ_ACT,
6978 &chan->conn_state)) {
6979 local_control.final = 0;
6980 l2cap_retransmit_all(chan, &local_control);
6981 l2cap_ertm_send(chan);
6985 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6986 l2cap_send_ack(chan);
6988 case L2CAP_TXSEQ_UNEXPECTED:
6989 l2cap_pass_to_tx(chan, control);
6991 /* Can't issue SREJ frames in the local busy state.
6992 * Drop this frame, it will be seen as missing
6993 * when local busy is exited.
6995 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6996 BT_DBG("Busy, discarding unexpected seq %d",
7001 /* There was a gap in the sequence, so an SREJ
7002 * must be sent for each missing frame. The
7003 * current frame is stored for later use.
7005 skb_queue_tail(&chan->srej_q, skb);
7007 BT_DBG("Queued %p (queue len %d)", skb,
7008 skb_queue_len(&chan->srej_q));
7010 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
7011 l2cap_seq_list_clear(&chan->srej_list);
7012 l2cap_send_srej(chan, control->txseq);
7014 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
7016 case L2CAP_TXSEQ_DUPLICATE:
7017 l2cap_pass_to_tx(chan, control);
7019 case L2CAP_TXSEQ_INVALID_IGNORE:
7021 case L2CAP_TXSEQ_INVALID:
7023 l2cap_send_disconn_req(chan, ECONNRESET);
7027 case L2CAP_EV_RECV_RR:
7028 l2cap_pass_to_tx(chan, control);
7029 if (control->final) {
7030 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Don't retransmit during an in-progress AMP channel move. */
7032 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
7033 !__chan_is_moving(chan)) {
7035 l2cap_retransmit_all(chan, control);
7038 l2cap_ertm_send(chan);
7039 } else if (control->poll) {
7040 l2cap_send_i_or_rr_or_rnr(chan);
7042 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7043 &chan->conn_state) &&
7044 chan->unacked_frames)
7045 __set_retrans_timer(chan);
7047 l2cap_ertm_send(chan);
7050 case L2CAP_EV_RECV_RNR:
7051 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7052 l2cap_pass_to_tx(chan, control);
7053 if (control && control->poll) {
7054 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7055 l2cap_send_rr_or_rnr(chan, 0);
7057 __clear_retrans_timer(chan);
7058 l2cap_seq_list_clear(&chan->retrans_list);
7060 case L2CAP_EV_RECV_REJ:
7061 l2cap_handle_rej(chan, control);
7063 case L2CAP_EV_RECV_SREJ:
7064 l2cap_handle_srej(chan, control);
/* Frame was not queued or consumed above: release it. */
7070 if (skb && !skb_in_use) {
7071 BT_DBG("Freeing %p", skb);
/* ERTM rx state machine, SREJ_SENT state: we have outstanding SREJs for
 * missing frames. Incoming I-frames are queued in srej_q (in-order
 * delivery resumes via l2cap_rx_queued_iframes once the gaps fill);
 * further gaps trigger more SREJs. S-frames are handled largely as in
 * RECV but answer polls with the SREJ tail.
 * NOTE(review): original line numbers skip — skb_in_use assignments,
 * breaks and the final return are elided from this view.
 */
7078 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
7079 struct l2cap_ctrl *control,
7080 struct sk_buff *skb, u8 event)
7083 u16 txseq = control->txseq;
7084 bool skb_in_use = false;
7086 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7090 case L2CAP_EV_RECV_IFRAME:
7091 switch (l2cap_classify_txseq(chan, txseq)) {
7092 case L2CAP_TXSEQ_EXPECTED:
7093 /* Keep frame for reassembly later */
7094 l2cap_pass_to_tx(chan, control);
7095 skb_queue_tail(&chan->srej_q, skb);
7097 BT_DBG("Queued %p (queue len %d)", skb,
7098 skb_queue_len(&chan->srej_q));
7100 chan->expected_tx_seq = __next_seq(chan, txseq);
/* The retransmission we requested arrived: pop it off the SREJ
 * list and try to drain the queue in order.
 */
7102 case L2CAP_TXSEQ_EXPECTED_SREJ:
7103 l2cap_seq_list_pop(&chan->srej_list);
7105 l2cap_pass_to_tx(chan, control);
7106 skb_queue_tail(&chan->srej_q, skb);
7108 BT_DBG("Queued %p (queue len %d)", skb,
7109 skb_queue_len(&chan->srej_q));
7111 err = l2cap_rx_queued_iframes(chan);
7116 case L2CAP_TXSEQ_UNEXPECTED:
7117 /* Got a frame that can't be reassembled yet.
7118 * Save it for later, and send SREJs to cover
7119 * the missing frames.
7121 skb_queue_tail(&chan->srej_q, skb);
7123 BT_DBG("Queued %p (queue len %d)", skb,
7124 skb_queue_len(&chan->srej_q));
7126 l2cap_pass_to_tx(chan, control);
7127 l2cap_send_srej(chan, control->txseq);
7129 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7130 /* This frame was requested with an SREJ, but
7131 * some expected retransmitted frames are
7132 * missing. Request retransmission of missing
7135 skb_queue_tail(&chan->srej_q, skb);
7137 BT_DBG("Queued %p (queue len %d)", skb,
7138 skb_queue_len(&chan->srej_q));
7140 l2cap_pass_to_tx(chan, control);
7141 l2cap_send_srej_list(chan, control->txseq);
7143 case L2CAP_TXSEQ_DUPLICATE_SREJ:
7144 /* We've already queued this frame. Drop this copy. */
7145 l2cap_pass_to_tx(chan, control);
7147 case L2CAP_TXSEQ_DUPLICATE:
7148 /* Expecting a later sequence number, so this frame
7149 * was already received. Ignore it completely.
7152 case L2CAP_TXSEQ_INVALID_IGNORE:
7154 case L2CAP_TXSEQ_INVALID:
7156 l2cap_send_disconn_req(chan, ECONNRESET);
7160 case L2CAP_EV_RECV_RR:
7161 l2cap_pass_to_tx(chan, control);
7162 if (control->final) {
7163 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7165 if (!test_and_clear_bit(CONN_REJ_ACT,
7166 &chan->conn_state)) {
7168 l2cap_retransmit_all(chan, control);
7171 l2cap_ertm_send(chan);
7172 } else if (control->poll) {
7173 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7174 &chan->conn_state) &&
7175 chan->unacked_frames) {
7176 __set_retrans_timer(chan);
/* Answer a poll by re-sending the SREJ at the list tail. */
7179 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7180 l2cap_send_srej_tail(chan);
7182 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7183 &chan->conn_state) &&
7184 chan->unacked_frames)
7185 __set_retrans_timer(chan);
7187 l2cap_send_ack(chan);
7190 case L2CAP_EV_RECV_RNR:
7191 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7192 l2cap_pass_to_tx(chan, control);
7193 if (control->poll) {
7194 l2cap_send_srej_tail(chan);
7196 struct l2cap_ctrl rr_control;
7197 memset(&rr_control, 0, sizeof(rr_control));
7198 rr_control.sframe = 1;
7199 rr_control.super = L2CAP_SUPER_RR;
7200 rr_control.reqseq = chan->buffer_seq;
7201 l2cap_send_sframe(chan, &rr_control);
7205 case L2CAP_EV_RECV_REJ:
7206 l2cap_handle_rej(chan, control);
7208 case L2CAP_EV_RECV_SREJ:
7209 l2cap_handle_srej(chan, control);
/* Frame was not queued above: release it. */
7213 if (skb && !skb_in_use) {
7214 BT_DBG("Freeing %p", skb);
/* Complete an ERTM channel move: return the RX state machine to RECV,
 * refresh the connection MTU for the underlying link, and resegment any
 * queued outbound data for the new MTU.
 *
 * NOTE(review): this extract is elided; the two mtu assignments below are
 * presumably the arms of an AMP (hs_hcon) vs. BR/EDR conditional — confirm
 * against the full source.
 */
7221 static int l2cap_finish_move(struct l2cap_chan *chan)
7223 BT_DBG("chan %p", chan);
7225 chan->rx_state = L2CAP_RX_STATE_RECV;
/* AMP controller link: block MTU applies */
7228 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
/* BR/EDR ACL link: ACL MTU applies */
7230 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
/* Re-cut queued TX PDUs to fit the updated MTU */
7232 return l2cap_resegment(chan);
/* RX handler for the WAIT_P state (channel move in progress, waiting for a
 * frame with the Poll bit).  Processes the acknowledgement, rewinds the TX
 * queue to the peer's reqseq, finishes the move, and answers with the Final
 * bit set.  A piggybacked I-frame is then re-dispatched through the normal
 * RECV-state handler.
 *
 * NOTE(review): several structural lines (poll check, error handling) are
 * elided in this extract.
 */
7235 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7236 struct l2cap_ctrl *control,
7237 struct sk_buff *skb, u8 event)
7241 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
/* Ack frames up to the peer's reqseq */
7247 l2cap_process_reqseq(chan, control->reqseq);
7249 if (!skb_queue_empty(&chan->tx_q))
7250 chan->tx_send_head = skb_peek(&chan->tx_q);
7252 chan->tx_send_head = NULL;
7254 /* Rewind next_tx_seq to the point expected
7257 chan->next_tx_seq = control->reqseq;
7258 chan->unacked_frames = 0;
7260 err = l2cap_finish_move(chan);
/* Respond to the poll with F=1 */
7264 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7265 l2cap_send_i_or_rr_or_rnr(chan);
7267 if (event == L2CAP_EV_RECV_IFRAME)
/* Re-run the frame through the RECV state handler (skb consumed above) */
7270 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX handler for the WAIT_F state (channel move, waiting for a frame with
 * the Final bit).  On F=1: clear remote-busy, fall back to RECV, rewind the
 * TX queue to reqseq, adopt the new link's MTU, resegment, and process the
 * received frame normally.
 *
 * NOTE(review): elided extract — the mtu assignments are presumably the two
 * arms of an hs_hcon (AMP) vs. ACL conditional, as in l2cap_finish_move.
 */
7273 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7274 struct l2cap_ctrl *control,
7275 struct sk_buff *skb, u8 event)
/* Keep waiting until the Final bit arrives */
7279 if (!control->final)
7282 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7284 chan->rx_state = L2CAP_RX_STATE_RECV;
7285 l2cap_process_reqseq(chan, control->reqseq);
7287 if (!skb_queue_empty(&chan->tx_q))
7288 chan->tx_send_head = skb_peek(&chan->tx_q);
7290 chan->tx_send_head = NULL;
7292 /* Rewind next_tx_seq to the point expected
7295 chan->next_tx_seq = control->reqseq;
7296 chan->unacked_frames = 0;
7299 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7301 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7303 err = l2cap_resegment(chan);
7306 err = l2cap_rx_state_recv(chan, control, skb, event);
7311 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7313 /* Make sure reqseq is for a packet that has been sent but not acked */
7316 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7317 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7320 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7321 struct sk_buff *skb, u8 event)
7325 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7326 control, skb, event, chan->rx_state);
7328 if (__valid_reqseq(chan, control->reqseq)) {
7329 switch (chan->rx_state) {
7330 case L2CAP_RX_STATE_RECV:
7331 err = l2cap_rx_state_recv(chan, control, skb, event);
7333 case L2CAP_RX_STATE_SREJ_SENT:
7334 err = l2cap_rx_state_srej_sent(chan, control, skb,
7337 case L2CAP_RX_STATE_WAIT_P:
7338 err = l2cap_rx_state_wait_p(chan, control, skb, event);
7340 case L2CAP_RX_STATE_WAIT_F:
7341 err = l2cap_rx_state_wait_f(chan, control, skb, event);
7348 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7349 control->reqseq, chan->next_tx_seq,
7350 chan->expected_ack_seq);
7351 l2cap_send_disconn_req(chan, ECONNRESET);
/* Receive path for streaming mode: no retransmission, frames with the
 * expected txseq are reassembled and delivered; anything else advances the
 * window or drops the frame.  txseq is copied out of @control up front
 * because reassembly may free the skb that backs it (see comment below).
 *
 * NOTE(review): elided extract — error/cleanup branches between the visible
 * lines are not shown.
 */
7357 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7358 struct sk_buff *skb)
7360 /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
7361 * the txseq field in advance to use it after l2cap_reassemble_sdu
7362 * returns and to avoid the race condition, for example:
7364 * The current thread calls:
7365 * l2cap_reassemble_sdu
7366 * chan->ops->recv == l2cap_sock_recv_cb
7367 * __sock_queue_rcv_skb
7368 * Another thread calls:
7372 * Then the current thread tries to access control, but it was freed by
7373 * skb_free_datagram.
7375 u16 txseq = control->txseq;
7377 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7380 if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
7381 l2cap_pass_to_tx(chan, control);
7383 BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
7384 __next_seq(chan, chan->buffer_seq));
7386 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7388 l2cap_reassemble_sdu(chan, skb, control);
/* Reassembly failure path: discard any partial SDU state */
7391 kfree_skb(chan->sdu);
7394 chan->sdu_last_frag = NULL;
7398 BT_DBG("Freeing %p", skb);
/* Advance the receive window past this frame */
7403 chan->last_acked_seq = txseq;
7404 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Entry point for ERTM/streaming data frames on a channel: unpacks the
 * control field, verifies FCS and payload length against MPS, validates the
 * F/P bits for the current TX state, then dispatches I-frames through
 * l2cap_rx()/l2cap_stream_rx() and S-frames through l2cap_rx() with the
 * event mapped from the supervisory function.
 *
 * NOTE(review): elided extract — drop/cleanup branches between the visible
 * lines are not shown.
 */
7409 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7411 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7415 __unpack_control(chan, skb);
7420 * We can just drop the corrupted I-frame here.
7421 * Receiver will miss it and start proper recovery
7422 * procedures and ask for retransmission.
7424 if (l2cap_check_fcs(chan, skb))
/* SDU length header is only present on the first fragment of an SDU */
7427 if (!control->sframe && control->sar == L2CAP_SAR_START)
7428 len -= L2CAP_SDULEN_SIZE;
7430 if (chan->fcs == L2CAP_FCS_CRC16)
7431 len -= L2CAP_FCS_SIZE;
/* Payload larger than the negotiated MPS is a protocol violation */
7433 if (len > chan->mps) {
7434 l2cap_send_disconn_req(chan, ECONNRESET);
/* Optional per-channel filter hook may consume/reject the skb */
7438 if (chan->ops->filter) {
7439 if (chan->ops->filter(chan, skb))
7443 if (!control->sframe) {
7446 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7447 control->sar, control->reqseq, control->final,
7450 /* Validate F-bit - F=0 always valid, F=1 only
7451 * valid in TX WAIT_F
7453 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7456 if (chan->mode != L2CAP_MODE_STREAMING) {
7457 event = L2CAP_EV_RECV_IFRAME;
7458 err = l2cap_rx(chan, control, skb, event);
7460 err = l2cap_stream_rx(chan, control, skb);
7464 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit supervisory function directly to an RX event */
7466 const u8 rx_func_to_event[4] = {
7467 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7468 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7471 /* Only I-frames are expected in streaming mode */
7472 if (chan->mode == L2CAP_MODE_STREAMING)
7475 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7476 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are a violation */
7480 BT_ERR("Trailing bytes: %d in sframe", len);
7481 l2cap_send_disconn_req(chan, ECONNRESET);
7485 /* Validate F and P bits */
7486 if (control->final && (control->poll ||
7487 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7490 event = rx_func_to_event[control->super];
7491 if (l2cap_rx(chan, control, skb, event))
7492 l2cap_send_disconn_req(chan, ECONNRESET);
/* Return receive credits to the remote side of an LE/enhanced credit based
 * channel.  Targets enough credits for a full SDU (imtu/mps + 1); if the
 * peer already holds at least that many, nothing is sent.
 */
7502 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7504 struct l2cap_conn *conn = chan->conn;
7505 struct l2cap_le_credits pkt;
/* Credits needed to receive one maximally-sized SDU */
7508 return_credits = (chan->imtu / chan->mps) + 1;
7510 if (chan->rx_credits >= return_credits)
/* Only top up the difference */
7513 return_credits -= chan->rx_credits;
7515 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7517 chan->rx_credits += return_credits;
7519 pkt.cid = cpu_to_le16(chan->scid);
7520 pkt.credits = cpu_to_le16(return_credits);
7522 chan->ident = l2cap_get_ident(conn);
7524 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Deliver a fully reassembled SDU to the channel owner, then replenish the
 * remote side's credits.  Returns the result of the recv callback.
 */
7527 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7531 BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7533 /* Wait recv to confirm reception before updating the credits */
7534 err = chan->ops->recv(chan, skb);
7536 /* Update credits whenever an SDU is received */
7537 l2cap_chan_le_send_credits(chan);
/* Receive one PDU on an LE / enhanced credit based channel: consume a
 * credit, then either start a new SDU (first fragment carries the SDU
 * length), deliver a single-fragment SDU directly, or append a continuation
 * fragment until the SDU is complete.  Oversized SDUs/fragments drop the
 * partial state.  Always returns success for handled-internally drops (see
 * final comment) so the caller does not double-free the skb.
 *
 * NOTE(review): elided extract — the drop/failed labels and several
 * branches between visible lines are not shown.
 */
7542 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* Peer must not send without credits */
7546 if (!chan->rx_credits) {
7547 BT_ERR("No credits to receive LE L2CAP data");
7548 l2cap_send_disconn_req(chan, ECONNRESET);
7552 if (chan->imtu < skb->len) {
7553 BT_ERR("Too big LE L2CAP PDU");
7558 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7560 /* Update if remote had run out of credits, this should only happens
7561 * if the remote is not using the entire MPS.
7563 if (!chan->rx_credits)
7564 l2cap_chan_le_send_credits(chan);
/* First fragment of a new SDU: read and strip the SDU length header */
7571 sdu_len = get_unaligned_le16(skb->data);
7572 skb_pull(skb, L2CAP_SDULEN_SIZE);
7574 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7575 sdu_len, skb->len, chan->imtu);
7577 if (sdu_len > chan->imtu) {
7578 BT_ERR("Too big LE L2CAP SDU length received");
7583 if (skb->len > sdu_len) {
7584 BT_ERR("Too much LE L2CAP data received");
/* Whole SDU arrived in one PDU: deliver immediately */
7589 if (skb->len == sdu_len)
7590 return l2cap_ecred_recv(chan, skb);
7593 chan->sdu_len = sdu_len;
7594 chan->sdu_last_frag = skb;
7596 /* Detect if remote is not able to use the selected MPS */
7597 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7598 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7600 /* Adjust the number of credits */
7601 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7602 chan->mps = mps_len;
7603 l2cap_chan_le_send_credits(chan);
/* Continuation fragment path */
7609 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7610 chan->sdu->len, skb->len, chan->sdu_len);
7612 if (chan->sdu->len + skb->len > chan->sdu_len) {
7613 BT_ERR("Too much LE L2CAP data received");
7618 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
/* SDU complete: hand the reassembled buffer up */
7621 if (chan->sdu->len == chan->sdu_len) {
7622 err = l2cap_ecred_recv(chan, chan->sdu);
7625 chan->sdu_last_frag = NULL;
/* Error cleanup: discard any partially reassembled SDU */
7633 kfree_skb(chan->sdu);
7635 chan->sdu_last_frag = NULL;
7639 /* We can't return an error here since we took care of the skb
7640 * freeing internally. An error return would cause the caller to
7641 * do a double-free of the skb.
/* Route an incoming data PDU to the channel identified by @cid, then hand
 * it to the mode-specific receive path (credit based, basic, or
 * ERTM/streaming).  Unknown CIDs are dropped; A2MP data may create the A2MP
 * channel on demand.  Holds a channel reference and the channel lock for
 * the duration.
 *
 * NOTE(review): elided extract — the drop/done labels and several branches
 * are not visible.
 */
7646 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7647 struct sk_buff *skb)
7649 struct l2cap_chan *chan;
7651 chan = l2cap_get_chan_by_scid(conn, cid);
/* A2MP channel is created lazily on first data */
7653 if (cid == L2CAP_CID_A2MP) {
7654 chan = a2mp_channel_create(conn, skb);
7660 l2cap_chan_hold(chan);
7661 l2cap_chan_lock(chan);
7663 BT_DBG("unknown cid 0x%4.4x", cid);
7664 /* Drop packet and return */
7670 BT_DBG("chan %p, len %d", chan, skb->len);
7672 /* If we receive data on a fixed channel before the info req/rsp
7673 * procedure is done simply assume that the channel is supported
7674 * and mark it as ready.
7677 if (chan->chan_type == L2CAP_CHAN_FIXED)
7678 l2cap_chan_ready(chan);
7680 if (chan->chan_type == L2CAP_CHAN_FIXED) {
/* 6LoWPAN/IPSP: only the initiator side is ready immediately on LE */
7681 if (chan->psm == L2CAP_PSM_IPSP) {
7682 struct l2cap_conn *conn = chan->conn;
7684 if (conn->hcon->out)
7685 l2cap_chan_ready(chan);
7686 else if (conn->hcon->type != LE_LINK)
7687 l2cap_chan_ready(chan);
7689 l2cap_chan_ready(chan);
7694 if (chan->state != BT_CONNECTED)
/* Dispatch by channel mode */
7697 switch (chan->mode) {
7698 case L2CAP_MODE_LE_FLOWCTL:
7699 case L2CAP_MODE_EXT_FLOWCTL:
7700 if (l2cap_ecred_data_rcv(chan, skb) < 0)
7705 case L2CAP_MODE_BASIC:
7706 /* If socket recv buffers overflows we drop data here
7707 * which is *bad* because L2CAP has to be reliable.
7708 * But we don't have any other choice. L2CAP doesn't
7709 * provide flow control mechanism. */
7711 if (chan->imtu < skb->len) {
7712 BT_ERR("Dropping L2CAP data: receive buffer overflow");
7716 if (!chan->ops->recv(chan, skb))
7720 case L2CAP_MODE_ERTM:
7721 case L2CAP_MODE_STREAMING:
7722 l2cap_data_rcv(chan, skb);
7726 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7734 l2cap_chan_unlock(chan);
7735 l2cap_chan_put(chan);
/* Deliver a connectionless (G-frame) PDU to the raw channel listening on
 * @psm.  Only valid on ACL links; the sender's BD_ADDR and PSM are stashed
 * in the skb control block for msg_name reporting.
 */
7738 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7739 struct sk_buff *skb)
7741 struct hci_conn *hcon = conn->hcon;
7742 struct l2cap_chan *chan;
7744 if (hcon->type != ACL_LINK)
7747 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7752 BT_DBG("chan %p, len %d", chan, skb->len);
7754 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7757 if (chan->imtu < skb->len)
7760 /* Store remote BD_ADDR and PSM for msg_name */
7761 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7762 bt_cb(skb)->l2cap.psm = psm;
/* recv returning 0 means the skb was consumed */
7764 if (!chan->ops->recv(chan, skb)) {
7765 l2cap_chan_put(chan);
7770 l2cap_chan_put(chan);
/* Demultiplex one complete L2CAP frame: parse the basic header, then route
 * by CID to the signalling, connectionless, LE signalling, or data channel
 * handlers.  Frames arriving before the HCI link is fully connected are
 * queued on pending_rx for later processing.
 */
7775 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7777 struct l2cap_hdr *lh = (void *) skb->data;
7778 struct hci_conn *hcon = conn->hcon;
7782 if (hcon->state != BT_CONNECTED) {
7783 BT_DBG("queueing pending rx skb");
7784 skb_queue_tail(&conn->pending_rx, skb);
7788 skb_pull(skb, L2CAP_HDR_SIZE);
7789 cid = __le16_to_cpu(lh->cid);
7790 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload */
7792 if (len != skb->len) {
7797 /* Since we can't actively block incoming LE connections we must
7798 * at least ensure that we ignore incoming data from them.
7800 if (hcon->type == LE_LINK &&
7801 hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
7802 bdaddr_dst_type(hcon))) {
7807 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7810 case L2CAP_CID_SIGNALING:
7811 l2cap_sig_channel(conn, skb);
7814 case L2CAP_CID_CONN_LESS:
/* Connectionless payload begins with the target PSM */
7815 psm = get_unaligned((__le16 *) skb->data);
7816 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7817 l2cap_conless_channel(conn, psm, skb);
7820 case L2CAP_CID_LE_SIGNALING:
7821 l2cap_le_sig_channel(conn, skb);
/* All other CIDs carry channel data */
7825 l2cap_data_channel(conn, cid, skb);
/* Workqueue handler: drain frames that were queued on pending_rx while the
 * HCI link was still coming up, feeding each through l2cap_recv_frame().
 */
7830 static void process_pending_rx(struct work_struct *work)
7832 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7834 struct sk_buff *skb;
7838 while ((skb = skb_dequeue(&conn->pending_rx)))
7839 l2cap_recv_frame(conn, skb);
/* Create (or return the existing) l2cap_conn for an HCI connection:
 * allocates the conn, creates its HCI channel, picks the MTU from the
 * link type, advertises the locally supported fixed channels, and
 * initialises locks, lists, timers, and work items.
 *
 * NOTE(review): elided extract — early-return for an existing conn and
 * NULL-checks after the allocations are not visible here.
 */
7842 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7844 struct l2cap_conn *conn = hcon->l2cap_data;
7845 struct hci_chan *hchan;
7850 hchan = hci_chan_create(hcon);
7854 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan */
7856 hci_chan_del(hchan);
7860 kref_init(&conn->ref);
7861 hcon->l2cap_data = conn;
7862 conn->hcon = hci_conn_get(hcon);
7863 conn->hchan = hchan;
7865 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* MTU depends on the transport: LE MTU if advertised, else ACL MTU */
7867 switch (hcon->type) {
7869 if (hcon->hdev->le_mtu) {
7870 conn->mtu = hcon->hdev->le_mtu;
7875 conn->mtu = hcon->hdev->acl_mtu;
7879 conn->feat_mask = 0;
7881 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
/* A2MP fixed channel only when High Speed is enabled */
7883 if (hcon->type == ACL_LINK &&
7884 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7885 conn->local_fixed_chan |= L2CAP_FC_A2MP;
/* SMP over BR/EDR requires LE + secure connections (or forced) */
7887 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7888 (bredr_sc_enabled(hcon->hdev) ||
7889 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7890 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7892 mutex_init(&conn->ident_lock);
7893 mutex_init(&conn->chan_lock);
7895 INIT_LIST_HEAD(&conn->chan_l);
7896 INIT_LIST_HEAD(&conn->users);
7898 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7900 skb_queue_head_init(&conn->pending_rx);
7901 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7902 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7904 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7909 static bool is_valid_psm(u16 psm, u8 dst_type)
7914 if (bdaddr_type_is_le(dst_type))
7915 return (psm <= 0x00ff);
7917 /* PSM must be odd and lsb of upper byte must be 0 */
7918 return ((psm & 0x0101) == 0x0001);
/* Iteration context for l2cap_chan_by_pid(): the channel being connected
 * plus the counter of matching deferred channels.
 * NOTE(review): the remaining fields (pid, count) are elided in this
 * extract.
 */
7921 struct l2cap_chan_data {
7922 struct l2cap_chan *chan;
/* l2cap_chan_list() callback: count other deferred EXT_FLOWCTL channels in
 * BT_CONNECT with the same owning PID and PSM as the reference channel, so
 * the caller can cap simultaneous ECRED connection attempts.
 */
7927 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7929 struct l2cap_chan_data *d = data;
/* Skip the reference channel itself */
7932 if (chan == d->chan)
7935 if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7938 pid = chan->ops->get_peer_pid(chan);
7940 /* Only count deferred channels with the same PID/PSM */
7941 if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7942 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
/* Initiate an outgoing L2CAP connection on @chan toward @dst.
 *
 * Validates the PSM/CID against the channel type and mode, resolves the
 * HCI device for the route, creates (or reuses) the HCI connection for the
 * transport, attaches the channel to the l2cap_conn, and kicks off the
 * L2CAP-level connect (or marks it connected for fixed channels on an
 * already-up link).  Returns 0 on success or a negative errno.
 *
 * NOTE(review): elided extract — error labels, several `goto done` paths
 * and the hci_dev_lock/unlock pairing are not fully visible; locking order
 * here (hdev lock -> conn->chan_lock -> chan lock) is load-bearing.
 */
7948 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7949 bdaddr_t *dst, u8 dst_type)
7951 struct l2cap_conn *conn;
7952 struct hci_conn *hcon;
7953 struct hci_dev *hdev;
7956 BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7957 dst, dst_type, __le16_to_cpu(psm), chan->mode);
7959 hdev = hci_get_route(dst, &chan->src, chan->src_type);
7961 return -EHOSTUNREACH;
/* Raw channels may use any PSM; others must pass validation */
7965 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7966 chan->chan_type != L2CAP_CHAN_RAW) {
7971 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7976 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
/* Reject modes that are compiled out or disabled by module params */
7981 switch (chan->mode) {
7982 case L2CAP_MODE_BASIC:
7984 case L2CAP_MODE_LE_FLOWCTL:
7986 case L2CAP_MODE_EXT_FLOWCTL:
7987 if (!enable_ecred) {
7992 case L2CAP_MODE_ERTM:
7993 case L2CAP_MODE_STREAMING:
8002 switch (chan->state) {
8006 /* Already connecting */
8011 /* Already connected */
8025 /* Set destination address and psm */
8026 bacpy(&chan->dst, dst);
8027 chan->dst_type = dst_type;
8032 if (bdaddr_type_is_le(dst_type)) {
8033 /* Convert from L2CAP channel address type to HCI address type
8035 if (dst_type == BDADDR_LE_PUBLIC)
8036 dst_type = ADDR_LE_DEV_PUBLIC;
8038 dst_type = ADDR_LE_DEV_RANDOM;
/* While advertising we connect directly; otherwise connect via scan */
8040 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8041 hcon = hci_connect_le(hdev, dst, dst_type,
8043 HCI_LE_CONN_TIMEOUT,
8044 HCI_ROLE_SLAVE, NULL);
8046 hcon = hci_connect_le_scan(hdev, dst, dst_type,
8048 HCI_LE_CONN_TIMEOUT,
8049 CONN_REASON_L2CAP_CHAN);
8052 u8 auth_type = l2cap_get_auth_type(chan);
8053 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
8054 CONN_REASON_L2CAP_CHAN);
8058 err = PTR_ERR(hcon);
8062 conn = l2cap_conn_add(hcon);
8064 hci_conn_drop(hcon);
/* Cap the number of simultaneous deferred ECRED connects per PID */
8069 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
8070 struct l2cap_chan_data data;
8073 data.pid = chan->ops->get_peer_pid(chan);
8076 l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
8078 /* Check if there isn't too many channels being connected */
8079 if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
8080 hci_conn_drop(hcon);
8086 mutex_lock(&conn->chan_lock);
8087 l2cap_chan_lock(chan);
/* A fixed CID must not already be in use on this connection */
8089 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
8090 hci_conn_drop(hcon);
8095 /* Update source addr of the socket */
8096 bacpy(&chan->src, &hcon->src);
8097 chan->src_type = bdaddr_src_type(hcon);
8099 __l2cap_chan_add(conn, chan);
8101 /* l2cap_chan_add takes its own ref so we can drop this one */
8102 hci_conn_drop(hcon);
8104 l2cap_state_change(chan, BT_CONNECT);
8105 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
8107 /* Release chan->sport so that it can be reused by other
8108 * sockets (as it's only used for listening sockets).
8110 write_lock(&chan_list_lock);
8112 write_unlock(&chan_list_lock);
8114 if (hcon->state == BT_CONNECTED) {
8115 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
8116 __clear_chan_timer(chan);
8117 if (l2cap_chan_check_security(chan, true))
8118 l2cap_state_change(chan, BT_CONNECTED);
8120 l2cap_do_start(chan);
8126 l2cap_chan_unlock(chan);
8127 mutex_unlock(&conn->chan_lock);
8129 hci_dev_unlock(hdev);
/* Send an ECRED reconfigure request for @chan carrying its current
 * imtu/mps and source CID.
 * NOTE(review): the pdu aggregate wrapping `req` is declared on an elided
 * line; confirm its layout against the full source.
 */
8135 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8137 struct l2cap_conn *conn = chan->conn;
8139 struct l2cap_ecred_reconf_req req;
8143 pdu.req.mtu = cpu_to_le16(chan->imtu);
8144 pdu.req.mps = cpu_to_le16(chan->mps);
8145 pdu.scid = cpu_to_le16(chan->scid);
8147 chan->ident = l2cap_get_ident(conn);
8149 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
/* Reconfigure an ECRED channel to a larger MTU.  Shrinking is not allowed
 * (returns -EINVAL on an elided line when chan->imtu > mtu).
 */
8153 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8155 if (chan->imtu > mtu)
8158 BT_DBG("chan %p mtu 0x%4.4x", chan, mtu)
8162 l2cap_ecred_reconfigure(chan);
8167 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection from
 * @bdaddr.  Scans listening channels; an exact local-address match (lm1)
 * overrides wildcard BDADDR_ANY listeners (lm2).  Returns the HCI link
 * mode bits (accept / master).
 */
8169 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8171 int exact = 0, lm1 = 0, lm2 = 0;
8172 struct l2cap_chan *c;
8174 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8176 /* Find listening sockets and check their link_mode */
8177 read_lock(&chan_list_lock);
8178 list_for_each_entry(c, &chan_list, global_l) {
8179 if (c->state != BT_LISTEN)
/* Listener bound to this adapter's own address */
8182 if (!bacmp(&c->src, &hdev->bdaddr)) {
8183 lm1 |= HCI_LM_ACCEPT;
8184 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8185 lm1 |= HCI_LM_MASTER;
/* Wildcard listener */
8187 } else if (!bacmp(&c->src, BDADDR_ANY)) {
8188 lm2 |= HCI_LM_ACCEPT;
8189 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8190 lm2 |= HCI_LM_MASTER;
8193 read_unlock(&chan_list_lock);
8195 return exact ? lm1 : lm2;
8198 /* Find the next fixed channel in BT_LISTEN state, continue iteration
8199 * from an existing channel in the list or from the beginning of the
8200 * global list (by passing NULL as first parameter).
8202 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8203 struct hci_conn *hcon)
8205 u8 src_type = bdaddr_src_type(hcon);
8207 read_lock(&chan_list_lock);
/* Resume after the given channel, or start from the list head */
8210 c = list_next_entry(c, global_l);
8212 c = list_entry(chan_list.next, typeof(*c), global_l);
8214 list_for_each_entry_from(c, &chan_list, global_l) {
8215 if (c->chan_type != L2CAP_CHAN_FIXED)
8217 if (c->state != BT_LISTEN)
/* Must match the link's source address (or be a wildcard) and type */
8219 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8221 if (src_type != c->src_type)
/* Take a reference unless the channel is already being freed */
8224 c = l2cap_chan_hold_unless_zero(c);
8225 read_unlock(&chan_list_lock);
8229 read_unlock(&chan_list_lock);
/* HCI callback: a link-level connection completed (or failed, per
 * @status).  On success, creates the l2cap_conn and walks all listening
 * fixed channels, spawning a server-side channel on this connection for
 * each — unless a client channel already claimed the CID.  Lookups are
 * done one at a time because holding chan_list_lock across the sleeping
 * l2cap_chan_lock() is not allowed.
 *
 * NOTE(review): elided extract — failure/early-return branches between the
 * visible lines are not shown.
 */
8234 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8236 struct hci_dev *hdev = hcon->hdev;
8237 struct l2cap_conn *conn;
8238 struct l2cap_chan *pchan;
8241 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8244 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
/* Non-zero status: tear down any L2CAP state for this link */
8247 l2cap_conn_del(hcon, bt_to_errno(status));
8251 conn = l2cap_conn_add(hcon);
8255 dst_type = bdaddr_dst_type(hcon);
8257 /* If device is blocked, do not create channels for it */
8258 if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
8261 /* Find fixed channels and notify them of the new connection. We
8262 * use multiple individual lookups, continuing each time where
8263 * we left off, because the list lock would prevent calling the
8264 * potentially sleeping l2cap_chan_lock() function.
8266 pchan = l2cap_global_fixed_chan(NULL, hcon);
8268 struct l2cap_chan *chan, *next;
8270 /* Client fixed channels should override server ones */
8271 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8274 l2cap_chan_lock(pchan);
8275 chan = pchan->ops->new_connection(pchan);
8277 bacpy(&chan->src, &hcon->src);
8278 bacpy(&chan->dst, &hcon->dst);
8279 chan->src_type = bdaddr_src_type(hcon);
8280 chan->dst_type = dst_type;
8282 __l2cap_chan_add(conn, chan);
8285 l2cap_chan_unlock(pchan);
/* Advance to the next listener before dropping our reference */
8287 next = l2cap_global_fixed_chan(pchan, hcon);
8288 l2cap_chan_put(pchan);
8292 l2cap_conn_ready(conn);
/* HCI callback: report the disconnect reason recorded for this link's
 * L2CAP connection, defaulting to remote-user-termination when no
 * l2cap_conn exists.
 */
8295 int l2cap_disconn_ind(struct hci_conn *hcon)
8297 struct l2cap_conn *conn = hcon->l2cap_data;
8299 BT_DBG("hcon %p", hcon);
8302 return HCI_ERROR_REMOTE_USER_TERM;
8303 return conn->disc_reason;
8306 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8308 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8311 BT_DBG("hcon %p reason %d", hcon, reason);
8313 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel: when
 * encryption drops, MEDIUM-security channels get a grace timer and
 * HIGH/FIPS channels are closed immediately; when encryption comes up, a
 * pending MEDIUM-security grace timer is cancelled.
 */
8316 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8318 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8321 if (encrypt == 0x00) {
8322 if (chan->sec_level == BT_SECURITY_MEDIUM) {
8323 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8324 } else if (chan->sec_level == BT_SECURITY_HIGH ||
8325 chan->sec_level == BT_SECURITY_FIPS)
8326 l2cap_chan_close(chan, ECONNREFUSED);
8328 if (chan->sec_level == BT_SECURITY_MEDIUM)
8329 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption state changed on the link.
 * Walks every channel on the connection and advances each according to its
 * state: resume already-connected channels, start connection for channels
 * in BT_CONNECT, and answer deferred incoming connects (BT_CONNECT2) with
 * success, pending-authorization, or security-block.
 *
 * NOTE(review): elided extract — several closing braces and `continue`
 * paths between visible lines are not shown.
 */
8333 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8335 struct l2cap_conn *conn = hcon->l2cap_data;
8336 struct l2cap_chan *chan;
8341 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8343 mutex_lock(&conn->chan_lock);
8345 list_for_each_entry(chan, &conn->chan_l, list) {
8346 l2cap_chan_lock(chan);
8348 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8349 state_to_string(chan->state));
/* A2MP has its own security handling */
8351 if (chan->scid == L2CAP_CID_A2MP) {
8352 l2cap_chan_unlock(chan);
8356 if (!status && encrypt)
8357 chan->sec_level = hcon->sec_level;
/* Skip channels with a connect request still pending */
8359 if (!__l2cap_no_conn_pending(chan)) {
8360 l2cap_chan_unlock(chan);
8364 if (!status && (chan->state == BT_CONNECTED ||
8365 chan->state == BT_CONFIG)) {
8366 chan->ops->resume(chan);
8367 l2cap_check_encryption(chan, encrypt);
8368 l2cap_chan_unlock(chan);
8372 if (chan->state == BT_CONNECT) {
/* Only proceed when security succeeded with a sane key size */
8373 if (!status && l2cap_check_enc_key_size(hcon))
8374 l2cap_start_connection(chan);
8376 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8377 } else if (chan->state == BT_CONNECT2 &&
8378 !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8379 chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8380 struct l2cap_conn_rsp rsp;
8383 if (!status && l2cap_check_enc_key_size(hcon)) {
8384 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8385 res = L2CAP_CR_PEND;
8386 stat = L2CAP_CS_AUTHOR_PEND;
8387 chan->ops->defer(chan);
8389 l2cap_state_change(chan, BT_CONFIG);
8390 res = L2CAP_CR_SUCCESS;
8391 stat = L2CAP_CS_NO_INFO;
/* Security failed: reject and schedule disconnect */
8394 l2cap_state_change(chan, BT_DISCONN);
8395 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8396 res = L2CAP_CR_SEC_BLOCK;
8397 stat = L2CAP_CS_NO_INFO;
8400 rsp.scid = cpu_to_le16(chan->dcid);
8401 rsp.dcid = cpu_to_le16(chan->scid);
8402 rsp.result = cpu_to_le16(res);
8403 rsp.status = cpu_to_le16(stat);
8404 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* On acceptance, immediately kick off configuration */
8407 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8408 res == L2CAP_CR_SUCCESS) {
8410 set_bit(CONF_REQ_SENT, &chan->conf_state);
8411 l2cap_send_cmd(conn, l2cap_get_ident(conn),
8413 l2cap_build_conf_req(chan, buf, sizeof(buf)),
8415 chan->num_conf_req++;
8419 l2cap_chan_unlock(chan);
8422 mutex_unlock(&conn->chan_lock);
8425 /* Append fragment into frame respecting the maximum len of rx_skb */
8426 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
/* Allocate rx_skb on first fragment, sized for the whole frame */
8429 if (!conn->rx_skb) {
8430 /* Allocate skb for the complete frame (with header) */
8431 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8438 /* Copy as much as the rx_skb can hold */
8439 len = min_t(u16, len, skb->len);
8440 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
/* Track how many bytes of the frame are still expected */
8442 conn->rx_len -= len;
/* Complete the 2-byte L2CAP length field split across fragments.  Once the
 * length is known: if the current rx_skb is big enough, just record the
 * expected total; otherwise reallocate rx_skb at the exact frame size and
 * re-append what was buffered so far.
 */
8447 static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
8449 struct sk_buff *rx_skb;
8452 /* Append just enough to complete the header */
8453 len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);
8455 /* If header could not be read just continue */
8456 if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
8459 rx_skb = conn->rx_skb;
8460 len = get_unaligned_le16(rx_skb->data);
8462 /* Check if rx_skb has enough space to received all fragments */
8463 if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
8464 /* Update expected len */
8465 conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
8466 return L2CAP_LEN_SIZE;
8469 /* Reset conn->rx_skb since it will need to be reallocated in order to
8470 * fit all fragments.
8472 conn->rx_skb = NULL;
8474 /* Reallocates rx_skb using the exact expected length */
8475 len = l2cap_recv_frag(conn, rx_skb,
8476 len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
/* Discard any partially reassembled incoming frame and reset the
 * reassembly state.  (The rx_len reset is on an elided line.)
 */
8482 static void l2cap_recv_reset(struct l2cap_conn *conn)
8484 kfree_skb(conn->rx_skb);
8485 conn->rx_skb = NULL;
/* HCI entry point for inbound ACL data.  Reassembles L2CAP frames from HCI
 * fragments: a start fragment either contains a complete frame (forwarded
 * directly) or begins reassembly into conn->rx_skb; continuation fragments
 * are appended until rx_len reaches zero, at which point the frame is
 * dispatched via l2cap_recv_frame().  Malformed sequences mark the
 * connection unreliable.
 *
 * NOTE(review): elided extract — the switch head, drop label, and several
 * braces between the visible lines are not shown.
 */
8489 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8491 struct l2cap_conn *conn = hcon->l2cap_data;
8494 /* For AMP controller do not create l2cap conn */
8495 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8499 conn = l2cap_conn_add(hcon);
8504 BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
8508 case ACL_START_NO_FLUSH:
/* A start fragment while a frame is mid-reassembly is a protocol error */
8511 BT_ERR("Unexpected start frame (len %d)", skb->len);
8512 l2cap_recv_reset(conn);
8513 l2cap_conn_unreliable(conn, ECOMM);
8516 /* Start fragment may not contain the L2CAP length so just
8517 * copy the initial byte when that happens and use conn->mtu as
8520 if (skb->len < L2CAP_LEN_SIZE) {
8521 l2cap_recv_frag(conn, skb, conn->mtu);
8525 len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
8527 if (len == skb->len) {
8528 /* Complete frame received */
8529 l2cap_recv_frame(conn, skb);
8533 BT_DBG("Start: total len %d, frag len %u", len, skb->len);
8535 if (skb->len > len) {
8536 BT_ERR("Frame is too long (len %u, expected len %d)",
8538 l2cap_conn_unreliable(conn, ECOMM);
8542 /* Append fragment into frame (with header) */
8543 if (l2cap_recv_frag(conn, skb, len) < 0)
/* Continuation fragment path */
8549 BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);
8551 if (!conn->rx_skb) {
8552 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8553 l2cap_conn_unreliable(conn, ECOMM);
8557 /* Complete the L2CAP length if it has not been read */
8558 if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
8559 if (l2cap_recv_len(conn, skb) < 0) {
8560 l2cap_conn_unreliable(conn, ECOMM);
8564 /* Header still could not be read just continue */
8565 if (conn->rx_skb->len < L2CAP_LEN_SIZE)
8569 if (skb->len > conn->rx_len) {
8570 BT_ERR("Fragment is too long (len %u, expected %u)",
8571 skb->len, conn->rx_len);
8572 l2cap_recv_reset(conn);
8573 l2cap_conn_unreliable(conn, ECOMM);
8577 /* Append fragment into frame (with header) */
8578 l2cap_recv_frag(conn, skb, skb->len);
8580 if (!conn->rx_len) {
8581 /* Complete frame received. l2cap_recv_frame
8582 * takes ownership of the skb so set the global
8583 * rx_skb pointer to NULL first.
8585 struct sk_buff *rx_skb = conn->rx_skb;
8586 conn->rx_skb = NULL;
8587 l2cap_recv_frame(conn, rx_skb);
/* HCI core callback registration: wires L2CAP into link connect,
 * disconnect, and security-change events.
 */
8596 static struct hci_cb l2cap_cb = {
8598 .connect_cfm = l2cap_connect_cfm,
8599 .disconn_cfm = l2cap_disconn_cfm,
8600 .security_cfm = l2cap_security_cfm,
/* debugfs dump: one line per global channel with addresses, state, PSM,
 * CIDs, MTUs, security level and mode.
 */
8603 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8605 struct l2cap_chan *c;
8607 read_lock(&chan_list_lock);
8609 list_for_each_entry(c, &chan_list, global_l) {
8610 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8611 &c->src, c->src_type, &c->dst, c->dst_type,
8612 c->state, __le16_to_cpu(c->psm),
8613 c->scid, c->dcid, c->imtu, c->omtu,
8614 c->sec_level, c->mode);
8617 read_unlock(&chan_list_lock);
8622 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
8624 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and HCI callbacks, and
 * create the debugfs entry when bt_debugfs is available.
 */
8626 int __init l2cap_init(void)
8630 err = l2cap_init_sockets();
8634 hci_register_cb(&l2cap_cb);
8636 if (IS_ERR_OR_NULL(bt_debugfs))
8639 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8640 NULL, &l2cap_debugfs_fops);
/* Module teardown: reverse of l2cap_init() — remove debugfs entry,
 * unregister HCI callbacks, then tear down the socket layer.
 */
8645 void l2cap_exit(void)
8647 debugfs_remove(l2cap_debugfs);
8648 hci_unregister_cb(&l2cap_cb);
8649 l2cap_cleanup_sockets();
/* Runtime-tunable module parameters (0644: root-writable via sysfs) */
8652 module_param(disable_ertm, bool, 0644);
8653 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8655 module_param(enable_ecred, bool, 0644);
8656 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");