2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 struct sk_buff_head *skbs, u8 event);
65 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
67 if (link_type == LE_LINK) {
68 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
69 return BDADDR_LE_PUBLIC;
71 return BDADDR_LE_RANDOM;
77 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
79 return bdaddr_type(hcon->type, hcon->src_type);
82 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
84 return bdaddr_type(hcon->type, hcon->dst_type);
87 /* ---- L2CAP channels ---- */
89 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
94 list_for_each_entry(c, &conn->chan_l, list) {
101 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
104 struct l2cap_chan *c;
106 list_for_each_entry(c, &conn->chan_l, list) {
113 /* Find channel with given SCID.
114 * Returns locked channel. */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
118 struct l2cap_chan *c;
120 mutex_lock(&conn->chan_lock);
121 c = __l2cap_get_chan_by_scid(conn, cid);
124 mutex_unlock(&conn->chan_lock);
129 /* Find channel with given DCID.
130 * Returns locked channel.
132 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
135 struct l2cap_chan *c;
137 mutex_lock(&conn->chan_lock);
138 c = __l2cap_get_chan_by_dcid(conn, cid);
141 mutex_unlock(&conn->chan_lock);
146 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
149 struct l2cap_chan *c;
151 list_for_each_entry(c, &conn->chan_l, list) {
152 if (c->ident == ident)
158 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
161 struct l2cap_chan *c;
163 mutex_lock(&conn->chan_lock);
164 c = __l2cap_get_chan_by_ident(conn, ident);
167 mutex_unlock(&conn->chan_lock);
172 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
175 struct l2cap_chan *c;
177 list_for_each_entry(c, &chan_list, global_l) {
178 if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
181 if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
184 if (c->sport == psm && !bacmp(&c->src, src))
190 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
194 write_lock(&chan_list_lock);
196 if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
206 u16 p, start, end, incr;
208 if (chan->src_type == BDADDR_BREDR) {
209 start = L2CAP_PSM_DYN_START;
210 end = L2CAP_PSM_AUTO_END;
213 start = L2CAP_PSM_LE_DYN_START;
214 end = L2CAP_PSM_LE_DYN_END;
219 for (p = start; p <= end; p += incr)
220 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
222 chan->psm = cpu_to_le16(p);
223 chan->sport = cpu_to_le16(p);
230 write_unlock(&chan_list_lock);
233 EXPORT_SYMBOL_GPL(l2cap_add_psm);
235 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
237 write_lock(&chan_list_lock);
239 /* Override the defaults (which are for conn-oriented) */
240 chan->omtu = L2CAP_DEFAULT_MTU;
241 chan->chan_type = L2CAP_CHAN_FIXED;
245 write_unlock(&chan_list_lock);
250 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
254 if (conn->hcon->type == LE_LINK)
255 dyn_end = L2CAP_CID_LE_DYN_END;
257 dyn_end = L2CAP_CID_DYN_END;
259 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
260 if (!__l2cap_get_chan_by_scid(conn, cid))
267 static void l2cap_state_change(struct l2cap_chan *chan, int state)
269 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
270 state_to_string(state));
273 chan->ops->state_change(chan, state, 0);
276 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
280 chan->ops->state_change(chan, chan->state, err);
283 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
285 chan->ops->state_change(chan, chan->state, err);
288 static void __set_retrans_timer(struct l2cap_chan *chan)
290 if (!delayed_work_pending(&chan->monitor_timer) &&
291 chan->retrans_timeout) {
292 l2cap_set_timer(chan, &chan->retrans_timer,
293 msecs_to_jiffies(chan->retrans_timeout));
297 static void __set_monitor_timer(struct l2cap_chan *chan)
299 __clear_retrans_timer(chan);
300 if (chan->monitor_timeout) {
301 l2cap_set_timer(chan, &chan->monitor_timer,
302 msecs_to_jiffies(chan->monitor_timeout));
306 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
311 skb_queue_walk(head, skb) {
312 if (bt_cb(skb)->l2cap.txseq == seq)
319 /* ---- L2CAP sequence number lists ---- */
321 /* For ERTM, ordered lists of sequence numbers must be tracked for
322 * SREJ requests that are received and for frames that are to be
323 * retransmitted. These seq_list functions implement a singly-linked
324 * list in an array, where membership in the list can also be checked
325 * in constant time. Items can also be added to the tail of the list
326 * and removed from the head in constant time, without further memory
330 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
332 size_t alloc_size, i;
334 /* Allocated size is a power of 2 to map sequence numbers
335 * (which may be up to 14 bits) in to a smaller array that is
336 * sized for the negotiated ERTM transmit windows.
338 alloc_size = roundup_pow_of_two(size);
340 seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
344 seq_list->mask = alloc_size - 1;
345 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
346 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
347 for (i = 0; i < alloc_size; i++)
348 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
353 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
355 kfree(seq_list->list);
358 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
361 /* Constant-time check for list membership */
362 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
365 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
367 u16 seq = seq_list->head;
368 u16 mask = seq_list->mask;
370 seq_list->head = seq_list->list[seq & mask];
371 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
373 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
381 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
385 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
388 for (i = 0; i <= seq_list->mask; i++)
389 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
391 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
392 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
395 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
397 u16 mask = seq_list->mask;
399 /* All appends happen in constant time */
401 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
404 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
405 seq_list->head = seq;
407 seq_list->list[seq_list->tail & mask] = seq;
409 seq_list->tail = seq;
410 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the per-channel timer: close the channel
 * with an error chosen from its current state.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
446 struct l2cap_chan *l2cap_chan_create(void)
448 struct l2cap_chan *chan;
450 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
454 skb_queue_head_init(&chan->tx_q);
455 skb_queue_head_init(&chan->srej_q);
456 mutex_init(&chan->lock);
458 /* Set default lock nesting level */
459 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
461 write_lock(&chan_list_lock);
462 list_add(&chan->global_l, &chan_list);
463 write_unlock(&chan_list_lock);
465 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
467 chan->state = BT_OPEN;
469 kref_init(&chan->kref);
471 /* This flag is cleared in l2cap_chan_ready() */
472 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
474 BT_DBG("chan %p", chan);
478 EXPORT_SYMBOL_GPL(l2cap_chan_create);
480 static void l2cap_chan_destroy(struct kref *kref)
482 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
484 BT_DBG("chan %p", chan);
486 write_lock(&chan_list_lock);
487 list_del(&chan->global_l);
488 write_unlock(&chan_list_lock);
493 void l2cap_chan_hold(struct l2cap_chan *c)
495 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
500 void l2cap_chan_put(struct l2cap_chan *c)
502 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
504 kref_put(&c->kref, l2cap_chan_destroy);
506 EXPORT_SYMBOL_GPL(l2cap_chan_put);
508 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
510 chan->fcs = L2CAP_FCS_CRC16;
511 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
512 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
513 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
514 chan->remote_max_tx = chan->max_tx;
515 chan->remote_tx_win = chan->tx_win;
516 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
517 chan->sec_level = BT_SECURITY_LOW;
518 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
519 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
520 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
522 chan->conf_state = 0;
523 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
525 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
527 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
529 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
532 chan->sdu_last_frag = NULL;
534 chan->tx_credits = tx_credits;
535 /* Derive MPS from connection MTU to stop HCI fragmentation */
536 chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
537 /* Give enough credits for a full packet */
538 chan->rx_credits = (chan->imtu / chan->mps) + 1;
540 skb_queue_head_init(&chan->tx_q);
543 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
545 l2cap_le_flowctl_init(chan, tx_credits);
547 /* L2CAP implementations shall support a minimum MPS of 64 octets */
548 if (chan->mps < L2CAP_ECRED_MIN_MPS) {
549 chan->mps = L2CAP_ECRED_MIN_MPS;
550 chan->rx_credits = (chan->imtu / chan->mps) + 1;
554 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
556 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
557 __le16_to_cpu(chan->psm), chan->dcid);
559 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
563 switch (chan->chan_type) {
564 case L2CAP_CHAN_CONN_ORIENTED:
565 /* Alloc CID for connection-oriented socket */
566 chan->scid = l2cap_alloc_cid(conn);
567 if (conn->hcon->type == ACL_LINK)
568 chan->omtu = L2CAP_DEFAULT_MTU;
571 case L2CAP_CHAN_CONN_LESS:
572 /* Connectionless socket */
573 chan->scid = L2CAP_CID_CONN_LESS;
574 chan->dcid = L2CAP_CID_CONN_LESS;
575 chan->omtu = L2CAP_DEFAULT_MTU;
578 case L2CAP_CHAN_FIXED:
579 /* Caller will set CID and CID specific MTU values */
583 /* Raw socket can send/recv signalling messages only */
584 chan->scid = L2CAP_CID_SIGNALING;
585 chan->dcid = L2CAP_CID_SIGNALING;
586 chan->omtu = L2CAP_DEFAULT_MTU;
589 chan->local_id = L2CAP_BESTEFFORT_ID;
590 chan->local_stype = L2CAP_SERV_BESTEFFORT;
591 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
592 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
593 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
594 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
596 l2cap_chan_hold(chan);
598 /* Only keep a reference for fixed channels if they requested it */
599 if (chan->chan_type != L2CAP_CHAN_FIXED ||
600 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
601 hci_conn_hold(conn->hcon);
603 list_add(&chan->list, &conn->chan_l);
606 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
608 mutex_lock(&conn->chan_lock);
609 __l2cap_chan_add(conn, chan);
610 mutex_unlock(&conn->chan_lock);
613 void l2cap_chan_del(struct l2cap_chan *chan, int err)
615 struct l2cap_conn *conn = chan->conn;
617 __clear_chan_timer(chan);
619 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
620 state_to_string(chan->state));
622 chan->ops->teardown(chan, err);
625 struct amp_mgr *mgr = conn->hcon->amp_mgr;
626 /* Delete from channel list */
627 list_del(&chan->list);
629 l2cap_chan_put(chan);
633 /* Reference was only held for non-fixed channels or
634 * fixed channels that explicitly requested it using the
635 * FLAG_HOLD_HCI_CONN flag.
637 if (chan->chan_type != L2CAP_CHAN_FIXED ||
638 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
639 hci_conn_drop(conn->hcon);
641 if (mgr && mgr->bredr_chan == chan)
642 mgr->bredr_chan = NULL;
645 if (chan->hs_hchan) {
646 struct hci_chan *hs_hchan = chan->hs_hchan;
648 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
649 amp_disconnect_logical_link(hs_hchan);
652 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
655 switch (chan->mode) {
656 case L2CAP_MODE_BASIC:
659 case L2CAP_MODE_LE_FLOWCTL:
660 case L2CAP_MODE_EXT_FLOWCTL:
661 skb_queue_purge(&chan->tx_q);
664 case L2CAP_MODE_ERTM:
665 __clear_retrans_timer(chan);
666 __clear_monitor_timer(chan);
667 __clear_ack_timer(chan);
669 skb_queue_purge(&chan->srej_q);
671 l2cap_seq_list_free(&chan->srej_list);
672 l2cap_seq_list_free(&chan->retrans_list);
675 case L2CAP_MODE_STREAMING:
676 skb_queue_purge(&chan->tx_q);
680 EXPORT_SYMBOL_GPL(l2cap_chan_del);
682 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
685 struct l2cap_chan *chan;
687 list_for_each_entry(chan, &conn->chan_l, list) {
692 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
698 mutex_lock(&conn->chan_lock);
699 __l2cap_chan_list(conn, func, data);
700 mutex_unlock(&conn->chan_lock);
703 EXPORT_SYMBOL_GPL(l2cap_chan_list);
705 static void l2cap_conn_update_id_addr(struct work_struct *work)
707 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
708 id_addr_update_work);
709 struct hci_conn *hcon = conn->hcon;
710 struct l2cap_chan *chan;
712 mutex_lock(&conn->chan_lock);
714 list_for_each_entry(chan, &conn->chan_l, list) {
715 l2cap_chan_lock(chan);
716 bacpy(&chan->dst, &hcon->dst);
717 chan->dst_type = bdaddr_dst_type(hcon);
718 l2cap_chan_unlock(chan);
721 mutex_unlock(&conn->chan_lock);
724 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
726 struct l2cap_conn *conn = chan->conn;
727 struct l2cap_le_conn_rsp rsp;
730 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
731 result = L2CAP_CR_LE_AUTHORIZATION;
733 result = L2CAP_CR_LE_BAD_PSM;
735 l2cap_state_change(chan, BT_DISCONN);
737 rsp.dcid = cpu_to_le16(chan->scid);
738 rsp.mtu = cpu_to_le16(chan->imtu);
739 rsp.mps = cpu_to_le16(chan->mps);
740 rsp.credits = cpu_to_le16(chan->rx_credits);
741 rsp.result = cpu_to_le16(result);
743 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
747 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
749 struct l2cap_conn *conn = chan->conn;
750 struct l2cap_ecred_conn_rsp rsp;
753 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
754 result = L2CAP_CR_LE_AUTHORIZATION;
756 result = L2CAP_CR_LE_BAD_PSM;
758 l2cap_state_change(chan, BT_DISCONN);
760 memset(&rsp, 0, sizeof(rsp));
762 rsp.result = cpu_to_le16(result);
764 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
768 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
770 struct l2cap_conn *conn = chan->conn;
771 struct l2cap_conn_rsp rsp;
774 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
775 result = L2CAP_CR_SEC_BLOCK;
777 result = L2CAP_CR_BAD_PSM;
779 l2cap_state_change(chan, BT_DISCONN);
781 rsp.scid = cpu_to_le16(chan->dcid);
782 rsp.dcid = cpu_to_le16(chan->scid);
783 rsp.result = cpu_to_le16(result);
784 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
786 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
789 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
791 struct l2cap_conn *conn = chan->conn;
793 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
795 switch (chan->state) {
797 chan->ops->teardown(chan, 0);
802 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
803 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
804 l2cap_send_disconn_req(chan, reason);
806 l2cap_chan_del(chan, reason);
810 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
811 if (conn->hcon->type == ACL_LINK)
812 l2cap_chan_connect_reject(chan);
813 else if (conn->hcon->type == LE_LINK) {
814 switch (chan->mode) {
815 case L2CAP_MODE_LE_FLOWCTL:
816 l2cap_chan_le_connect_reject(chan);
818 case L2CAP_MODE_EXT_FLOWCTL:
819 l2cap_chan_ecred_connect_reject(chan);
825 l2cap_chan_del(chan, reason);
830 l2cap_chan_del(chan, reason);
834 chan->ops->teardown(chan, 0);
838 EXPORT_SYMBOL(l2cap_chan_close);
840 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
842 switch (chan->chan_type) {
844 switch (chan->sec_level) {
845 case BT_SECURITY_HIGH:
846 case BT_SECURITY_FIPS:
847 return HCI_AT_DEDICATED_BONDING_MITM;
848 case BT_SECURITY_MEDIUM:
849 return HCI_AT_DEDICATED_BONDING;
851 return HCI_AT_NO_BONDING;
854 case L2CAP_CHAN_CONN_LESS:
855 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
856 if (chan->sec_level == BT_SECURITY_LOW)
857 chan->sec_level = BT_SECURITY_SDP;
859 if (chan->sec_level == BT_SECURITY_HIGH ||
860 chan->sec_level == BT_SECURITY_FIPS)
861 return HCI_AT_NO_BONDING_MITM;
863 return HCI_AT_NO_BONDING;
865 case L2CAP_CHAN_CONN_ORIENTED:
866 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
867 if (chan->sec_level == BT_SECURITY_LOW)
868 chan->sec_level = BT_SECURITY_SDP;
870 if (chan->sec_level == BT_SECURITY_HIGH ||
871 chan->sec_level == BT_SECURITY_FIPS)
872 return HCI_AT_NO_BONDING_MITM;
874 return HCI_AT_NO_BONDING;
879 switch (chan->sec_level) {
880 case BT_SECURITY_HIGH:
881 case BT_SECURITY_FIPS:
882 return HCI_AT_GENERAL_BONDING_MITM;
883 case BT_SECURITY_MEDIUM:
884 return HCI_AT_GENERAL_BONDING;
886 return HCI_AT_NO_BONDING;
892 /* Service level security */
893 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
895 struct l2cap_conn *conn = chan->conn;
898 if (conn->hcon->type == LE_LINK)
899 return smp_conn_security(conn->hcon, chan->sec_level);
901 auth_type = l2cap_get_auth_type(chan);
903 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
907 static u8 l2cap_get_ident(struct l2cap_conn *conn)
911 /* Get next available identificator.
912 * 1 - 128 are used by kernel.
913 * 129 - 199 are reserved.
914 * 200 - 254 are used by utilities like l2ping, etc.
917 mutex_lock(&conn->ident_lock);
919 if (++conn->tx_ident > 128)
924 mutex_unlock(&conn->ident_lock);
929 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
932 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
935 BT_DBG("code 0x%2.2x", code);
940 /* Use NO_FLUSH if supported or we have an LE link (which does
941 * not support auto-flushing packets) */
942 if (lmp_no_flush_capable(conn->hcon->hdev) ||
943 conn->hcon->type == LE_LINK)
944 flags = ACL_START_NO_FLUSH;
948 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
949 skb->priority = HCI_PRIO_MAX;
951 hci_send_acl(conn->hchan, skb, flags);
954 static bool __chan_is_moving(struct l2cap_chan *chan)
956 return chan->move_state != L2CAP_MOVE_STABLE &&
957 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
960 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
962 struct hci_conn *hcon = chan->conn->hcon;
965 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
968 if (chan->hs_hcon && !__chan_is_moving(chan)) {
970 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
977 /* Use NO_FLUSH for LE links (where this is the only option) or
978 * if the BR/EDR link supports it and flushing has not been
979 * explicitly requested (through FLAG_FLUSHABLE).
981 if (hcon->type == LE_LINK ||
982 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
983 lmp_no_flush_capable(hcon->hdev)))
984 flags = ACL_START_NO_FLUSH;
988 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
989 hci_send_acl(chan->conn->hchan, skb, flags);
992 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
994 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
995 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
997 if (enh & L2CAP_CTRL_FRAME_TYPE) {
1000 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1001 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1007 control->sframe = 0;
1008 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1009 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1016 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1018 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1019 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1021 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1023 control->sframe = 1;
1024 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1025 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1031 control->sframe = 0;
1032 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1033 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1040 static inline void __unpack_control(struct l2cap_chan *chan,
1041 struct sk_buff *skb)
1043 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1044 __unpack_extended_control(get_unaligned_le32(skb->data),
1045 &bt_cb(skb)->l2cap);
1046 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1048 __unpack_enhanced_control(get_unaligned_le16(skb->data),
1049 &bt_cb(skb)->l2cap);
1050 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1054 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1058 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1059 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1061 if (control->sframe) {
1062 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1063 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1064 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1066 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1067 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1073 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1077 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1078 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1080 if (control->sframe) {
1081 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1082 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1083 packed |= L2CAP_CTRL_FRAME_TYPE;
1085 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1086 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1092 static inline void __pack_control(struct l2cap_chan *chan,
1093 struct l2cap_ctrl *control,
1094 struct sk_buff *skb)
1096 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1097 put_unaligned_le32(__pack_extended_control(control),
1098 skb->data + L2CAP_HDR_SIZE);
1100 put_unaligned_le16(__pack_enhanced_control(control),
1101 skb->data + L2CAP_HDR_SIZE);
1105 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1107 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1108 return L2CAP_EXT_HDR_SIZE;
1110 return L2CAP_ENH_HDR_SIZE;
1113 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1116 struct sk_buff *skb;
1117 struct l2cap_hdr *lh;
1118 int hlen = __ertm_hdr_size(chan);
1120 if (chan->fcs == L2CAP_FCS_CRC16)
1121 hlen += L2CAP_FCS_SIZE;
1123 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1126 return ERR_PTR(-ENOMEM);
1128 lh = skb_put(skb, L2CAP_HDR_SIZE);
1129 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1130 lh->cid = cpu_to_le16(chan->dcid);
1132 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1133 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1135 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1137 if (chan->fcs == L2CAP_FCS_CRC16) {
1138 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1139 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1142 skb->priority = HCI_PRIO_MAX;
1146 static void l2cap_send_sframe(struct l2cap_chan *chan,
1147 struct l2cap_ctrl *control)
1149 struct sk_buff *skb;
1152 BT_DBG("chan %p, control %p", chan, control);
1154 if (!control->sframe)
1157 if (__chan_is_moving(chan))
1160 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1164 if (control->super == L2CAP_SUPER_RR)
1165 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1166 else if (control->super == L2CAP_SUPER_RNR)
1167 set_bit(CONN_RNR_SENT, &chan->conn_state);
1169 if (control->super != L2CAP_SUPER_SREJ) {
1170 chan->last_acked_seq = control->reqseq;
1171 __clear_ack_timer(chan);
1174 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1175 control->final, control->poll, control->super);
1177 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1178 control_field = __pack_extended_control(control);
1180 control_field = __pack_enhanced_control(control);
1182 skb = l2cap_create_sframe_pdu(chan, control_field);
1184 l2cap_do_send(chan, skb);
1187 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1189 struct l2cap_ctrl control;
1191 BT_DBG("chan %p, poll %d", chan, poll);
1193 memset(&control, 0, sizeof(control));
1195 control.poll = poll;
1197 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1198 control.super = L2CAP_SUPER_RNR;
1200 control.super = L2CAP_SUPER_RR;
1202 control.reqseq = chan->buffer_seq;
1203 l2cap_send_sframe(chan, &control);
1206 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1208 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1211 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1214 static bool __amp_capable(struct l2cap_chan *chan)
1216 struct l2cap_conn *conn = chan->conn;
1217 struct hci_dev *hdev;
1218 bool amp_available = false;
1220 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1223 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1226 read_lock(&hci_dev_list_lock);
1227 list_for_each_entry(hdev, &hci_dev_list, list) {
1228 if (hdev->amp_type != AMP_TYPE_BREDR &&
1229 test_bit(HCI_UP, &hdev->flags)) {
1230 amp_available = true;
1234 read_unlock(&hci_dev_list_lock);
1236 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1237 return amp_available;
1242 static bool l2cap_check_efs(struct l2cap_chan *chan)
1244 /* Check EFS parameters */
1248 void l2cap_send_conn_req(struct l2cap_chan *chan)
1250 struct l2cap_conn *conn = chan->conn;
1251 struct l2cap_conn_req req;
1253 req.scid = cpu_to_le16(chan->scid);
1254 req.psm = chan->psm;
1256 chan->ident = l2cap_get_ident(conn);
1258 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1260 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1263 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1265 struct l2cap_create_chan_req req;
1266 req.scid = cpu_to_le16(chan->scid);
1267 req.psm = chan->psm;
1268 req.amp_id = amp_id;
1270 chan->ident = l2cap_get_ident(chan->conn);
1272 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1276 static void l2cap_move_setup(struct l2cap_chan *chan)
1278 struct sk_buff *skb;
1280 BT_DBG("chan %p", chan);
1282 if (chan->mode != L2CAP_MODE_ERTM)
1285 __clear_retrans_timer(chan);
1286 __clear_monitor_timer(chan);
1287 __clear_ack_timer(chan);
1289 chan->retry_count = 0;
1290 skb_queue_walk(&chan->tx_q, skb) {
1291 if (bt_cb(skb)->l2cap.retries)
1292 bt_cb(skb)->l2cap.retries = 1;
1297 chan->expected_tx_seq = chan->buffer_seq;
1299 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1300 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1301 l2cap_seq_list_clear(&chan->retrans_list);
1302 l2cap_seq_list_clear(&chan->srej_list);
1303 skb_queue_purge(&chan->srej_q);
1305 chan->tx_state = L2CAP_TX_STATE_XMIT;
1306 chan->rx_state = L2CAP_RX_STATE_MOVE;
1308 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1311 static void l2cap_move_done(struct l2cap_chan *chan)
1313 u8 move_role = chan->move_role;
1314 BT_DBG("chan %p", chan);
1316 chan->move_state = L2CAP_MOVE_STABLE;
1317 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1319 if (chan->mode != L2CAP_MODE_ERTM)
1322 switch (move_role) {
1323 case L2CAP_MOVE_ROLE_INITIATOR:
1324 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1325 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1327 case L2CAP_MOVE_ROLE_RESPONDER:
1328 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1333 static void l2cap_chan_ready(struct l2cap_chan *chan)
1335 /* The channel may have already been flagged as connected in
1336 * case of receiving data before the L2CAP info req/rsp
1337 * procedure is complete.
1339 if (chan->state == BT_CONNECTED)
1342 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1343 chan->conf_state = 0;
1344 __clear_chan_timer(chan);
1346 switch (chan->mode) {
1347 case L2CAP_MODE_LE_FLOWCTL:
1348 case L2CAP_MODE_EXT_FLOWCTL:
1349 if (!chan->tx_credits)
1350 chan->ops->suspend(chan);
1354 chan->state = BT_CONNECTED;
1356 chan->ops->ready(chan);
1359 static void l2cap_le_connect(struct l2cap_chan *chan)
1361 struct l2cap_conn *conn = chan->conn;
1362 struct l2cap_le_conn_req req;
1364 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1368 chan->imtu = chan->conn->mtu;
1370 l2cap_le_flowctl_init(chan, 0);
1372 req.psm = chan->psm;
1373 req.scid = cpu_to_le16(chan->scid);
1374 req.mtu = cpu_to_le16(chan->imtu);
1375 req.mps = cpu_to_le16(chan->mps);
1376 req.credits = cpu_to_le16(chan->rx_credits);
1378 chan->ident = l2cap_get_ident(conn);
1380 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1384 struct l2cap_ecred_conn_data {
1386 struct l2cap_ecred_conn_req req;
1389 struct l2cap_chan *chan;
/* Per-channel iterator callback: fold compatible deferred channels into
 * the enhanced-credit-based connection request being built in *data.
 */
1394 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1396 struct l2cap_ecred_conn_data *conn = data;
/* Skip the initiating channel itself */
1399 if (chan == conn->chan)
1402 if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1405 pid = chan->ops->get_peer_pid(chan);
1407 /* Only add deferred channels with the same PID/PSM */
1408 if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1409 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1412 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1415 l2cap_ecred_init(chan, 0);
1417 /* Set the same ident so we can match on the rsp */
1418 chan->ident = conn->chan->ident;
1420 /* Include all channels deferred */
1421 conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
/* Send an enhanced-credit-based (ECRED) connection request, batching in
 * any other deferred channels with the same peer PID/PSM so a single
 * PDU can open several channels at once.
 */
1426 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1428 struct l2cap_conn *conn = chan->conn;
1429 struct l2cap_ecred_conn_data data;
/* Deferred channels are picked up later by the batching walk instead */
1431 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1434 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1437 l2cap_ecred_init(chan, 0);
1439 memset(&data, 0, sizeof(data));
1440 data.pdu.req.psm = chan->psm;
1441 data.pdu.req.mtu = cpu_to_le16(chan->imtu);
1442 data.pdu.req.mps = cpu_to_le16(chan->mps);
1443 data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1444 data.pdu.scid[0] = cpu_to_le16(chan->scid);
1446 chan->ident = l2cap_get_ident(conn);
1447 data.pid = chan->ops->get_peer_pid(chan);
/* NOTE(review): data.pid is assigned the same value twice (lines 1447
 * and 1451); the second assignment looks redundant — confirm against
 * upstream before removing.
 */
1451 data.pid = chan->ops->get_peer_pid(chan);
/* Fold in any compatible deferred channels before sending */
1453 __l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1455 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1456 sizeof(data.pdu.req) + data.count * sizeof(__le16),
/* Start an LE channel once link security allows it: connectionless/
 * fixed channels become ready immediately, connection-oriented ones in
 * BT_CONNECT send the appropriate (ECRED or LE) connection request.
 */
1460 static void l2cap_le_start(struct l2cap_chan *chan)
1462 struct l2cap_conn *conn = chan->conn;
/* Wait until SMP has raised the link to the required security level */
1464 if (!smp_conn_security(conn->hcon, chan->sec_level))
1468 l2cap_chan_ready(chan);
1472 if (chan->state == BT_CONNECT) {
1473 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1474 l2cap_ecred_connect(chan);
1476 l2cap_le_connect(chan);
/* Dispatch channel setup by transport: AMP discovery for AMP-capable
 * channels, LE start for LE links, otherwise a BR/EDR connection
 * request.
 */
1480 static void l2cap_start_connection(struct l2cap_chan *chan)
1482 if (__amp_capable(chan)) {
1483 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1484 a2mp_discover_amp(chan);
1485 } else if (chan->conn->hcon->type == LE_LINK) {
1486 l2cap_le_start(chan);
1488 l2cap_send_conn_req(chan);
/* Send a one-time L2CAP information request for the peer's feature
 * mask and arm the info timer so the exchange cannot stall forever.
 */
1492 static void l2cap_request_info(struct l2cap_conn *conn)
1494 struct l2cap_info_req req;
/* Only one feature-mask request per connection */
1496 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1499 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1501 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1502 conn->info_ident = l2cap_get_ident(conn);
/* Timeout handler marks the exchange done if no response arrives */
1504 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1506 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Return true if the link's encryption key size is acceptable for
 * establishing L2CAP connections (or if the link is not encrypted at
 * all, in which case no key-size requirement applies).
 */
1510 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1512 /* The minimum encryption key size needs to be enforced by the
1513 * host stack before establishing any L2CAP connections. The
1514 * specification in theory allows a minimum of 1, but to align
1515 * BR/EDR and LE transports, a minimum of 7 is chosen.
1517 * This check might also be called for unencrypted connections
1518 * that have no key size requirements. Ensure that the link is
1519 * actually encrypted before enforcing a key size.
1521 int min_key_size = hcon->hdev->min_enc_key_size;
1523 /* On FIPS security level, key size must be 16 bytes */
1524 if (hcon->sec_level == BT_SECURITY_FIPS)
1527 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1528 hcon->enc_key_size >= min_key_size);
/* Drive connection setup for one channel: LE links go through
 * l2cap_le_start(); BR/EDR links first complete the feature-mask info
 * exchange and security checks, then either start the connection or
 * arm the disconnect timer if the encryption key size is inadequate.
 */
1531 static void l2cap_do_start(struct l2cap_chan *chan)
1533 struct l2cap_conn *conn = chan->conn;
1535 if (conn->hcon->type == LE_LINK) {
1536 l2cap_le_start(chan);
/* Kick off the info exchange if it has not been started yet */
1540 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1541 l2cap_request_info(conn);
/* Wait for the info response before proceeding */
1545 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1548 if (!l2cap_chan_check_security(chan, true) ||
1549 !__l2cap_no_conn_pending(chan))
1552 if (l2cap_check_enc_key_size(conn->hcon))
1553 l2cap_start_connection(chan);
/* Key too small: let the disconnect timer tear the channel down */
1555 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Return non-zero if @mode (ERTM or streaming) is supported by both
 * the local feature mask and the remote @feat_mask.
 */
1558 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1560 u32 local_feat_mask = l2cap_feat_mask;
1562 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1565 case L2CAP_MODE_ERTM:
1566 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1567 case L2CAP_MODE_STREAMING:
1568 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a disconnection request for the channel and move it to
 * BT_DISCONN, recording @err as the reason. ERTM timers are stopped
 * first; A2MP channels change state without sending a PDU.
 */
1574 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1576 struct l2cap_conn *conn = chan->conn;
1577 struct l2cap_disconn_req req;
/* Stop all ERTM retransmission machinery before tearing down */
1582 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1583 __clear_retrans_timer(chan);
1584 __clear_monitor_timer(chan);
1585 __clear_ack_timer(chan);
/* A2MP has no disconnect PDU of its own */
1588 if (chan->scid == L2CAP_CID_A2MP) {
1589 l2cap_state_change(chan, BT_DISCONN);
1593 req.dcid = cpu_to_le16(chan->dcid);
1594 req.scid = cpu_to_le16(chan->scid);
1595 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1598 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1601 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and push its state machine
 * forward: connectionless channels become ready, BT_CONNECT channels
 * (re)attempt connection setup, and BT_CONNECT2 channels answer the
 * pending connection request (success, pending, or authentication
 * pending) and send the first configuration request when appropriate.
 * Runs with conn->chan_lock held; each channel is locked in turn.
 */
1602 static void l2cap_conn_start(struct l2cap_conn *conn)
1604 struct l2cap_chan *chan, *tmp;
1606 BT_DBG("conn %p", conn);
1608 mutex_lock(&conn->chan_lock);
1610 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1611 l2cap_chan_lock(chan);
1613 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1614 l2cap_chan_ready(chan);
1615 l2cap_chan_unlock(chan);
1619 if (chan->state == BT_CONNECT) {
1620 if (!l2cap_chan_check_security(chan, true) ||
1621 !__l2cap_no_conn_pending(chan)) {
1622 l2cap_chan_unlock(chan);
/* Close channels whose fixed mode the peer cannot support */
1626 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1627 && test_bit(CONF_STATE2_DEVICE,
1628 &chan->conf_state)) {
1629 l2cap_chan_close(chan, ECONNRESET);
1630 l2cap_chan_unlock(chan);
1634 if (l2cap_check_enc_key_size(conn->hcon))
1635 l2cap_start_connection(chan);
/* Encryption key too small: refuse the connection */
1637 l2cap_chan_close(chan, ECONNREFUSED);
1639 } else if (chan->state == BT_CONNECT2) {
1640 struct l2cap_conn_rsp rsp;
1642 rsp.scid = cpu_to_le16(chan->dcid);
1643 rsp.dcid = cpu_to_le16(chan->scid);
1645 if (l2cap_chan_check_security(chan, false)) {
1646 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
/* Setup deferred to userspace: answer "pending, authorization" */
1647 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1648 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1649 chan->ops->defer(chan);
1652 l2cap_state_change(chan, BT_CONFIG);
1653 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1654 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: authentication still pending */
1657 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1658 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1661 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only start configuration once, and only after a success response */
1664 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1665 rsp.result != L2CAP_CR_SUCCESS) {
1666 l2cap_chan_unlock(chan);
1670 set_bit(CONF_REQ_SENT, &chan->conf_state);
1671 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1672 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1673 chan->num_conf_req++;
1676 l2cap_chan_unlock(chan);
1679 mutex_unlock(&conn->chan_lock);
/* LE-specific connection-ready handling: kick off pending SMP security
 * and, for the peripheral role, request a connection-parameter update
 * if the current interval is outside the configured min/max range.
 */
1682 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1684 struct hci_conn *hcon = conn->hcon;
1685 struct hci_dev *hdev = hcon->hdev;
1687 BT_DBG("%s conn %p", hdev->name, conn);
1689 /* For outgoing pairing which doesn't necessarily have an
1690 * associated socket (e.g. mgmt_pair_device).
1693 smp_conn_security(hcon, hcon->pending_sec_level);
1695 /* For LE peripheral connections, make sure the connection interval
1696 * is in the range of the minimum and maximum interval that has
1697 * been configured for this connection. If not, then trigger
1698 * the connection update procedure.
1700 if (hcon->role == HCI_ROLE_SLAVE &&
1701 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1702 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1703 struct l2cap_conn_param_update_req req;
1705 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1706 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1707 req.latency = cpu_to_le16(hcon->le_conn_latency);
1708 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1710 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1711 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* Called once the underlying link is up: request the peer's features
 * (ACL), advance every attached channel, run LE-specific ready work,
 * and finally schedule processing of any queued inbound frames.
 */
1715 static void l2cap_conn_ready(struct l2cap_conn *conn)
1717 struct l2cap_chan *chan;
1718 struct hci_conn *hcon = conn->hcon;
1720 BT_DBG("conn %p", conn);
1722 if (hcon->type == ACL_LINK)
1723 l2cap_request_info(conn);
1725 mutex_lock(&conn->chan_lock);
1727 list_for_each_entry(chan, &conn->chan_l, list) {
1729 l2cap_chan_lock(chan);
/* A2MP channels are managed separately */
1731 if (chan->scid == L2CAP_CID_A2MP) {
1732 l2cap_chan_unlock(chan);
1736 if (hcon->type == LE_LINK) {
1737 l2cap_le_start(chan);
1738 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
/* Fixed/connectionless channels wait for the info exchange */
1739 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1740 l2cap_chan_ready(chan);
1741 } else if (chan->state == BT_CONNECT) {
1742 l2cap_do_start(chan);
1745 l2cap_chan_unlock(chan);
1748 mutex_unlock(&conn->chan_lock);
1750 if (hcon->type == LE_LINK)
1751 l2cap_le_conn_ready(conn);
/* Drain frames that arrived before the connection was ready */
1753 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1756 /* Notify sockets that we cannot guarantee reliability anymore */
1757 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1759 struct l2cap_chan *chan;
1761 BT_DBG("conn %p", conn);
1763 mutex_lock(&conn->chan_lock);
/* Only channels that demanded reliable delivery get the error */
1765 list_for_each_entry(chan, &conn->chan_l, list) {
1766 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1767 l2cap_chan_set_err(chan, err);
1770 mutex_unlock(&conn->chan_lock);
/* Info-request timer expired without a response: mark the feature-mask
 * exchange as done anyway and let the pending channels proceed.
 */
1773 static void l2cap_info_timeout(struct work_struct *work)
1775 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1778 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1779 conn->info_ident = 0;
1781 l2cap_conn_start(conn);
1786 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1787 * callback is called during registration. The ->remove callback is called
1788 * during unregistration.
1789 * An l2cap_user object can either be explicitly unregistered or when the
1790 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1791 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1792 * External modules must own a reference to the l2cap_conn object if they intend
1793 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1794 * any time if they don't.
/* Register an external l2cap_user on @conn under the hci_dev lock,
 * invoking its ->probe callback. Fails if the user is already on a
 * list or if the connection has already been torn down.
 */
1797 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1799 struct hci_dev *hdev = conn->hcon->hdev;
1802 /* We need to check whether l2cap_conn is registered. If it is not, we
1803 * must not register the l2cap_user. l2cap_conn_del() unregisters
1804 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1805 * relies on the parent hci_conn object to be locked. This itself relies
1806 * on the hci_dev object to be locked. So we must lock the hci device
/* Reject a user that is already registered somewhere */
1811 if (!list_empty(&user->list)) {
1816 /* conn->hchan is NULL after l2cap_conn_del() was called */
1822 ret = user->probe(conn, user);
1826 list_add(&user->list, &conn->users);
1830 hci_dev_unlock(hdev);
1833 EXPORT_SYMBOL(l2cap_register_user);
/* Unregister an l2cap_user from @conn under the hci_dev lock and
 * invoke its ->remove callback. A no-op if the user is not registered.
 */
1835 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1837 struct hci_dev *hdev = conn->hcon->hdev;
1841 if (list_empty(&user->list))
/* list_del_init() so a later unregister sees an empty list */
1844 list_del_init(&user->list);
1845 user->remove(conn, user);
1848 hci_dev_unlock(hdev);
1850 EXPORT_SYMBOL(l2cap_unregister_user);
/* Remove every registered l2cap_user from the connection, calling each
 * one's ->remove callback. Used during connection teardown.
 */
1852 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1854 struct l2cap_user *user;
1856 while (!list_empty(&conn->users)) {
1857 user = list_first_entry(&conn->users, struct l2cap_user, list);
1858 list_del_init(&user->list);
1859 user->remove(conn, user);
/* Tear down the L2CAP connection attached to @hcon: cancel pending rx
 * work, unregister users, close and delete every channel with @err,
 * release the HCI channel, and drop the connection reference.
 */
1863 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1865 struct l2cap_conn *conn = hcon->l2cap_data;
1866 struct l2cap_chan *chan, *l;
1871 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1873 kfree_skb(conn->rx_skb);
1875 skb_queue_purge(&conn->pending_rx);
1877 /* We can not call flush_work(&conn->pending_rx_work) here since we
1878 * might block if we are running on a worker from the same workqueue
1879 * pending_rx_work is waiting on.
1881 if (work_pending(&conn->pending_rx_work))
1882 cancel_work_sync(&conn->pending_rx_work);
1884 if (work_pending(&conn->id_addr_update_work))
1885 cancel_work_sync(&conn->id_addr_update_work);
1887 l2cap_unregister_all_users(conn);
1889 /* Force the connection to be immediately dropped */
1890 hcon->disc_timeout = 0;
1892 mutex_lock(&conn->chan_lock);
/* Hold each channel across del/close so it cannot vanish under us */
1895 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1896 l2cap_chan_hold(chan);
1897 l2cap_chan_lock(chan);
1899 l2cap_chan_del(chan, err);
1901 chan->ops->close(chan);
1903 l2cap_chan_unlock(chan);
1904 l2cap_chan_put(chan);
1907 mutex_unlock(&conn->chan_lock);
1909 hci_chan_del(conn->hchan);
/* Info timer is only armed once the feature request was sent */
1911 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1912 cancel_delayed_work_sync(&conn->info_timer);
1914 hcon->l2cap_data = NULL;
1916 l2cap_conn_put(conn);
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn and free it.
 */
1919 static void l2cap_conn_free(struct kref *ref)
1921 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1923 hci_conn_put(conn->hcon);
/* Take a reference on the connection; returns @conn for call chaining. */
1927 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1929 kref_get(&conn->ref);
1932 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a connection reference; frees via l2cap_conn_free() on last put. */
1934 void l2cap_conn_put(struct l2cap_conn *conn)
1936 kref_put(&conn->ref, l2cap_conn_free);
1938 EXPORT_SYMBOL(l2cap_conn_put);
1940 /* ---- Socket interface ---- */
1942 /* Find socket with psm and source / destination bdaddr.
1943 * Returns closest match.
/* Searches the global channel list under chan_list_lock; an exact
 * src+dst match wins immediately, otherwise the best wildcard
 * (BDADDR_ANY) candidate is returned. The returned channel has a
 * reference held via l2cap_chan_hold().
 */
1945 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1950 struct l2cap_chan *c, *c1 = NULL;
1952 read_lock(&chan_list_lock);
1954 list_for_each_entry(c, &chan_list, global_l) {
1955 if (state && c->state != state)
/* Channel's source address type must match the link transport */
1958 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1961 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1964 if (c->psm == psm) {
1965 int src_match, dst_match;
1966 int src_any, dst_any;
1969 src_match = !bacmp(&c->src, src);
1970 dst_match = !bacmp(&c->dst, dst);
/* Exact match on both addresses: return it right away */
1971 if (src_match && dst_match) {
1973 read_unlock(&chan_list_lock);
/* Otherwise remember the closest wildcard match */
1978 src_any = !bacmp(&c->src, BDADDR_ANY);
1979 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1980 if ((src_match && dst_any) || (src_any && dst_match) ||
1981 (src_any && dst_any))
1987 l2cap_chan_hold(c1);
1989 read_unlock(&chan_list_lock);
/* ERTM monitor timer expired: feed L2CAP_EV_MONITOR_TO into the tx
 * state machine. The channel reference taken when the timer was armed
 * is dropped on both exit paths.
 */
1996 static void l2cap_monitor_timeout(struct work_struct *work)
1997 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1997 monitor_timer.work);
1999 BT_DBG("chan %p", chan);
2001 l2cap_chan_lock(chan);
/* Early-exit path (channel no longer eligible): release and return */
2004 l2cap_chan_unlock(chan);
2005 l2cap_chan_put(chan);
2009 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2011 l2cap_chan_unlock(chan);
2012 l2cap_chan_put(chan);
/* ERTM retransmission timer expired: feed L2CAP_EV_RETRANS_TO into the
 * tx state machine. Mirrors l2cap_monitor_timeout() in locking and
 * reference handling.
 */
2017 static void l2cap_retrans_timeout(struct work_struct *work)
2018 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2018 retrans_timer.work);
2020 BT_DBG("chan %p", chan);
2022 l2cap_chan_lock(chan);
/* Early-exit path: release the timer's channel reference */
2025 l2cap_chan_unlock(chan);
2026 l2cap_chan_put(chan);
2030 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2031 l2cap_chan_unlock(chan);
2032 l2cap_chan_put(chan);
/* Streaming-mode transmit: append @skbs to the tx queue and send every
 * queued frame immediately (no acknowledgements, no retransmission),
 * stamping each with the next tx sequence number and optional FCS.
 */
2035 static void l2cap_streaming_send(struct l2cap_chan *chan,
2036 struct sk_buff_head *skbs)
2038 struct sk_buff *skb;
2039 struct l2cap_ctrl *control;
2041 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Hold off while an AMP channel move is in progress */
2043 if (__chan_is_moving(chan))
2046 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2048 while (!skb_queue_empty(&chan->tx_q)) {
2050 skb = skb_dequeue(&chan->tx_q);
2052 bt_cb(skb)->l2cap.retries = 1;
2053 control = &bt_cb(skb)->l2cap;
2055 control->reqseq = 0;
2056 control->txseq = chan->next_tx_seq;
2058 __pack_control(chan, control, skb);
/* Append a CRC16 FCS when the channel negotiated one */
2060 if (chan->fcs == L2CAP_FCS_CRC16) {
2061 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2062 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2065 l2cap_do_send(chan, skb);
2067 BT_DBG("Sent txseq %u", control->txseq);
2069 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2070 chan->frames_sent++;
/* ERTM transmit: send pending I-frames from tx_send_head while the
 * remote tx window has room and the tx state machine is in XMIT.
 * Frames are cloned so the original stays queued for retransmission;
 * each send piggybacks the current ack (reqseq) and arms the
 * retransmission timer. Returns the number of frames sent.
 */
2074 static int l2cap_ertm_send(struct l2cap_chan *chan)
2076 struct sk_buff *skb, *tx_skb;
2077 struct l2cap_ctrl *control;
2080 BT_DBG("chan %p", chan);
2082 if (chan->state != BT_CONNECTED)
/* Peer signalled receiver-not-ready: do not transmit */
2085 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2088 if (__chan_is_moving(chan))
2091 while (chan->tx_send_head &&
2092 chan->unacked_frames < chan->remote_tx_win &&
2093 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2095 skb = chan->tx_send_head;
2097 bt_cb(skb)->l2cap.retries = 1;
2098 control = &bt_cb(skb)->l2cap;
/* Carry the F-bit if one is owed to the peer */
2100 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2103 control->reqseq = chan->buffer_seq;
2104 chan->last_acked_seq = chan->buffer_seq;
2105 control->txseq = chan->next_tx_seq;
2107 __pack_control(chan, control, skb);
2109 if (chan->fcs == L2CAP_FCS_CRC16) {
2110 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2111 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2114 /* Clone after data has been modified. Data is assumed to be
2115 read-only (for locking purposes) on cloned sk_buffs.
2117 tx_skb = skb_clone(skb, GFP_KERNEL);
2122 __set_retrans_timer(chan);
2124 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2125 chan->unacked_frames++;
2126 chan->frames_sent++;
/* Advance tx_send_head to the next unsent frame, if any */
2129 if (skb_queue_is_last(&chan->tx_q, skb))
2130 chan->tx_send_head = NULL;
2132 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2134 l2cap_do_send(chan, tx_skb);
2135 BT_DBG("Sent txseq %u", control->txseq);
2138 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2139 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on retrans_list: look the
 * frame up in tx_q, enforce the max_tx retry limit (disconnecting on
 * overrun), rebuild its control field and FCS, and send a fresh copy.
 */
2146 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2147 struct l2cap_ctrl control;
2147 struct sk_buff *skb;
2148 struct sk_buff *tx_skb;
2151 BT_DBG("chan %p", chan);
2153 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2156 if (__chan_is_moving(chan))
2159 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2160 seq = l2cap_seq_list_pop(&chan->retrans_list);
2162 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2164 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2169 bt_cb(skb)->l2cap.retries++;
2170 control = bt_cb(skb)->l2cap;
/* Give up and disconnect once the retry limit is exceeded */
2172 if (chan->max_tx != 0 &&
2173 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2174 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2175 l2cap_send_disconn_req(chan, ECONNRESET);
2176 l2cap_seq_list_clear(&chan->retrans_list);
2180 control.reqseq = chan->buffer_seq;
2181 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2186 if (skb_cloned(skb)) {
2187 /* Cloned sk_buffs are read-only, so we need a
2190 tx_skb = skb_copy(skb, GFP_KERNEL);
2192 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: abandon this retransmission round */
2196 l2cap_seq_list_clear(&chan->retrans_list);
2200 /* Update skb contents */
2201 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2202 put_unaligned_le32(__pack_extended_control(&control),
2203 tx_skb->data + L2CAP_HDR_SIZE);
2205 put_unaligned_le16(__pack_enhanced_control(&control),
2206 tx_skb->data + L2CAP_HDR_SIZE);
/* Control field changed, so the FCS must be recomputed */
2210 if (chan->fcs == L2CAP_FCS_CRC16) {
2211 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2212 tx_skb->len - L2CAP_FCS_SIZE);
2213 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2217 l2cap_do_send(chan, tx_skb);
2219 BT_DBG("Resent txseq %d", control.txseq);
2221 chan->last_acked_seq = chan->buffer_seq;
/* Queue a single frame (control->reqseq) for retransmission and flush
 * the retransmission list immediately.
 */
2225 static void l2cap_retransmit(struct l2cap_chan *chan,
2226 struct l2cap_ctrl *control)
2228 BT_DBG("chan %p, control %p", chan, control);
2230 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2231 l2cap_ertm_resend(chan);
/* Rebuild the retransmission list with every unacked frame starting at
 * control->reqseq (up to tx_send_head) and resend them. Skipped while
 * the peer is busy (REJ/SREJ recovery resumes once it clears).
 */
2234 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2235 struct l2cap_ctrl *control)
2237 struct sk_buff *skb;
2239 BT_DBG("chan %p, control %p", chan, control);
2242 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2244 l2cap_seq_list_clear(&chan->retrans_list);
2246 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2249 if (chan->unacked_frames) {
/* Find the first frame to resend: reqseq, or end of the sent range */
2250 skb_queue_walk(&chan->tx_q, skb) {
2251 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2252 skb == chan->tx_send_head)
/* Queue everything from there up to (not including) tx_send_head */
2256 skb_queue_walk_from(&chan->tx_q, skb) {
2257 if (skb == chan->tx_send_head)
2260 l2cap_seq_list_append(&chan->retrans_list,
2261 bt_cb(skb)->l2cap.txseq);
2264 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames. Sends an RNR S-frame when the local
 * side is busy; otherwise tries to piggyback the ack on outgoing
 * I-frames, sends an explicit RR once the unacked window is 3/4 full,
 * or defers by (re)arming the ack timer.
 */
2268 static void l2cap_send_ack(struct l2cap_chan *chan)
2270 struct l2cap_ctrl control;
2271 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2272 chan->last_acked_seq);
2275 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2276 chan, chan->last_acked_seq, chan->buffer_seq);
2278 memset(&control, 0, sizeof(control));
/* Local busy in RECV state: tell the peer receiver-not-ready */
2281 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2282 chan->rx_state == L2CAP_RX_STATE_RECV) {
2283 __clear_ack_timer(chan);
2284 control.super = L2CAP_SUPER_RNR;
2285 control.reqseq = chan->buffer_seq;
2286 l2cap_send_sframe(chan, &control);
2288 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2289 l2cap_ertm_send(chan);
2290 /* If any i-frames were sent, they included an ack */
2291 if (chan->buffer_seq == chan->last_acked_seq)
2295 /* Ack now if the window is 3/4ths full.
2296 * Calculate without mul or div
/* threshold = ack_win * 3 (compared against frames_to_ack * 4 below) */
2298 threshold = chan->ack_win;
2299 threshold += threshold << 1;
2302 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2305 if (frames_to_ack >= threshold) {
2306 __clear_ack_timer(chan);
2307 control.super = L2CAP_SUPER_RR;
2308 control.reqseq = chan->buffer_seq;
2309 l2cap_send_sframe(chan, &control);
/* Not enough pending acks yet: delay via the ack timer */
2314 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb, placing the first
 * @count bytes in the skb head and the remainder in MTU-sized
 * continuation fragments chained on frag_list. Returns 0 or a
 * negative error (e.g. -EFAULT on a failed copy, or the skb
 * allocation error).
 */
2318 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2319 struct msghdr *msg, int len,
2320 int count, struct sk_buff *skb)
2322 struct l2cap_conn *conn = chan->conn;
2323 struct sk_buff **frag;
2326 if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2332 /* Continuation fragments (no L2CAP header) */
2333 frag = &skb_shinfo(skb)->frag_list;
2335 struct sk_buff *tmp;
/* Each fragment is capped at the HCI link MTU */
2337 count = min_t(unsigned int, conn->mtu, len);
2339 tmp = chan->ops->alloc_skb(chan, 0, count,
2340 msg->msg_flags & MSG_DONTWAIT);
2342 return PTR_ERR(tmp);
2346 if (!copy_from_iter_full(skb_put(*frag, count), count,
/* Account fragment bytes in the parent skb's totals */
2353 skb->len += (*frag)->len;
2354 skb->data_len += (*frag)->len;
2356 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the user payload from @msg. Returns the skb or
 * an ERR_PTR.
 */
2362 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2363 struct msghdr *msg, size_t len)
2365 struct l2cap_conn *conn = chan->conn;
2366 struct sk_buff *skb;
2367 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2368 struct l2cap_hdr *lh;
2370 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2371 __le16_to_cpu(chan->psm), len);
/* First chunk goes in the skb head; the rest becomes fragments */
2373 count = min_t(unsigned int, (conn->mtu - hlen), len);
2375 skb = chan->ops->alloc_skb(chan, hlen, count,
2376 msg->msg_flags & MSG_DONTWAIT);
2380 /* Create L2CAP header */
2381 lh = skb_put(skb, L2CAP_HDR_SIZE);
2382 lh->cid = cpu_to_le16(chan->dcid);
2383 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2384 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2386 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2387 if (unlikely(err < 0)) {
2389 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus the user
 * payload from @msg. Returns the skb or an ERR_PTR.
 */
2394 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2395 struct msghdr *msg, size_t len)
2397 struct l2cap_conn *conn = chan->conn;
2398 struct sk_buff *skb;
2400 struct l2cap_hdr *lh;
2402 BT_DBG("chan %p len %zu", chan, len);
2404 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2406 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2407 msg->msg_flags & MSG_DONTWAIT);
2411 /* Create L2CAP header */
2412 lh = skb_put(skb, L2CAP_HDR_SIZE);
2413 lh->cid = cpu_to_le16(chan->dcid);
2414 lh->len = cpu_to_le16(len);
2416 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2417 if (unlikely(err < 0)) {
2419 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU-length field for
 * the first segment of a segmented SDU (@sdulen != 0), the payload,
 * and room reserved for an FCS if one was negotiated.
 * Returns the skb or an ERR_PTR.
 */
2424 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2425 struct msghdr *msg, size_t len,
2428 struct l2cap_conn *conn = chan->conn;
2429 struct sk_buff *skb;
2430 int err, count, hlen;
2431 struct l2cap_hdr *lh;
2433 BT_DBG("chan %p len %zu", chan, len);
2436 return ERR_PTR(-ENOTCONN);
2438 hlen = __ertm_hdr_size(chan);
/* SDU-length field is only present in the START segment */
2441 hlen += L2CAP_SDULEN_SIZE;
2443 if (chan->fcs == L2CAP_FCS_CRC16)
2444 hlen += L2CAP_FCS_SIZE;
2446 count = min_t(unsigned int, (conn->mtu - hlen), len);
2448 skb = chan->ops->alloc_skb(chan, hlen, count,
2449 msg->msg_flags & MSG_DONTWAIT);
2453 /* Create L2CAP header */
2454 lh = skb_put(skb, L2CAP_HDR_SIZE);
2455 lh->cid = cpu_to_le16(chan->dcid);
2456 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2458 /* Control header is populated later */
2459 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2460 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))
2462 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2465 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2467 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2468 if (unlikely(err < 0)) {
2470 return ERR_PTR(err);
2473 bt_cb(skb)->l2cap.fcs = chan->fcs;
2474 bt_cb(skb)->l2cap.retries = 0;
/* Segment an outgoing SDU of @len bytes into ERTM/streaming I-frame
 * PDUs queued on @seg_queue, tagging each with the proper SAR value
 * (UNSEGMENTED, or START/CONTINUE/END). PDU size is bounded by the
 * HCI MTU, BR/EDR payload limits, header/FCS overhead, and the
 * remote's MPS. Returns 0 or a negative error (queue purged on
 * failure).
 */
2478 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2479 struct sk_buff_head *seg_queue,
2480 struct msghdr *msg, size_t len)
2482 struct sk_buff *skb;
2487 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2489 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2490 * so fragmented skbs are not used. The HCI layer's handling
2491 * of fragmented skbs is not compatible with ERTM's queueing.
2494 /* PDU size is derived from the HCI MTU */
2495 pdu_len = chan->conn->mtu;
2497 /* Constrain PDU size for BR/EDR connections */
2499 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2501 /* Adjust for largest possible L2CAP overhead. */
2503 pdu_len -= L2CAP_FCS_SIZE;
2505 pdu_len -= __ertm_hdr_size(chan);
2507 /* Remote device may have requested smaller PDUs */
2508 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2510 if (len <= pdu_len) {
2511 sar = L2CAP_SAR_UNSEGMENTED;
2515 sar = L2CAP_SAR_START;
2520 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
/* On PDU creation failure, discard everything built so far */
2523 __skb_queue_purge(seg_queue);
2524 return PTR_ERR(skb);
2527 bt_cb(skb)->l2cap.sar = sar;
2528 __skb_queue_tail(seg_queue, skb);
/* Last remaining chunk gets SAR_END; middle chunks SAR_CONTINUE */
2534 if (len <= pdu_len) {
2535 sar = L2CAP_SAR_END;
2538 sar = L2CAP_SAR_CONTINUE;
/* Build one LE credit-based-mode PDU: L2CAP header, an SDU-length
 * field on the first segment only (@sdulen != 0), then the payload.
 * Returns the skb or an ERR_PTR.
 */
2545 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2547 size_t len, u16 sdulen)
2549 struct l2cap_conn *conn = chan->conn;
2550 struct sk_buff *skb;
2551 int err, count, hlen;
2552 struct l2cap_hdr *lh;
2554 BT_DBG("chan %p len %zu", chan, len);
2557 return ERR_PTR(-ENOTCONN);
2559 hlen = L2CAP_HDR_SIZE;
/* SDU length is carried only in the first PDU of an SDU */
2562 hlen += L2CAP_SDULEN_SIZE;
2564 count = min_t(unsigned int, (conn->mtu - hlen), len);
2566 skb = chan->ops->alloc_skb(chan, hlen, count,
2567 msg->msg_flags & MSG_DONTWAIT);
2571 /* Create L2CAP header */
2572 lh = skb_put(skb, L2CAP_HDR_SIZE);
2573 lh->cid = cpu_to_le16(chan->dcid);
2574 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2577 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2579 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2580 if (unlikely(err < 0)) {
2582 return ERR_PTR(err);
/* Segment an SDU for LE credit-based flow control: the first PDU
 * carries the SDU length (so its payload budget is MPS minus the
 * 2-byte SDU-length field), subsequent PDUs use the full MPS.
 * Built PDUs are queued on @seg_queue; returns 0 or a negative error.
 */
2588 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2589 struct sk_buff_head *seg_queue,
2590 struct msghdr *msg, size_t len)
2592 struct sk_buff *skb;
2596 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
/* First PDU: reserve room for the SDU-length field */
2599 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2605 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2607 __skb_queue_purge(seg_queue);
2608 return PTR_ERR(skb);
2611 __skb_queue_tail(seg_queue, skb);
/* Later PDUs have no SDU-length field, so the budget grows back */
2617 pdu_len += L2CAP_SDULEN_SIZE;
/* Transmit queued LE credit-based PDUs while the peer still has
 * credits available; stops when credits or the queue run out.
 */
2624 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2628 BT_DBG("chan %p", chan);
2630 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2631 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2636 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2637 skb_queue_len(&chan->tx_q));
/* Entry point for sending user data on a channel. Dispatches on
 * channel type/mode: connectionless, LE/extended credit-based, basic,
 * or ERTM/streaming (the latter two via SDU segmentation and the tx
 * state machine). The channel lock can be dropped while skbs are
 * allocated, so the connected state is rechecked afterwards.
 * Returns bytes queued/sent or a negative error.
 */
2640 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2642 struct sk_buff *skb;
2644 struct sk_buff_head seg_queue;
2649 /* Connectionless channel */
2650 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2651 skb = l2cap_create_connless_pdu(chan, msg, len);
2653 return PTR_ERR(skb);
2655 /* Channel lock is released before requesting new skb and then
2656 * reacquired thus we need to recheck channel state.
2658 if (chan->state != BT_CONNECTED) {
2663 l2cap_do_send(chan, skb);
2667 switch (chan->mode) {
2668 case L2CAP_MODE_LE_FLOWCTL:
2669 case L2CAP_MODE_EXT_FLOWCTL:
2670 /* Check outgoing MTU */
2671 if (len > chan->omtu)
2674 __skb_queue_head_init(&seg_queue);
2676 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2678 if (chan->state != BT_CONNECTED) {
2679 __skb_queue_purge(&seg_queue);
2686 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2688 l2cap_le_flowctl_send(chan);
/* Out of credits: tell the owner to stop feeding us data */
2690 if (!chan->tx_credits)
2691 chan->ops->suspend(chan);
2697 case L2CAP_MODE_BASIC:
2698 /* Check outgoing MTU */
2699 if (len > chan->omtu)
2702 /* Create a basic PDU */
2703 skb = l2cap_create_basic_pdu(chan, msg, len);
2705 return PTR_ERR(skb);
2707 /* Channel lock is released before requesting new skb and then
2708 * reacquired thus we need to recheck channel state.
2710 if (chan->state != BT_CONNECTED) {
2715 l2cap_do_send(chan, skb);
2719 case L2CAP_MODE_ERTM:
2720 case L2CAP_MODE_STREAMING:
2721 /* Check outgoing MTU */
2722 if (len > chan->omtu) {
2727 __skb_queue_head_init(&seg_queue);
2729 /* Do segmentation before calling in to the state machine,
2730 * since it's possible to block while waiting for memory
2733 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2735 /* The channel could have been closed while segmenting,
2736 * check that it is still connected.
2738 if (chan->state != BT_CONNECTED) {
2739 __skb_queue_purge(&seg_queue);
2746 if (chan->mode == L2CAP_MODE_ERTM)
2747 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2749 l2cap_streaming_send(chan, &seg_queue);
2753 /* If the skbs were not queued for sending, they'll still be in
2754 * seg_queue and need to be purged.
2756 __skb_queue_purge(&seg_queue);
/* Unknown/unhandled mode falls through to an error */
2760 BT_DBG("bad state %1.1x", chan->mode);
2766 EXPORT_SYMBOL_GPL(l2cap_chan_send);
/* Send SREJ S-frames for every missing sequence number between the
 * expected tx seq and @txseq (skipping frames already buffered in
 * srej_q), recording each on srej_list, then advance expected_tx_seq
 * past @txseq.
 */
2768 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2770 struct l2cap_ctrl control;
2773 BT_DBG("chan %p, txseq %u", chan, txseq);
2775 memset(&control, 0, sizeof(control));
2777 control.super = L2CAP_SUPER_SREJ;
2779 for (seq = chan->expected_tx_seq; seq != txseq;
2780 seq = __next_seq(chan, seq)) {
2781 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2782 control.reqseq = seq;
2783 l2cap_send_sframe(chan, &control);
2784 l2cap_seq_list_append(&chan->srej_list, seq);
2788 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recent entry on srej_list; a no-op if
 * the list is empty.
 */
2791 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2793 struct l2cap_ctrl control;
2795 BT_DBG("chan %p", chan);
2797 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2800 memset(&control, 0, sizeof(control));
2802 control.super = L2CAP_SUPER_SREJ;
2803 control.reqseq = chan->srej_list.tail;
2804 l2cap_send_sframe(chan, &control);
/* Re-send SREJ frames for every outstanding entry on srej_list except
 * @txseq, rotating each entry back onto the list so it remains
 * tracked. A single pass is enforced by remembering the initial head.
 */
2807 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2809 struct l2cap_ctrl control;
2813 BT_DBG("chan %p, txseq %u", chan, txseq);
2815 memset(&control, 0, sizeof(control));
2817 control.super = L2CAP_SUPER_SREJ;
2819 /* Capture initial list head to allow only one pass through the list. */
2820 initial_head = chan->srej_list.head;
2823 seq = l2cap_seq_list_pop(&chan->srej_list);
2824 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2827 control.reqseq = seq;
2828 l2cap_send_sframe(chan, &control);
2829 l2cap_seq_list_append(&chan->srej_list, seq);
2830 } while (chan->srej_list.head != initial_head);
/* Process an incoming ack (reqseq): release every tx_q frame from
 * expected_ack_seq up to (not including) @reqseq, update the expected
 * ack sequence, and stop the retransmission timer once nothing remains
 * unacked.
 */
2833 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2835 struct sk_buff *acked_skb;
2838 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing to do if nothing is outstanding or the ack is stale */
2840 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2843 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2844 chan->expected_ack_seq, chan->unacked_frames);
2846 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2847 ackseq = __next_seq(chan, ackseq)) {
2849 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2851 skb_unlink(acked_skb, &chan->tx_q);
2852 kfree_skb(acked_skb);
2853 chan->unacked_frames--;
2857 chan->expected_ack_seq = reqseq;
2859 if (chan->unacked_frames == 0)
2860 __clear_retrans_timer(chan);
2862 BT_DBG("unacked_frames %u", chan->unacked_frames);
2865 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2867 BT_DBG("chan %p", chan);
2869 chan->expected_tx_seq = chan->buffer_seq;
2870 l2cap_seq_list_clear(&chan->srej_list);
2871 skb_queue_purge(&chan->srej_q);
2872 chan->rx_state = L2CAP_RX_STATE_RECV;
2875 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2876 struct l2cap_ctrl *control,
2877 struct sk_buff_head *skbs, u8 event)
2879 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2883 case L2CAP_EV_DATA_REQUEST:
2884 if (chan->tx_send_head == NULL)
2885 chan->tx_send_head = skb_peek(skbs);
2887 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2888 l2cap_ertm_send(chan);
2890 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2891 BT_DBG("Enter LOCAL_BUSY");
2892 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2894 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2895 /* The SREJ_SENT state must be aborted if we are to
2896 * enter the LOCAL_BUSY state.
2898 l2cap_abort_rx_srej_sent(chan);
2901 l2cap_send_ack(chan);
2904 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2905 BT_DBG("Exit LOCAL_BUSY");
2906 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2908 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2909 struct l2cap_ctrl local_control;
2911 memset(&local_control, 0, sizeof(local_control));
2912 local_control.sframe = 1;
2913 local_control.super = L2CAP_SUPER_RR;
2914 local_control.poll = 1;
2915 local_control.reqseq = chan->buffer_seq;
2916 l2cap_send_sframe(chan, &local_control);
2918 chan->retry_count = 1;
2919 __set_monitor_timer(chan);
2920 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2923 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2924 l2cap_process_reqseq(chan, control->reqseq);
2926 case L2CAP_EV_EXPLICIT_POLL:
2927 l2cap_send_rr_or_rnr(chan, 1);
2928 chan->retry_count = 1;
2929 __set_monitor_timer(chan);
2930 __clear_ack_timer(chan);
2931 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2933 case L2CAP_EV_RETRANS_TO:
2934 l2cap_send_rr_or_rnr(chan, 1);
2935 chan->retry_count = 1;
2936 __set_monitor_timer(chan);
2937 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2939 case L2CAP_EV_RECV_FBIT:
2940 /* Nothing to process */
2947 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2948 struct l2cap_ctrl *control,
2949 struct sk_buff_head *skbs, u8 event)
2951 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2955 case L2CAP_EV_DATA_REQUEST:
2956 if (chan->tx_send_head == NULL)
2957 chan->tx_send_head = skb_peek(skbs);
2958 /* Queue data, but don't send. */
2959 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2961 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2962 BT_DBG("Enter LOCAL_BUSY");
2963 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2965 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2966 /* The SREJ_SENT state must be aborted if we are to
2967 * enter the LOCAL_BUSY state.
2969 l2cap_abort_rx_srej_sent(chan);
2972 l2cap_send_ack(chan);
2975 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2976 BT_DBG("Exit LOCAL_BUSY");
2977 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2979 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2980 struct l2cap_ctrl local_control;
2981 memset(&local_control, 0, sizeof(local_control));
2982 local_control.sframe = 1;
2983 local_control.super = L2CAP_SUPER_RR;
2984 local_control.poll = 1;
2985 local_control.reqseq = chan->buffer_seq;
2986 l2cap_send_sframe(chan, &local_control);
2988 chan->retry_count = 1;
2989 __set_monitor_timer(chan);
2990 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2993 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2994 l2cap_process_reqseq(chan, control->reqseq);
2997 case L2CAP_EV_RECV_FBIT:
2998 if (control && control->final) {
2999 __clear_monitor_timer(chan);
3000 if (chan->unacked_frames > 0)
3001 __set_retrans_timer(chan);
3002 chan->retry_count = 0;
3003 chan->tx_state = L2CAP_TX_STATE_XMIT;
3004 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3007 case L2CAP_EV_EXPLICIT_POLL:
3010 case L2CAP_EV_MONITOR_TO:
3011 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3012 l2cap_send_rr_or_rnr(chan, 1);
3013 __set_monitor_timer(chan);
3014 chan->retry_count++;
3016 l2cap_send_disconn_req(chan, ECONNABORTED);
3024 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3025 struct sk_buff_head *skbs, u8 event)
3027 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3028 chan, control, skbs, event, chan->tx_state);
3030 switch (chan->tx_state) {
3031 case L2CAP_TX_STATE_XMIT:
3032 l2cap_tx_state_xmit(chan, control, skbs, event);
3034 case L2CAP_TX_STATE_WAIT_F:
3035 l2cap_tx_state_wait_f(chan, control, skbs, event);
3043 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3044 struct l2cap_ctrl *control)
3046 BT_DBG("chan %p, control %p", chan, control);
3047 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3050 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3051 struct l2cap_ctrl *control)
3053 BT_DBG("chan %p, control %p", chan, control);
3054 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3057 /* Copy frame to all raw sockets on that connection */
3058 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3060 struct sk_buff *nskb;
3061 struct l2cap_chan *chan;
3063 BT_DBG("conn %p", conn);
3065 mutex_lock(&conn->chan_lock);
3067 list_for_each_entry(chan, &conn->chan_l, list) {
3068 if (chan->chan_type != L2CAP_CHAN_RAW)
3071 /* Don't send frame to the channel it came from */
3072 if (bt_cb(skb)->l2cap.chan == chan)
3075 nskb = skb_clone(skb, GFP_KERNEL);
3078 if (chan->ops->recv(chan, nskb))
3082 mutex_unlock(&conn->chan_lock);
3085 /* ---- L2CAP signalling commands ---- */
3086 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3087 u8 ident, u16 dlen, void *data)
3089 struct sk_buff *skb, **frag;
3090 struct l2cap_cmd_hdr *cmd;
3091 struct l2cap_hdr *lh;
3094 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3095 conn, code, ident, dlen);
3097 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3100 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3101 count = min_t(unsigned int, conn->mtu, len);
3103 skb = bt_skb_alloc(count, GFP_KERNEL);
3107 lh = skb_put(skb, L2CAP_HDR_SIZE);
3108 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3110 if (conn->hcon->type == LE_LINK)
3111 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3113 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3115 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3118 cmd->len = cpu_to_le16(dlen);
3121 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3122 skb_put_data(skb, data, count);
3128 /* Continuation fragments (no L2CAP header) */
3129 frag = &skb_shinfo(skb)->frag_list;
3131 count = min_t(unsigned int, conn->mtu, len);
3133 *frag = bt_skb_alloc(count, GFP_KERNEL);
3137 skb_put_data(*frag, data, count);
3142 frag = &(*frag)->next;
3152 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3155 struct l2cap_conf_opt *opt = *ptr;
3158 len = L2CAP_CONF_OPT_SIZE + opt->len;
3166 *val = *((u8 *) opt->val);
3170 *val = get_unaligned_le16(opt->val);
3174 *val = get_unaligned_le32(opt->val);
3178 *val = (unsigned long) opt->val;
3182 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3186 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3188 struct l2cap_conf_opt *opt = *ptr;
3190 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3192 if (size < L2CAP_CONF_OPT_SIZE + len)
3200 *((u8 *) opt->val) = val;
3204 put_unaligned_le16(val, opt->val);
3208 put_unaligned_le32(val, opt->val);
3212 memcpy(opt->val, (void *) val, len);
3216 *ptr += L2CAP_CONF_OPT_SIZE + len;
3219 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3221 struct l2cap_conf_efs efs;
3223 switch (chan->mode) {
3224 case L2CAP_MODE_ERTM:
3225 efs.id = chan->local_id;
3226 efs.stype = chan->local_stype;
3227 efs.msdu = cpu_to_le16(chan->local_msdu);
3228 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3229 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3230 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3233 case L2CAP_MODE_STREAMING:
3235 efs.stype = L2CAP_SERV_BESTEFFORT;
3236 efs.msdu = cpu_to_le16(chan->local_msdu);
3237 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3246 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3247 (unsigned long) &efs, size);
3250 static void l2cap_ack_timeout(struct work_struct *work)
3252 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3256 BT_DBG("chan %p", chan);
3258 l2cap_chan_lock(chan);
3260 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3261 chan->last_acked_seq);
3264 l2cap_send_rr_or_rnr(chan, 0);
3266 l2cap_chan_unlock(chan);
3267 l2cap_chan_put(chan);
3270 int l2cap_ertm_init(struct l2cap_chan *chan)
3274 chan->next_tx_seq = 0;
3275 chan->expected_tx_seq = 0;
3276 chan->expected_ack_seq = 0;
3277 chan->unacked_frames = 0;
3278 chan->buffer_seq = 0;
3279 chan->frames_sent = 0;
3280 chan->last_acked_seq = 0;
3282 chan->sdu_last_frag = NULL;
3285 skb_queue_head_init(&chan->tx_q);
3287 chan->local_amp_id = AMP_ID_BREDR;
3288 chan->move_id = AMP_ID_BREDR;
3289 chan->move_state = L2CAP_MOVE_STABLE;
3290 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3292 if (chan->mode != L2CAP_MODE_ERTM)
3295 chan->rx_state = L2CAP_RX_STATE_RECV;
3296 chan->tx_state = L2CAP_TX_STATE_XMIT;
3298 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3299 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3300 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3302 skb_queue_head_init(&chan->srej_q);
3304 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3308 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3310 l2cap_seq_list_free(&chan->srej_list);
3315 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3318 case L2CAP_MODE_STREAMING:
3319 case L2CAP_MODE_ERTM:
3320 if (l2cap_mode_supported(mode, remote_feat_mask))
3324 return L2CAP_MODE_BASIC;
3328 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3330 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3331 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3334 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3336 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3337 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3340 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3341 struct l2cap_conf_rfc *rfc)
3343 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3344 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3346 /* Class 1 devices have must have ERTM timeouts
3347 * exceeding the Link Supervision Timeout. The
3348 * default Link Supervision Timeout for AMP
3349 * controllers is 10 seconds.
3351 * Class 1 devices use 0xffffffff for their
3352 * best-effort flush timeout, so the clamping logic
3353 * will result in a timeout that meets the above
3354 * requirement. ERTM timeouts are 16-bit values, so
3355 * the maximum timeout is 65.535 seconds.
3358 /* Convert timeout to milliseconds and round */
3359 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3361 /* This is the recommended formula for class 2 devices
3362 * that start ERTM timers when packets are sent to the
3365 ertm_to = 3 * ertm_to + 500;
3367 if (ertm_to > 0xffff)
3370 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3371 rfc->monitor_timeout = rfc->retrans_timeout;
3373 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3374 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3378 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3380 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3381 __l2cap_ews_supported(chan->conn)) {
3382 /* use extended control field */
3383 set_bit(FLAG_EXT_CTRL, &chan->flags);
3384 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3386 chan->tx_win = min_t(u16, chan->tx_win,
3387 L2CAP_DEFAULT_TX_WINDOW);
3388 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3390 chan->ack_win = chan->tx_win;
3393 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3395 struct hci_conn *conn = chan->conn->hcon;
3397 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3399 /* The 2-DH1 packet has between 2 and 56 information bytes
3400 * (including the 2-byte payload header)
3402 if (!(conn->pkt_type & HCI_2DH1))
3405 /* The 3-DH1 packet has between 2 and 85 information bytes
3406 * (including the 2-byte payload header)
3408 if (!(conn->pkt_type & HCI_3DH1))
3411 /* The 2-DH3 packet has between 2 and 369 information bytes
3412 * (including the 2-byte payload header)
3414 if (!(conn->pkt_type & HCI_2DH3))
3417 /* The 3-DH3 packet has between 2 and 554 information bytes
3418 * (including the 2-byte payload header)
3420 if (!(conn->pkt_type & HCI_3DH3))
3423 /* The 2-DH5 packet has between 2 and 681 information bytes
3424 * (including the 2-byte payload header)
3426 if (!(conn->pkt_type & HCI_2DH5))
3429 /* The 3-DH5 packet has between 2 and 1023 information bytes
3430 * (including the 2-byte payload header)
3432 if (!(conn->pkt_type & HCI_3DH5))
3436 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3438 struct l2cap_conf_req *req = data;
3439 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3440 void *ptr = req->data;
3441 void *endptr = data + data_size;
3444 BT_DBG("chan %p", chan);
3446 if (chan->num_conf_req || chan->num_conf_rsp)
3449 switch (chan->mode) {
3450 case L2CAP_MODE_STREAMING:
3451 case L2CAP_MODE_ERTM:
3452 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3455 if (__l2cap_efs_supported(chan->conn))
3456 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3460 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3465 if (chan->imtu != L2CAP_DEFAULT_MTU) {
3467 l2cap_mtu_auto(chan);
3468 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3472 switch (chan->mode) {
3473 case L2CAP_MODE_BASIC:
3477 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3478 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3481 rfc.mode = L2CAP_MODE_BASIC;
3483 rfc.max_transmit = 0;
3484 rfc.retrans_timeout = 0;
3485 rfc.monitor_timeout = 0;
3486 rfc.max_pdu_size = 0;
3488 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3489 (unsigned long) &rfc, endptr - ptr);
3492 case L2CAP_MODE_ERTM:
3493 rfc.mode = L2CAP_MODE_ERTM;
3494 rfc.max_transmit = chan->max_tx;
3496 __l2cap_set_ertm_timeouts(chan, &rfc);
3498 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3499 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3501 rfc.max_pdu_size = cpu_to_le16(size);
3503 l2cap_txwin_setup(chan);
3505 rfc.txwin_size = min_t(u16, chan->tx_win,
3506 L2CAP_DEFAULT_TX_WINDOW);
3508 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3509 (unsigned long) &rfc, endptr - ptr);
3511 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3512 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3514 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3515 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3516 chan->tx_win, endptr - ptr);
3518 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3519 if (chan->fcs == L2CAP_FCS_NONE ||
3520 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3521 chan->fcs = L2CAP_FCS_NONE;
3522 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3523 chan->fcs, endptr - ptr);
3527 case L2CAP_MODE_STREAMING:
3528 l2cap_txwin_setup(chan);
3529 rfc.mode = L2CAP_MODE_STREAMING;
3531 rfc.max_transmit = 0;
3532 rfc.retrans_timeout = 0;
3533 rfc.monitor_timeout = 0;
3535 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3536 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3538 rfc.max_pdu_size = cpu_to_le16(size);
3540 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3541 (unsigned long) &rfc, endptr - ptr);
3543 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3544 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3546 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3547 if (chan->fcs == L2CAP_FCS_NONE ||
3548 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3549 chan->fcs = L2CAP_FCS_NONE;
3550 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3551 chan->fcs, endptr - ptr);
3556 req->dcid = cpu_to_le16(chan->dcid);
3557 req->flags = cpu_to_le16(0);
3562 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3564 struct l2cap_conf_rsp *rsp = data;
3565 void *ptr = rsp->data;
3566 void *endptr = data + data_size;
3567 void *req = chan->conf_req;
3568 int len = chan->conf_len;
3569 int type, hint, olen;
3571 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3572 struct l2cap_conf_efs efs;
3574 u16 mtu = L2CAP_DEFAULT_MTU;
3575 u16 result = L2CAP_CONF_SUCCESS;
3578 BT_DBG("chan %p", chan);
3580 while (len >= L2CAP_CONF_OPT_SIZE) {
3581 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3585 hint = type & L2CAP_CONF_HINT;
3586 type &= L2CAP_CONF_MASK;
3589 case L2CAP_CONF_MTU:
3595 case L2CAP_CONF_FLUSH_TO:
3598 chan->flush_to = val;
3601 case L2CAP_CONF_QOS:
3604 case L2CAP_CONF_RFC:
3605 if (olen != sizeof(rfc))
3607 memcpy(&rfc, (void *) val, olen);
3610 case L2CAP_CONF_FCS:
3613 if (val == L2CAP_FCS_NONE)
3614 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3617 case L2CAP_CONF_EFS:
3618 if (olen != sizeof(efs))
3621 memcpy(&efs, (void *) val, olen);
3624 case L2CAP_CONF_EWS:
3627 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3628 return -ECONNREFUSED;
3629 set_bit(FLAG_EXT_CTRL, &chan->flags);
3630 set_bit(CONF_EWS_RECV, &chan->conf_state);
3631 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3632 chan->remote_tx_win = val;
3638 result = L2CAP_CONF_UNKNOWN;
3639 l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
3644 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3647 switch (chan->mode) {
3648 case L2CAP_MODE_STREAMING:
3649 case L2CAP_MODE_ERTM:
3650 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3651 chan->mode = l2cap_select_mode(rfc.mode,
3652 chan->conn->feat_mask);
3657 if (__l2cap_efs_supported(chan->conn))
3658 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3660 return -ECONNREFUSED;
3663 if (chan->mode != rfc.mode)
3664 return -ECONNREFUSED;
3670 if (chan->mode != rfc.mode) {
3671 result = L2CAP_CONF_UNACCEPT;
3672 rfc.mode = chan->mode;
3674 if (chan->num_conf_rsp == 1)
3675 return -ECONNREFUSED;
3677 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3678 (unsigned long) &rfc, endptr - ptr);
3681 if (result == L2CAP_CONF_SUCCESS) {
3682 /* Configure output options and let the other side know
3683 * which ones we don't like. */
3685 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3686 result = L2CAP_CONF_UNACCEPT;
3689 set_bit(CONF_MTU_DONE, &chan->conf_state);
3691 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3694 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3695 efs.stype != L2CAP_SERV_NOTRAFIC &&
3696 efs.stype != chan->local_stype) {
3698 result = L2CAP_CONF_UNACCEPT;
3700 if (chan->num_conf_req >= 1)
3701 return -ECONNREFUSED;
3703 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3705 (unsigned long) &efs, endptr - ptr);
3707 /* Send PENDING Conf Rsp */
3708 result = L2CAP_CONF_PENDING;
3709 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3714 case L2CAP_MODE_BASIC:
3715 chan->fcs = L2CAP_FCS_NONE;
3716 set_bit(CONF_MODE_DONE, &chan->conf_state);
3719 case L2CAP_MODE_ERTM:
3720 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3721 chan->remote_tx_win = rfc.txwin_size;
3723 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3725 chan->remote_max_tx = rfc.max_transmit;
3727 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3728 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3729 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3730 rfc.max_pdu_size = cpu_to_le16(size);
3731 chan->remote_mps = size;
3733 __l2cap_set_ertm_timeouts(chan, &rfc);
3735 set_bit(CONF_MODE_DONE, &chan->conf_state);
3737 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3738 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3740 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3741 chan->remote_id = efs.id;
3742 chan->remote_stype = efs.stype;
3743 chan->remote_msdu = le16_to_cpu(efs.msdu);
3744 chan->remote_flush_to =
3745 le32_to_cpu(efs.flush_to);
3746 chan->remote_acc_lat =
3747 le32_to_cpu(efs.acc_lat);
3748 chan->remote_sdu_itime =
3749 le32_to_cpu(efs.sdu_itime);
3750 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3752 (unsigned long) &efs, endptr - ptr);
3756 case L2CAP_MODE_STREAMING:
3757 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3758 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3759 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3760 rfc.max_pdu_size = cpu_to_le16(size);
3761 chan->remote_mps = size;
3763 set_bit(CONF_MODE_DONE, &chan->conf_state);
3765 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3766 (unsigned long) &rfc, endptr - ptr);
3771 result = L2CAP_CONF_UNACCEPT;
3773 memset(&rfc, 0, sizeof(rfc));
3774 rfc.mode = chan->mode;
3777 if (result == L2CAP_CONF_SUCCESS)
3778 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3780 rsp->scid = cpu_to_le16(chan->dcid);
3781 rsp->result = cpu_to_le16(result);
3782 rsp->flags = cpu_to_le16(0);
3787 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3788 void *data, size_t size, u16 *result)
3790 struct l2cap_conf_req *req = data;
3791 void *ptr = req->data;
3792 void *endptr = data + size;
3795 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3796 struct l2cap_conf_efs efs;
3798 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3800 while (len >= L2CAP_CONF_OPT_SIZE) {
3801 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3806 case L2CAP_CONF_MTU:
3809 if (val < L2CAP_DEFAULT_MIN_MTU) {
3810 *result = L2CAP_CONF_UNACCEPT;
3811 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3814 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3818 case L2CAP_CONF_FLUSH_TO:
3821 chan->flush_to = val;
3822 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3823 chan->flush_to, endptr - ptr);
3826 case L2CAP_CONF_RFC:
3827 if (olen != sizeof(rfc))
3829 memcpy(&rfc, (void *)val, olen);
3830 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3831 rfc.mode != chan->mode)
3832 return -ECONNREFUSED;
3834 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3835 (unsigned long) &rfc, endptr - ptr);
3838 case L2CAP_CONF_EWS:
3841 chan->ack_win = min_t(u16, val, chan->ack_win);
3842 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3843 chan->tx_win, endptr - ptr);
3846 case L2CAP_CONF_EFS:
3847 if (olen != sizeof(efs))
3849 memcpy(&efs, (void *)val, olen);
3850 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3851 efs.stype != L2CAP_SERV_NOTRAFIC &&
3852 efs.stype != chan->local_stype)
3853 return -ECONNREFUSED;
3854 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3855 (unsigned long) &efs, endptr - ptr);
3858 case L2CAP_CONF_FCS:
3861 if (*result == L2CAP_CONF_PENDING)
3862 if (val == L2CAP_FCS_NONE)
3863 set_bit(CONF_RECV_NO_FCS,
3869 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3870 return -ECONNREFUSED;
3872 chan->mode = rfc.mode;
3874 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3876 case L2CAP_MODE_ERTM:
3877 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3878 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3879 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3880 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3881 chan->ack_win = min_t(u16, chan->ack_win,
3884 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3885 chan->local_msdu = le16_to_cpu(efs.msdu);
3886 chan->local_sdu_itime =
3887 le32_to_cpu(efs.sdu_itime);
3888 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3889 chan->local_flush_to =
3890 le32_to_cpu(efs.flush_to);
3894 case L2CAP_MODE_STREAMING:
3895 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3899 req->dcid = cpu_to_le16(chan->dcid);
3900 req->flags = cpu_to_le16(0);
3905 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3906 u16 result, u16 flags)
3908 struct l2cap_conf_rsp *rsp = data;
3909 void *ptr = rsp->data;
3911 BT_DBG("chan %p", chan);
3913 rsp->scid = cpu_to_le16(chan->dcid);
3914 rsp->result = cpu_to_le16(result);
3915 rsp->flags = cpu_to_le16(flags);
3920 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3922 struct l2cap_le_conn_rsp rsp;
3923 struct l2cap_conn *conn = chan->conn;
3925 BT_DBG("chan %p", chan);
3927 rsp.dcid = cpu_to_le16(chan->scid);
3928 rsp.mtu = cpu_to_le16(chan->imtu);
3929 rsp.mps = cpu_to_le16(chan->mps);
3930 rsp.credits = cpu_to_le16(chan->rx_credits);
3931 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3933 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3937 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3940 struct l2cap_ecred_conn_rsp rsp;
3943 struct l2cap_conn *conn = chan->conn;
3944 u16 ident = chan->ident;
3950 BT_DBG("chan %p ident %d", chan, ident);
3952 pdu.rsp.mtu = cpu_to_le16(chan->imtu);
3953 pdu.rsp.mps = cpu_to_le16(chan->mps);
3954 pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3955 pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3957 mutex_lock(&conn->chan_lock);
3959 list_for_each_entry(chan, &conn->chan_l, list) {
3960 if (chan->ident != ident)
3963 /* Reset ident so only one response is sent */
3966 /* Include all channels pending with the same ident */
3967 pdu.dcid[i++] = cpu_to_le16(chan->scid);
3970 mutex_unlock(&conn->chan_lock);
3972 l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
3973 sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
3976 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3978 struct l2cap_conn_rsp rsp;
3979 struct l2cap_conn *conn = chan->conn;
3983 rsp.scid = cpu_to_le16(chan->dcid);
3984 rsp.dcid = cpu_to_le16(chan->scid);
3985 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3986 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3989 rsp_code = L2CAP_CREATE_CHAN_RSP;
3991 rsp_code = L2CAP_CONN_RSP;
3993 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3995 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3997 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4000 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4001 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4002 chan->num_conf_req++;
4005 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4009 /* Use sane default values in case a misbehaving remote device
4010 * did not send an RFC or extended window size option.
4012 u16 txwin_ext = chan->ack_win;
4013 struct l2cap_conf_rfc rfc = {
4015 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4016 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4017 .max_pdu_size = cpu_to_le16(chan->imtu),
4018 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4021 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
4023 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4026 while (len >= L2CAP_CONF_OPT_SIZE) {
4027 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4032 case L2CAP_CONF_RFC:
4033 if (olen != sizeof(rfc))
4035 memcpy(&rfc, (void *)val, olen);
4037 case L2CAP_CONF_EWS:
4046 case L2CAP_MODE_ERTM:
4047 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4048 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4049 chan->mps = le16_to_cpu(rfc.max_pdu_size);
4050 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4051 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4053 chan->ack_win = min_t(u16, chan->ack_win,
4056 case L2CAP_MODE_STREAMING:
4057 chan->mps = le16_to_cpu(rfc.max_pdu_size);
4061 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4062 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4065 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4067 if (cmd_len < sizeof(*rej))
4070 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4073 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4074 cmd->ident == conn->info_ident) {
4075 cancel_delayed_work(&conn->info_timer);
4077 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4078 conn->info_ident = 0;
4080 l2cap_conn_start(conn);
4086 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4087 struct l2cap_cmd_hdr *cmd,
4088 u8 *data, u8 rsp_code, u8 amp_id)
4090 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4091 struct l2cap_conn_rsp rsp;
4092 struct l2cap_chan *chan = NULL, *pchan;
4093 int result, status = L2CAP_CS_NO_INFO;
4095 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4096 __le16 psm = req->psm;
4098 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4100 /* Check if we have socket listening on psm */
4101 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4102 &conn->hcon->dst, ACL_LINK);
4104 result = L2CAP_CR_BAD_PSM;
4108 mutex_lock(&conn->chan_lock);
4109 l2cap_chan_lock(pchan);
4111 /* Check if the ACL is secure enough (if not SDP) */
4112 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4113 !hci_conn_check_link_mode(conn->hcon)) {
4114 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4115 result = L2CAP_CR_SEC_BLOCK;
4119 result = L2CAP_CR_NO_MEM;
4121 /* Check for valid dynamic CID range (as per Erratum 3253) */
4122 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4123 result = L2CAP_CR_INVALID_SCID;
4127 /* Check if we already have channel with that dcid */
4128 if (__l2cap_get_chan_by_dcid(conn, scid)) {
4129 result = L2CAP_CR_SCID_IN_USE;
4133 chan = pchan->ops->new_connection(pchan);
4137 /* For certain devices (ex: HID mouse), support for authentication,
4138 * pairing and bonding is optional. For such devices, inorder to avoid
4139 * the ACL alive for too long after L2CAP disconnection, reset the ACL
4140 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4142 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4144 bacpy(&chan->src, &conn->hcon->src);
4145 bacpy(&chan->dst, &conn->hcon->dst);
4146 chan->src_type = bdaddr_src_type(conn->hcon);
4147 chan->dst_type = bdaddr_dst_type(conn->hcon);
4150 chan->local_amp_id = amp_id;
4152 __l2cap_chan_add(conn, chan);
4156 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4158 chan->ident = cmd->ident;
4160 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4161 if (l2cap_chan_check_security(chan, false)) {
4162 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4163 l2cap_state_change(chan, BT_CONNECT2);
4164 result = L2CAP_CR_PEND;
4165 status = L2CAP_CS_AUTHOR_PEND;
4166 chan->ops->defer(chan);
4168 /* Force pending result for AMP controllers.
4169 * The connection will succeed after the
4170 * physical link is up.
4172 if (amp_id == AMP_ID_BREDR) {
4173 l2cap_state_change(chan, BT_CONFIG);
4174 result = L2CAP_CR_SUCCESS;
4176 l2cap_state_change(chan, BT_CONNECT2);
4177 result = L2CAP_CR_PEND;
4179 status = L2CAP_CS_NO_INFO;
4182 l2cap_state_change(chan, BT_CONNECT2);
4183 result = L2CAP_CR_PEND;
4184 status = L2CAP_CS_AUTHEN_PEND;
4187 l2cap_state_change(chan, BT_CONNECT2);
4188 result = L2CAP_CR_PEND;
4189 status = L2CAP_CS_NO_INFO;
4193 l2cap_chan_unlock(pchan);
4194 mutex_unlock(&conn->chan_lock);
4195 l2cap_chan_put(pchan);
4198 rsp.scid = cpu_to_le16(scid);
4199 rsp.dcid = cpu_to_le16(dcid);
4200 rsp.result = cpu_to_le16(result);
4201 rsp.status = cpu_to_le16(status);
4202 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4204 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4205 struct l2cap_info_req info;
4206 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4208 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4209 conn->info_ident = l2cap_get_ident(conn);
4211 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4213 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4214 sizeof(info), &info);
4217 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4218 result == L2CAP_CR_SUCCESS) {
4220 set_bit(CONF_REQ_SENT, &chan->conf_state);
4221 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4222 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4223 chan->num_conf_req++;
/* Handle an incoming BR/EDR L2CAP Connection Request.
 *
 * Validates the PDU length, notifies the management interface that the
 * remote device is connected (at most once per ACL link, guarded by the
 * HCI_CONN_MGMT_CONNECTED bit), then delegates channel setup to
 * l2cap_connect() with a plain Connection Response code and amp_id 0
 * (BR/EDR).
 *
 * NOTE(review): the matching hci_dev_lock(hdev), the -EPROTO return for
 * short PDUs and the final return statement are not visible in this
 * excerpt — verify against the full file.
 */
4229 static int l2cap_connect_req(struct l2cap_conn *conn,
4230 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4232 struct hci_dev *hdev = conn->hcon->hdev;
4233 struct hci_conn *hcon = conn->hcon;
/* Reject truncated requests before touching the payload. */
4235 if (cmd_len < sizeof(struct l2cap_conn_req))
/* First channel on this link: tell mgmt the device is connected. */
4239 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4240 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4241 mgmt_device_connected(hdev, hcon, NULL, 0);
4242 hci_dev_unlock(hdev);
/* amp_id 0: regular BR/EDR connect, answered with L2CAP_CONN_RSP. */
4244 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response or Create Channel Response.
 *
 * Locates the local channel either by the source CID echoed in the
 * response or, failing that, by the signalling ident of the request we
 * sent.  On success the channel moves to BT_CONFIG and (unless one was
 * already sent) a Configure Request is issued; on a pending result we
 * just mark the connect as pending; any other result tears the channel
 * down with ECONNREFUSED.
 *
 * NOTE(review): the switch(result) header, the L2CAP_CR_PEND case label
 * and several break/unlock lines are not visible in this excerpt.
 */
4248 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4249 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4252 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4253 u16 scid, dcid, result, status;
4254 struct l2cap_chan *chan;
/* Short PDU: protocol error. */
4258 if (cmd_len < sizeof(*rsp))
4261 scid = __le16_to_cpu(rsp->scid);
4262 dcid = __le16_to_cpu(rsp->dcid);
4263 result = __le16_to_cpu(rsp->result);
4264 status = __le16_to_cpu(rsp->status);
4266 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4267 dcid, scid, result, status);
/* chan_lock serialises channel lookup/teardown on this connection. */
4269 mutex_lock(&conn->chan_lock);
/* Prefer lookup by our source CID; fall back to the request ident. */
4272 chan = __l2cap_get_chan_by_scid(conn, scid);
4278 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4287 l2cap_chan_lock(chan);
4290 case L2CAP_CR_SUCCESS:
4291 l2cap_state_change(chan, BT_CONFIG);
4294 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Only send the first Configure Request once. */
4296 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4299 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4300 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4301 chan->num_conf_req++;
/* Pending result: remember that the connect is still outstanding. */
4305 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result: refuse and delete the channel. */
4309 l2cap_chan_del(chan, ECONNREFUSED);
4313 l2cap_chan_unlock(chan);
4316 mutex_unlock(&conn->chan_lock);
4321 static inline void set_default_fcs(struct l2cap_chan *chan)
4323 /* FCS is enabled only in ERTM or streaming mode, if one or both
4326 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4327 chan->fcs = L2CAP_FCS_NONE;
4328 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4329 chan->fcs = L2CAP_FCS_CRC16;
4332 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4333 u8 ident, u16 flags)
4335 struct l2cap_conn *conn = chan->conn;
4337 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4340 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4341 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4343 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4344 l2cap_build_conf_rsp(chan, data,
4345 L2CAP_CONF_SUCCESS, flags), data);
4348 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4351 struct l2cap_cmd_rej_cid rej;
4353 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4354 rej.scid = __cpu_to_le16(scid);
4355 rej.dcid = __cpu_to_le16(dcid);
4357 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configure Request.
 *
 * Configuration PDUs may be fragmented using the continuation flag, so
 * incoming option data is accumulated in chan->conf_req until a PDU
 * without the continuation flag arrives; only then is the full option
 * set parsed and a real Configure Response built.  Once both directions
 * of configuration are complete (CONF_OUTPUT_DONE + CONF_INPUT_DONE)
 * the channel is initialised (ERTM if applicable) and marked ready.
 *
 * NOTE(review): several -EPROTO/unlock/goto lines, local declarations
 * (len, err, rsp/req buffers) and closing braces are not visible in
 * this excerpt — verify against the full file.
 */
4360 static inline int l2cap_config_req(struct l2cap_conn *conn,
4361 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4364 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4367 struct l2cap_chan *chan;
/* Short PDU: protocol error. */
4370 if (cmd_len < sizeof(*req))
4373 dcid = __le16_to_cpu(req->dcid);
4374 flags = __le16_to_cpu(req->flags);
4376 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
/* Their dcid is our scid; lookup also locks the channel. */
4378 chan = l2cap_get_chan_by_scid(conn, dcid);
4380 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config is only legal while connecting/configuring/connected. */
4384 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4385 chan->state != BT_CONNECTED) {
4386 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4391 /* Reject if config buffer is too small. */
4392 len = cmd_len - sizeof(*req);
4393 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4394 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4395 l2cap_build_conf_rsp(chan, rsp,
4396 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated option data. */
4401 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4402 chan->conf_len += len;
4404 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4405 /* Incomplete config. Send empty response. */
4406 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4407 l2cap_build_conf_rsp(chan, rsp,
4408 L2CAP_CONF_SUCCESS, flags), rsp);
4412 /* Complete config. */
4413 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
/* Parse failure: drop the connection. */
4415 l2cap_send_disconn_req(chan, ECONNRESET);
4419 chan->ident = cmd->ident;
4420 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4421 chan->num_conf_rsp++;
4423 /* Reset config buffer. */
/* Wait until our outgoing config response side is finished too. */
4426 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4429 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4430 set_default_fcs(chan);
4432 if (chan->mode == L2CAP_MODE_ERTM ||
4433 chan->mode == L2CAP_MODE_STREAMING)
4434 err = l2cap_ertm_init(chan);
4437 l2cap_send_disconn_req(chan, -err);
4439 l2cap_chan_ready(chan);
/* If we never sent our own Configure Request, send it now. */
4444 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4446 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4447 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4448 chan->num_conf_req++;
4451 /* Got Conf Rsp PENDING from remote side and assume we sent
4452 Conf Rsp PENDING in the code above */
4453 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4454 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4456 /* check compatibility */
4458 /* Send rsp for BR/EDR channel */
4460 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4462 chan->ident = cmd->ident;
4466 l2cap_chan_unlock(chan);
/* Handle an incoming Configure Response.
 *
 * Dispatches on the response's result code: SUCCESS records the agreed
 * RFC options, PENDING may require answering with our own EFS response
 * or starting an AMP logical link, UNKNOWN/UNACCEPT renegotiates by
 * sending a fresh Configure Request (up to L2CAP_CONF_MAX_CONF_RSP
 * attempts), and anything else aborts the channel.  When both config
 * directions are done the channel is initialised and marked ready.
 *
 * NOTE(review): the switch(result) header, default case, several goto
 * labels and the continuation-flag early exit body are not visible in
 * this excerpt — verify against the full file.
 */
4470 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4471 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4474 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4475 u16 scid, flags, result;
4476 struct l2cap_chan *chan;
/* Remaining bytes after the fixed header are option data. */
4477 int len = cmd_len - sizeof(*rsp);
4480 if (cmd_len < sizeof(*rsp))
4483 scid = __le16_to_cpu(rsp->scid);
4484 flags = __le16_to_cpu(rsp->flags);
4485 result = __le16_to_cpu(rsp->result);
4487 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4490 chan = l2cap_get_chan_by_scid(conn, scid);
4495 case L2CAP_CONF_SUCCESS:
/* Remote accepted: record negotiated RFC options. */
4496 l2cap_conf_rfc_get(chan, rsp->data, len);
4497 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4500 case L2CAP_CONF_PENDING:
4501 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4503 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4506 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4507 buf, sizeof(buf), &result);
4509 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR channel: answer immediately; AMP waits for link. */
4513 if (!chan->hs_hcon) {
4514 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4517 if (l2cap_check_efs(chan)) {
4518 amp_create_logical_link(chan);
4519 chan->ident = cmd->ident;
4525 case L2CAP_CONF_UNKNOWN:
4526 case L2CAP_CONF_UNACCEPT:
4527 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Option data must fit in the renegotiation buffer. */
4530 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4531 l2cap_send_disconn_req(chan, ECONNRESET);
4535 /* throw out any old stored conf requests */
4536 result = L2CAP_CONF_SUCCESS;
4537 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4538 req, sizeof(req), &result);
4540 l2cap_send_disconn_req(chan, ECONNRESET);
4544 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4545 L2CAP_CONF_REQ, len, req);
4546 chan->num_conf_req++;
4547 if (result != L2CAP_CONF_SUCCESS)
/* Too many rejected rounds: give up and disconnect. */
4554 l2cap_chan_set_err(chan, ECONNRESET);
4556 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4557 l2cap_send_disconn_req(chan, ECONNRESET);
/* More fragments coming; wait for the final response. */
4561 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4564 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4566 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4567 set_default_fcs(chan);
4569 if (chan->mode == L2CAP_MODE_ERTM ||
4570 chan->mode == L2CAP_MODE_STREAMING)
4571 err = l2cap_ertm_init(chan);
4574 l2cap_send_disconn_req(chan, -err);
4576 l2cap_chan_ready(chan);
4580 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnection Request.
 *
 * Looks up the target channel (their dcid == our scid), acknowledges
 * with a Disconnection Response, then shuts the channel down and frees
 * it.  An extra reference is held across the teardown so the channel
 * stays valid until after close() returns.
 */
4584 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4585 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4588 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4589 struct l2cap_disconn_rsp rsp;
4591 struct l2cap_chan *chan;
/* Disconnection Request has a fixed size. */
4593 if (cmd_len != sizeof(*req))
4596 scid = __le16_to_cpu(req->scid);
4597 dcid = __le16_to_cpu(req->dcid);
4599 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4601 mutex_lock(&conn->chan_lock);
4603 chan = __l2cap_get_chan_by_scid(conn, dcid);
/* Unknown CID: reject, echoing both CIDs from the request. */
4605 mutex_unlock(&conn->chan_lock);
4606 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
/* Hold a ref so the channel survives l2cap_chan_del() below. */
4610 l2cap_chan_hold(chan);
4611 l2cap_chan_lock(chan);
/* Response carries the CIDs from our perspective, swapped. */
4613 rsp.dcid = cpu_to_le16(chan->scid);
4614 rsp.scid = cpu_to_le16(chan->dcid);
4615 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4617 chan->ops->set_shutdown(chan);
4619 l2cap_chan_del(chan, ECONNRESET);
4621 chan->ops->close(chan);
4623 l2cap_chan_unlock(chan);
4624 l2cap_chan_put(chan);
4626 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnection Response.
 *
 * Completes a disconnect we initiated: the channel must be in
 * BT_DISCONN, otherwise the response is ignored.  As in the request
 * handler, a reference is held across teardown.
 */
4631 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4632 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4635 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4637 struct l2cap_chan *chan;
/* Fixed-size PDU. */
4639 if (cmd_len != sizeof(*rsp))
4642 scid = __le16_to_cpu(rsp->scid);
4643 dcid = __le16_to_cpu(rsp->dcid);
4645 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4647 mutex_lock(&conn->chan_lock);
4649 chan = __l2cap_get_chan_by_scid(conn, scid);
4651 mutex_unlock(&conn->chan_lock);
/* Keep the channel alive through del/close. */
4655 l2cap_chan_hold(chan);
4656 l2cap_chan_lock(chan);
/* Only meaningful if we actually sent a Disconnection Request. */
4658 if (chan->state != BT_DISCONN) {
4659 l2cap_chan_unlock(chan);
4660 l2cap_chan_put(chan);
4661 mutex_unlock(&conn->chan_lock);
/* Clean close: no error reported to the channel user. */
4665 l2cap_chan_del(chan, 0);
4667 chan->ops->close(chan);
4669 l2cap_chan_unlock(chan);
4670 l2cap_chan_put(chan);
4672 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request.
 *
 * Supports two queries: the extended feature mask (base mask plus ERTM,
 * streaming and, when A2MP is enabled locally, extended flow spec and
 * extended window size) and the fixed channels bitmap.  Any other type
 * is answered with "not supported".
 *
 * NOTE(review): local buffer declarations and the feature-mask
 * continuation line are not visible in this excerpt.
 */
4677 static inline int l2cap_information_req(struct l2cap_conn *conn,
4678 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4681 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
/* Fixed-size PDU. */
4684 if (cmd_len != sizeof(*req))
4687 type = __le16_to_cpu(req->type);
4689 BT_DBG("type 0x%4.4x", type);
4691 if (type == L2CAP_IT_FEAT_MASK) {
4693 u32 feat_mask = l2cap_feat_mask;
4694 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4695 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4696 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Always advertise ERTM and streaming on BR/EDR. */
4698 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4700 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4701 feat_mask |= L2CAP_FEAT_EXT_FLOW
4702 | L2CAP_FEAT_EXT_WINDOW;
/* rsp->data may be unaligned within the reply buffer. */
4704 put_unaligned_le32(feat_mask, rsp->data);
4705 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4707 } else if (type == L2CAP_IT_FIXED_CHAN) {
4709 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4711 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4712 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap: first byte real, rest zero. */
4713 rsp->data[0] = conn->local_fixed_chan;
4714 memset(rsp->data + 1, 0, 7);
4715 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4718 struct l2cap_info_rsp rsp;
4719 rsp.type = cpu_to_le16(type);
4720 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4721 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming Information Response.
 *
 * Completes our own information query sequence: after learning the
 * remote feature mask we may chain a second query for the fixed
 * channels bitmap; once the sequence finishes (or fails) the info
 * state is marked done and pending channel setup resumes via
 * l2cap_conn_start().
 */
4728 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4729 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4732 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4735 if (cmd_len < sizeof(*rsp))
4738 type = __le16_to_cpu(rsp->type);
4739 result = __le16_to_cpu(rsp->result);
4741 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4743 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4744 if (cmd->ident != conn->info_ident ||
4745 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
/* Response arrived in time: stop the info request timer. */
4748 cancel_delayed_work(&conn->info_timer);
4750 if (result != L2CAP_IR_SUCCESS) {
/* Query failed: mark done and let channel setup proceed anyway. */
4751 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4752 conn->info_ident = 0;
4754 l2cap_conn_start(conn);
4760 case L2CAP_IT_FEAT_MASK:
4761 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Remote supports fixed channels: ask for its bitmap next. */
4763 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4764 struct l2cap_info_req req;
4765 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4767 conn->info_ident = l2cap_get_ident(conn);
4769 l2cap_send_cmd(conn, conn->info_ident,
4770 L2CAP_INFO_REQ, sizeof(req), &req);
/* No fixed-channel support: the sequence ends here. */
4772 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4773 conn->info_ident = 0;
4775 l2cap_conn_start(conn);
4779 case L2CAP_IT_FIXED_CHAN:
4780 conn->remote_fixed_chan = rsp->data[0];
4781 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4782 conn->info_ident = 0;
4784 l2cap_conn_start(conn);
/* Handle an incoming Create Channel Request (AMP).
 *
 * For amp_id 0 this degenerates into a normal BR/EDR connect answered
 * with a Create Channel Response.  Otherwise the requested AMP
 * controller id is validated (must exist, be an AMP device and be up)
 * and, if a channel is created, it is bound to the AMP physical link
 * found on that controller.  Invalid controller ids are answered with
 * L2CAP_CR_BAD_AMP.
 *
 * NOTE(review): goto labels, hci_dev_put() calls and the rsp.dcid
 * assignment in the error path are not visible in this excerpt —
 * verify the hdev reference is released on all paths in the full file.
 */
4791 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4792 struct l2cap_cmd_hdr *cmd,
4793 u16 cmd_len, void *data)
4795 struct l2cap_create_chan_req *req = data;
4796 struct l2cap_create_chan_rsp rsp;
4797 struct l2cap_chan *chan;
4798 struct hci_dev *hdev;
/* Fixed-size PDU. */
4801 if (cmd_len != sizeof(*req))
/* Reject unless A2MP is enabled locally. */
4804 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4807 psm = le16_to_cpu(req->psm);
4808 scid = le16_to_cpu(req->scid);
4810 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4812 /* For controller id 0 make BR/EDR connection */
4813 if (req->amp_id == AMP_ID_BREDR) {
4814 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4819 /* Validate AMP controller id */
4820 hdev = hci_dev_get(req->amp_id);
4824 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4829 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4832 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4833 struct hci_conn *hs_hcon;
/* Find the AMP physical link to the same remote device. */
4835 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No physical link: the new channel's CID is invalid. */
4839 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4844 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4846 mgr->bredr_chan = chan;
4847 chan->hs_hcon = hs_hcon;
/* AMP links provide their own integrity checking; no L2CAP FCS. */
4848 chan->fcs = L2CAP_FCS_NONE;
4849 conn->mtu = hdev->block_mtu;
/* Error path: report a bad AMP controller id. */
4858 rsp.scid = cpu_to_le16(scid);
4859 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4860 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4862 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4868 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4870 struct l2cap_move_chan_req req;
4873 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4875 ident = l2cap_get_ident(chan->conn);
4876 chan->ident = ident;
4878 req.icid = cpu_to_le16(chan->scid);
4879 req.dest_amp_id = dest_amp_id;
4881 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4884 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4887 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4889 struct l2cap_move_chan_rsp rsp;
4891 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4893 rsp.icid = cpu_to_le16(chan->dcid);
4894 rsp.result = cpu_to_le16(result);
4896 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4900 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4902 struct l2cap_move_chan_cfm cfm;
4904 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4906 chan->ident = l2cap_get_ident(chan->conn);
4908 cfm.icid = cpu_to_le16(chan->scid);
4909 cfm.result = cpu_to_le16(result);
4911 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4914 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4917 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4919 struct l2cap_move_chan_cfm cfm;
4921 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4923 cfm.icid = cpu_to_le16(icid);
4924 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4926 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4930 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4933 struct l2cap_move_chan_cfm_rsp rsp;
4935 BT_DBG("icid 0x%4.4x", icid);
4937 rsp.icid = cpu_to_le16(icid);
4938 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4941 static void __release_logical_link(struct l2cap_chan *chan)
4943 chan->hs_hchan = NULL;
4944 chan->hs_hcon = NULL;
4946 /* Placeholder - release the logical link */
/* Handle failure to bring up an AMP logical link.
 *
 * If the channel never reached BT_CONNECTED the link was for channel
 * creation, so the channel is simply disconnected.  Otherwise the link
 * was for a channel move, and cleanup depends on which side initiated
 * the move and how far it progressed.
 *
 * NOTE(review): the return after the disconnect, break statements and
 * closing braces are not visible in this excerpt.
 */
4949 static void l2cap_logical_fail(struct l2cap_chan *chan)
4951 /* Logical link setup failed */
4952 if (chan->state != BT_CONNECTED) {
4953 /* Create channel failure, disconnect */
4954 l2cap_send_disconn_req(chan, ECONNRESET);
4958 switch (chan->move_role) {
4959 case L2CAP_MOVE_ROLE_RESPONDER:
/* We were accepting a move: abort it and tell the initiator. */
4960 l2cap_move_done(chan);
4961 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4963 case L2CAP_MOVE_ROLE_INITIATOR:
4964 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4965 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4966 /* Remote has only sent pending or
4967 * success responses, clean up
4969 l2cap_move_done(chan);
4972 /* Other amp move states imply that the move
4973 * has already aborted
4975 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up.
 *
 * Binds the new hci_chan to the channel, sends the deferred EFS
 * Configure Response, and if the remote's config side already finished
 * (CONF_INPUT_DONE) initialises ERTM and marks the channel ready.
 *
 * NOTE(review): the local err declaration and the if (err) guard line
 * before the disconnect are not visible in this excerpt.
 */
4980 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4981 struct hci_chan *hchan)
4983 struct l2cap_conf_rsp rsp;
4985 chan->hs_hchan = hchan;
/* Route L2CAP data for the AMP link to this connection. */
4986 chan->hs_hcon->l2cap_data = chan->conn;
/* Send the Configure Response that was deferred until link-up. */
4988 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4990 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4993 set_default_fcs(chan);
4995 err = l2cap_ertm_init(chan);
4997 l2cap_send_disconn_req(chan, -err);
4999 l2cap_chan_ready(chan);
/* Advance a channel move after its AMP logical link comes up.
 *
 * The next step depends on the move state machine: either we still
 * need a success response from the remote before confirming, or the
 * link was the last missing piece and we can send the confirmation
 * (initiator) or success response (responder) now.  Any unexpected
 * state releases the link and returns the channel to stable.
 *
 * NOTE(review): break statements and the default label around the
 * "not in expected state" branch are not visible in this excerpt.
 */
5003 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5004 struct hci_chan *hchan)
5006 chan->hs_hcon = hchan->conn;
5007 chan->hs_hcon->l2cap_data = chan->conn;
5009 BT_DBG("move_state %d", chan->move_state);
5011 switch (chan->move_state) {
5012 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5013 /* Move confirm will be sent after a success
5014 * response is received
5016 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5018 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Local busy defers the final step until rx is unstalled. */
5019 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5020 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5021 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5022 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5023 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5024 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5025 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5026 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5030 /* Move was not in expected state, free the channel */
5031 __release_logical_link(chan);
5033 chan->move_state = L2CAP_MOVE_STABLE;
5037 /* Call with chan locked */
/* Logical link confirmation callback from the AMP layer.
 *
 * On failure the pending create/move is cleaned up and the link
 * released.  On success the link completes either channel creation
 * (channel not yet connected) or a channel move — except that a
 * BR/EDR-resident channel ignores a late link-up for creation.
 *
 * NOTE(review): the status parameter in the signature and the
 * if (!hchan || status) failure guard are not visible in this excerpt.
 */
5038 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5041 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
/* Failure path: abort whatever the link was being created for. */
5044 l2cap_logical_fail(chan);
5045 __release_logical_link(chan);
5049 if (chan->state != BT_CONNECTED) {
5050 /* Ignore logical link if channel is on BR/EDR */
5051 if (chan->local_amp_id != AMP_ID_BREDR)
5052 l2cap_logical_finish_create(chan, hchan);
5054 l2cap_logical_finish_move(chan, hchan);
/* Begin moving a connected channel between BR/EDR and an AMP
 * controller, as initiator.
 *
 * Moving off BR/EDR first requires a physical link, so only the move
 * role/state is recorded here (physical link setup is a placeholder).
 * Moving back to BR/EDR can proceed immediately with a Move Channel
 * Request for controller 0.
 */
5058 void l2cap_move_start(struct l2cap_chan *chan)
5060 BT_DBG("chan %p", chan);
5062 if (chan->local_amp_id == AMP_ID_BREDR) {
/* Only move off BR/EDR if policy prefers AMP. */
5063 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5065 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5066 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5067 /* Placeholder - start physical link setup */
/* Already on an AMP: move straight back to BR/EDR (id 0). */
5069 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5070 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5072 l2cap_move_setup(chan);
5073 l2cap_send_move_chan_req(chan, 0);
/* Continue AMP channel creation after the physical link attempt.
 *
 * For an outgoing channel (BT_CONNECT): on success send a Create
 * Channel Request on the AMP; otherwise fall back to a plain BR/EDR
 * Connection Request.  For an incoming channel: answer the pending
 * Create Channel Request with success or no-resources, and on success
 * start configuration.
 *
 * NOTE(review): the return after the outgoing-channel branch, the
 * local buf declaration and closing braces are not visible in this
 * excerpt.
 */
5077 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5078 u8 local_amp_id, u8 remote_amp_id)
5080 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5081 local_amp_id, remote_amp_id);
/* AMP links carry no L2CAP FCS. */
5083 chan->fcs = L2CAP_FCS_NONE;
5085 /* Outgoing channel on AMP */
5086 if (chan->state == BT_CONNECT) {
5087 if (result == L2CAP_CR_SUCCESS) {
5088 chan->local_amp_id = local_amp_id;
5089 l2cap_send_create_chan_req(chan, remote_amp_id);
5091 /* Revert to BR/EDR connect */
5092 l2cap_send_conn_req(chan);
5098 /* Incoming channel on AMP */
5099 if (__l2cap_no_conn_pending(chan)) {
5100 struct l2cap_conn_rsp rsp;
/* Response CIDs are swapped relative to our view. */
5102 rsp.scid = cpu_to_le16(chan->dcid);
5103 rsp.dcid = cpu_to_le16(chan->scid);
5105 if (result == L2CAP_CR_SUCCESS) {
5106 /* Send successful response */
5107 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5108 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5110 /* Send negative response */
5111 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5112 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5115 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
5118 if (result == L2CAP_CR_SUCCESS) {
/* Accepted: move to configuration and send our Conf Req. */
5119 l2cap_state_change(chan, BT_CONFIG);
5120 set_bit(CONF_REQ_SENT, &chan->conf_state);
5121 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5123 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5124 chan->num_conf_req++;
5129 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5132 l2cap_move_setup(chan);
5133 chan->move_id = local_amp_id;
5134 chan->move_state = L2CAP_MOVE_WAIT_RSP;
5136 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Continue a channel move as responder after the physical link step.
 *
 * If a logical link is already connected the move can be answered with
 * success right away; if the link exists but is still coming up we wait
 * for its confirmation; with no link the move is refused.
 *
 * NOTE(review): the if (result == L2CAP_MR_SUCCESS) / if (hchan) guard
 * lines and closing braces are not visible in this excerpt — hchan is
 * NULL here (placeholder), so the dereference below is currently
 * guarded elsewhere; verify against the full file.
 */
5139 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5141 struct hci_chan *hchan = NULL;
5143 /* Placeholder - get hci_chan for logical link */
5146 if (hchan->state == BT_CONNECTED) {
5147 /* Logical link is ready to go */
5148 chan->hs_hcon = hchan->conn;
5149 chan->hs_hcon->l2cap_data = chan->conn;
5150 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5151 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
/* Link already up: run the confirmation path immediately. */
5153 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5155 /* Wait for logical link to be ready */
5156 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5159 /* Logical link not available */
5160 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress channel move.
 *
 * As responder, first tell the initiator why (bad controller id or not
 * allowed).  In all cases the move state machine returns to stable and
 * ERTM transmission is restarted.
 *
 * NOTE(review): the u8 rsp_result declaration and closing brace of the
 * responder branch are not visible in this excerpt.
 */
5164 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5166 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
/* -EINVAL maps to "bad controller ID", everything else to
 * "not allowed". */
5168 if (result == -EINVAL)
5169 rsp_result = L2CAP_MR_BAD_ID;
5171 rsp_result = L2CAP_MR_NOT_ALLOWED;
5173 l2cap_send_move_chan_rsp(chan, rsp_result);
5176 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5177 chan->move_state = L2CAP_MOVE_STABLE;
5179 /* Restart data transmission */
5180 l2cap_ertm_send(chan);
5183 /* Invoke with locked chan */
/* Physical link confirmation callback from the AMP layer.
 *
 * Dead channels are ignored.  A not-yet-connected channel continues
 * the create path; a connected channel continues (or cancels, on a
 * non-success result) the move path according to its move role.
 *
 * NOTE(review): break statements, the default case label and closing
 * braces of the switch are not visible in this excerpt.
 */
5184 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5186 u8 local_amp_id = chan->local_amp_id;
5187 u8 remote_amp_id = chan->remote_amp_id;
5189 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5190 chan, result, local_amp_id, remote_amp_id);
/* Channel already dying: nothing to continue. */
5192 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5195 if (chan->state != BT_CONNECTED) {
5196 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5197 } else if (result != L2CAP_MR_SUCCESS) {
5198 l2cap_do_move_cancel(chan, result);
5200 switch (chan->move_role) {
5201 case L2CAP_MOVE_ROLE_INITIATOR:
5202 l2cap_do_move_initiate(chan, local_amp_id,
5205 case L2CAP_MOVE_ROLE_RESPONDER:
5206 l2cap_do_move_respond(chan, result);
5209 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request.
 *
 * Validates that the channel is movable (dynamic CID, ERTM/streaming
 * mode, policy allows it), that the destination controller differs and
 * exists, and resolves move collisions by comparing bd_addrs.  The
 * result is sent back in a Move Channel Response; a move to an AMP is
 * answered "pending" while the physical link is brought up.
 *
 * NOTE(review): -EPROTO returns, hci_dev_put() in the bad-id path, the
 * send_move_response label and the final return are not visible in
 * this excerpt.
 */
5215 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5216 struct l2cap_cmd_hdr *cmd,
5217 u16 cmd_len, void *data)
5219 struct l2cap_move_chan_req *req = data;
5220 struct l2cap_move_chan_rsp rsp;
5221 struct l2cap_chan *chan;
5223 u16 result = L2CAP_MR_NOT_ALLOWED;
/* Fixed-size PDU. */
5225 if (cmd_len != sizeof(*req))
5228 icid = le16_to_cpu(req->icid);
5230 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
/* Moves only make sense when A2MP is enabled locally. */
5232 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5235 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown icid: answer "not allowed" without a channel. */
5237 rsp.icid = cpu_to_le16(icid);
5238 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5239 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
/* Save the ident so l2cap_send_move_chan_rsp() can reply. */
5244 chan->ident = cmd->ident;
/* Only dynamic ERTM/streaming channels may move, and only when
 * policy does not pin them to BR/EDR. */
5246 if (chan->scid < L2CAP_CID_DYN_START ||
5247 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5248 (chan->mode != L2CAP_MODE_ERTM &&
5249 chan->mode != L2CAP_MODE_STREAMING)) {
5250 result = L2CAP_MR_NOT_ALLOWED;
5251 goto send_move_response;
5254 if (chan->local_amp_id == req->dest_amp_id) {
5255 result = L2CAP_MR_SAME_ID;
5256 goto send_move_response;
5259 if (req->dest_amp_id != AMP_ID_BREDR) {
5260 struct hci_dev *hdev;
5261 hdev = hci_dev_get(req->dest_amp_id);
5262 if (!hdev || hdev->dev_type != HCI_AMP ||
5263 !test_bit(HCI_UP, &hdev->flags)) {
5267 result = L2CAP_MR_BAD_ID;
5268 goto send_move_response;
5273 /* Detect a move collision. Only send a collision response
5274 * if this side has "lost", otherwise proceed with the move.
5275 * The winner has the larger bd_addr.
5277 if ((__chan_is_moving(chan) ||
5278 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5279 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5280 result = L2CAP_MR_COLLISION;
5281 goto send_move_response;
5284 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5285 l2cap_move_setup(chan);
5286 chan->move_id = req->dest_amp_id;
5288 if (req->dest_amp_id == AMP_ID_BREDR) {
5289 /* Moving to BR/EDR */
/* Defer final response while local rx is stalled. */
5290 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5291 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5292 result = L2CAP_MR_PEND;
5294 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5295 result = L2CAP_MR_SUCCESS;
/* Moving to an AMP: physical link must come up first. */
5298 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5299 /* Placeholder - uncomment when amp functions are available */
5300 /*amp_accept_physical(chan, req->dest_amp_id);*/
5301 result = L2CAP_MR_PEND;
5305 l2cap_send_move_chan_rsp(chan, result);
5307 l2cap_chan_unlock(chan);
/* Advance an initiator-side move on a success/pending Move Channel
 * Response.
 *
 * What happens next depends on how far the logical link has come:
 * either keep waiting, defer for local busy, or send the Move Channel
 * Confirmation.  Any unexpected move state aborts the move.
 *
 * NOTE(review): break statements, the default case, some guard lines
 * (if (!chan), if (!hchan)) and closing braces are not visible in this
 * excerpt.
 */
5312 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5314 struct l2cap_chan *chan;
5315 struct hci_chan *hchan = NULL;
5317 chan = l2cap_get_chan_by_scid(conn, icid);
/* No channel: confirm unconfirmed using the bare icid. */
5319 l2cap_send_move_chan_cfm_icid(conn, icid);
/* Stop the move guard timer; re-arm (extended) if still pending. */
5323 __clear_chan_timer(chan);
5324 if (result == L2CAP_MR_PEND)
5325 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5327 switch (chan->move_state) {
5328 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5329 /* Move confirm will be sent when logical link
5332 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5334 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5335 if (result == L2CAP_MR_PEND) {
5337 } else if (test_bit(CONN_LOCAL_BUSY,
5338 &chan->conn_state)) {
5339 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5341 /* Logical link is up or moving to BR/EDR,
5344 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5345 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5348 case L2CAP_MOVE_WAIT_RSP:
5350 if (result == L2CAP_MR_SUCCESS) {
5351 /* Remote is ready, send confirm immediately
5352 * after logical link is ready
5354 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5356 /* Both logical link and move success
5357 * are required to confirm
5359 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5362 /* Placeholder - get hci_chan for logical link */
5364 /* Logical link not available */
5365 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5369 /* If the logical link is not yet connected, do not
5370 * send confirmation.
5372 if (hchan->state != BT_CONNECTED)
5375 /* Logical link is already ready to go */
5377 chan->hs_hcon = hchan->conn;
5378 chan->hs_hcon->l2cap_data = chan->conn;
5380 if (result == L2CAP_MR_SUCCESS) {
5381 /* Can confirm now */
5382 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5384 /* Now only need move success
5387 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
/* Tell the state machine the link is confirmed. */
5390 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5393 /* Any other amp move state means the move failed. */
5394 chan->move_id = chan->local_amp_id;
5395 l2cap_move_done(chan);
5396 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5399 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response.
 *
 * On a collision the bd_addr winner keeps initiating, so this side
 * flips to responder; any other failure cancels the move.  Either way
 * an unconfirmed Move Channel Confirmation is sent.
 */
5402 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5405 struct l2cap_chan *chan;
/* The response is matched by the ident of our request. */
5407 chan = l2cap_get_chan_by_ident(conn, ident);
5409 /* Could not locate channel, icid is best guess */
5410 l2cap_send_move_chan_cfm_icid(conn, icid);
5414 __clear_chan_timer(chan);
5416 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5417 if (result == L2CAP_MR_COLLISION) {
/* Lost the collision: let the remote drive the move. */
5418 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5420 /* Cleanup - cancel move */
5421 chan->move_id = chan->local_amp_id;
5422 l2cap_move_done(chan);
5426 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5428 l2cap_chan_unlock(chan);
5431 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5432 struct l2cap_cmd_hdr *cmd,
5433 u16 cmd_len, void *data)
5435 struct l2cap_move_chan_rsp *rsp = data;
5438 if (cmd_len != sizeof(*rsp))
5441 icid = le16_to_cpu(rsp->icid);
5442 result = le16_to_cpu(rsp->result);
5444 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5446 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5447 l2cap_move_continue(conn, icid, result);
5449 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Confirmation.
 *
 * A confirmation response is always sent (the spec requires one even
 * for unknown icids).  If the channel was waiting for this confirm,
 * commit or roll back the controller switch and finish the move.
 *
 * NOTE(review): -EPROTO return, the else branch of the result test and
 * the final return are not visible in this excerpt.
 */
5454 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5455 struct l2cap_cmd_hdr *cmd,
5456 u16 cmd_len, void *data)
5458 struct l2cap_move_chan_cfm *cfm = data;
5459 struct l2cap_chan *chan;
/* Fixed-size PDU. */
5462 if (cmd_len != sizeof(*cfm))
5465 icid = le16_to_cpu(cfm->icid);
5466 result = le16_to_cpu(cfm->result);
5468 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5470 chan = l2cap_get_chan_by_dcid(conn, icid);
5472 /* Spec requires a response even if the icid was not found */
5473 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5477 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5478 if (result == L2CAP_MC_CONFIRMED) {
/* Commit: the channel now lives on the new controller. */
5479 chan->local_amp_id = chan->move_id;
5480 if (chan->local_amp_id == AMP_ID_BREDR)
5481 __release_logical_link(chan);
/* Unconfirmed: stay on the current controller. */
5483 chan->move_id = chan->local_amp_id;
5486 l2cap_move_done(chan);
5489 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5491 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Confirmation Response.
 *
 * Final handshake step for the initiator: commit the controller
 * switch, release the logical link if we ended up back on BR/EDR,
 * and finish the move.
 */
5496 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5497 struct l2cap_cmd_hdr *cmd,
5498 u16 cmd_len, void *data)
5500 struct l2cap_move_chan_cfm_rsp *rsp = data;
5501 struct l2cap_chan *chan;
/* Fixed-size PDU. */
5504 if (cmd_len != sizeof(*rsp))
5507 icid = le16_to_cpu(rsp->icid);
5509 BT_DBG("icid 0x%4.4x", icid);
5511 chan = l2cap_get_chan_by_scid(conn, icid);
/* Confirmation acknowledged: stop the move guard timer. */
5515 __clear_chan_timer(chan);
5517 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5518 chan->local_amp_id = chan->move_id;
/* Back on BR/EDR: the AMP logical link is no longer needed. */
5520 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5521 __release_logical_link(chan);
5523 l2cap_move_done(chan);
5526 l2cap_chan_unlock(chan);
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when we are the central (master) role.  The requested
 * parameters are validated with hci_check_conn_params(); the peer gets
 * an accept/reject response, and on accept the controller is asked to
 * update the connection and mgmt is informed of the new parameters.
 *
 * NOTE(review): local declarations (err, store_hint), the accept-path
 * if (!err) guard and some continuation lines are not visible in this
 * excerpt.
 */
5531 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5532 struct l2cap_cmd_hdr *cmd,
5533 u16 cmd_len, u8 *data)
5535 struct hci_conn *hcon = conn->hcon;
5536 struct l2cap_conn_param_update_req *req;
5537 struct l2cap_conn_param_update_rsp rsp;
5538 u16 min, max, latency, to_multiplier;
/* Only the central may act on a parameter update request. */
5541 if (hcon->role != HCI_ROLE_MASTER)
5544 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5547 req = (struct l2cap_conn_param_update_req *) data;
5548 min = __le16_to_cpu(req->min);
5549 max = __le16_to_cpu(req->max);
5550 latency = __le16_to_cpu(req->latency);
5551 to_multiplier = __le16_to_cpu(req->to_multiplier);
5553 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5554 min, max, latency, to_multiplier);
5556 memset(&rsp, 0, sizeof(rsp));
/* Range-check the requested parameters. */
5558 err = hci_check_conn_params(min, max, latency, to_multiplier);
5560 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5562 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5564 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the update to the controller and notify mgmt. */
5570 store_hint = hci_le_conn_update(hcon, min, max, latency,
5572 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5573 store_hint, min, max, latency,
/* Handle an LE Credit Based Connection Response.
 *
 * Validates the negotiated MTU/MPS (>= 23) and the returned dcid range
 * before acting.  On success the channel is readied with the peer's
 * parameters; on an authentication/encryption failure the security
 * level is raised and SMP is kicked so the request can be retried;
 * any other result refuses the channel.
 *
 * NOTE(review): -EPROTO returns, the dcid duplicate-check error path,
 * switch(result) header, several assignments (chan->dcid, remote_mtu)
 * and break/goto lines are not visible in this excerpt.
 */
5581 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5582 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5585 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5586 struct hci_conn *hcon = conn->hcon;
5587 u16 dcid, mtu, mps, credits, result;
5588 struct l2cap_chan *chan;
5591 if (cmd_len < sizeof(*rsp))
5594 dcid = __le16_to_cpu(rsp->dcid);
5595 mtu = __le16_to_cpu(rsp->mtu);
5596 mps = __le16_to_cpu(rsp->mps);
5597 credits = __le16_to_cpu(rsp->credits);
5598 result = __le16_to_cpu(rsp->result);
/* On success the parameters must be sane: LE minimum MTU/MPS is
 * 23 and the dcid must fall in the LE dynamic CID range. */
5600 if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5601 dcid < L2CAP_CID_DYN_START ||
5602 dcid > L2CAP_CID_LE_DYN_END))
5605 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5606 dcid, mtu, mps, credits, result);
5608 mutex_lock(&conn->chan_lock);
/* The response is matched by the ident of our request. */
5610 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5618 l2cap_chan_lock(chan);
5621 case L2CAP_CR_LE_SUCCESS:
/* The remote must not hand out a dcid already in use. */
5622 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5630 chan->remote_mps = mps;
5631 chan->tx_credits = credits;
5632 l2cap_chan_ready(chan);
5635 case L2CAP_CR_LE_AUTHENTICATION:
5636 case L2CAP_CR_LE_ENCRYPTION:
5637 /* If we already have MITM protection we can't do
5640 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5641 l2cap_chan_del(chan, ECONNREFUSED);
/* Raise security one level and retry via SMP. */
5645 sec_level = hcon->sec_level + 1;
5646 if (chan->sec_level < sec_level)
5647 chan->sec_level = sec_level;
5649 /* We'll need to send a new Connect Request */
5650 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5652 smp_conn_security(hcon, chan->sec_level);
/* Any other result: refuse the channel. */
5656 l2cap_chan_del(chan, ECONNREFUSED);
5660 l2cap_chan_unlock(chan);
5663 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Request handlers return an error that the caller converts into a
 * Command Reject; response handlers are fire-and-forget. Unknown
 * opcodes are logged.
 */
5668 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5669 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5674 switch (cmd->code) {
5675 case L2CAP_COMMAND_REJ:
5676 l2cap_command_rej(conn, cmd, cmd_len, data);
5679 case L2CAP_CONN_REQ:
5680 err = l2cap_connect_req(conn, cmd, cmd_len, data);
/* Connect and Create Channel responses share one handler. */
5683 case L2CAP_CONN_RSP:
5684 case L2CAP_CREATE_CHAN_RSP:
5685 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5688 case L2CAP_CONF_REQ:
5689 err = l2cap_config_req(conn, cmd, cmd_len, data);
5692 case L2CAP_CONF_RSP:
5693 l2cap_config_rsp(conn, cmd, cmd_len, data);
5696 case L2CAP_DISCONN_REQ:
5697 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5700 case L2CAP_DISCONN_RSP:
5701 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
/* Echo requests are answered by reflecting the payload back. */
5704 case L2CAP_ECHO_REQ:
5705 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5708 case L2CAP_ECHO_RSP:
5711 case L2CAP_INFO_REQ:
5712 err = l2cap_information_req(conn, cmd, cmd_len, data);
5715 case L2CAP_INFO_RSP:
5716 l2cap_information_rsp(conn, cmd, cmd_len, data);
5719 case L2CAP_CREATE_CHAN_REQ:
5720 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
/* AMP channel-move signaling. */
5723 case L2CAP_MOVE_CHAN_REQ:
5724 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5727 case L2CAP_MOVE_CHAN_RSP:
5728 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5731 case L2CAP_MOVE_CHAN_CFM:
5732 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5735 case L2CAP_MOVE_CHAN_CFM_RSP:
5736 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5740 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Validates the PDU and the requested SCID/MTU/MPS, checks for a
 * listening socket on the PSM and sufficient security, creates the new
 * channel, and answers with an LE Connect Response -- unless the
 * channel defers setup (BT_CONNECT2), in which case the response is
 * sent later by the socket layer.
 */
5748 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5749 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5752 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5753 struct l2cap_le_conn_rsp rsp;
5754 struct l2cap_chan *chan, *pchan;
5755 u16 dcid, scid, credits, mtu, mps;
/* The request has a fixed size; anything else is malformed. */
5759 if (cmd_len != sizeof(*req))
5762 scid = __le16_to_cpu(req->scid);
5763 mtu = __le16_to_cpu(req->mtu);
5764 mps = __le16_to_cpu(req->mps);
/* 23 is the minimum legal LE MTU/MPS. */
5769 if (mtu < 23 || mps < 23)
5772 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5775 /* Check if we have socket listening on psm */
5776 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5777 &conn->hcon->dst, LE_LINK);
5779 result = L2CAP_CR_LE_BAD_PSM;
5784 mutex_lock(&conn->chan_lock);
5785 l2cap_chan_lock(pchan);
/* The listener's security requirements must already be met. */
5787 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5789 result = L2CAP_CR_LE_AUTHENTICATION;
5791 goto response_unlock;
5794 /* Check for valid dynamic CID range */
5795 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5796 result = L2CAP_CR_LE_INVALID_SCID;
5798 goto response_unlock;
5801 /* Check if we already have channel with that dcid */
5802 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5803 result = L2CAP_CR_LE_SCID_IN_USE;
5805 goto response_unlock;
/* Ask the socket layer to materialize the accepted channel. */
5808 chan = pchan->ops->new_connection(pchan);
5810 result = L2CAP_CR_LE_NO_MEM;
5811 goto response_unlock;
5814 bacpy(&chan->src, &conn->hcon->src);
5815 bacpy(&chan->dst, &conn->hcon->dst);
5816 chan->src_type = bdaddr_src_type(conn->hcon);
5817 chan->dst_type = bdaddr_dst_type(conn->hcon);
5821 chan->remote_mps = mps;
5823 __l2cap_chan_add(conn, chan);
/* Seed the credit-based flow control state from the peer's credits. */
5825 l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5828 credits = chan->rx_credits;
5830 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5832 chan->ident = cmd->ident;
5834 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5835 l2cap_state_change(chan, BT_CONNECT2);
5836 /* The following result value is actually not defined
5837 * for LE CoC but we use it to let the function know
5838 * that it should bail out after doing its cleanup
5839 * instead of sending a response.
5841 result = L2CAP_CR_PEND;
5842 chan->ops->defer(chan);
5844 l2cap_chan_ready(chan);
5845 result = L2CAP_CR_LE_SUCCESS;
5849 l2cap_chan_unlock(pchan);
5850 mutex_unlock(&conn->chan_lock);
5851 l2cap_chan_put(pchan);
/* Deferred setup: the response is sent later, bail out here. */
5853 if (result == L2CAP_CR_PEND)
5858 rsp.mtu = cpu_to_le16(chan->imtu);
5859 rsp.mps = cpu_to_le16(chan->mps);
5865 rsp.dcid = cpu_to_le16(dcid);
5866 rsp.credits = cpu_to_le16(credits);
5867 rsp.result = cpu_to_le16(result);
5869 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the peer-granted
 * credits to the channel's TX budget and resume transmission.
 *
 * A grant that would push the total above LE_FLOWCTL_MAX_CREDITS
 * (65535) is a protocol violation and disconnects the channel.
 */
5874 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5875 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5878 struct l2cap_le_credits *pkt;
5879 struct l2cap_chan *chan;
5880 u16 cid, credits, max_credits;
5882 if (cmd_len != sizeof(*pkt))
5885 pkt = (struct l2cap_le_credits *) data;
5886 cid = __le16_to_cpu(pkt->cid);
5887 credits = __le16_to_cpu(pkt->credits);
5889 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
/* Returns the channel locked; unlocked on every exit path below. */
5891 chan = l2cap_get_chan_by_dcid(conn, cid);
5895 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5896 if (credits > max_credits) {
5897 BT_ERR("LE credits overflow");
5898 l2cap_send_disconn_req(chan, ECONNRESET);
5899 l2cap_chan_unlock(chan);
5901 /* Return 0 so that we don't trigger an unnecessary
5902 * command reject packet.
5907 chan->tx_credits += credits;
5909 /* Resume sending */
5910 l2cap_le_flowctl_send(chan);
/* Tell the socket layer it may accept more outgoing data. */
5912 if (chan->tx_credits)
5913 chan->ops->resume(chan);
5915 l2cap_chan_unlock(chan);
/* Handle an Enhanced Credit Based Connection Request, which may open
 * up to L2CAP_ECRED_MAX_CID channels in one PDU.
 *
 * The response carries one DCID slot per requested SCID; a slot stays
 * 0x0000 when that particular channel was refused while the others may
 * still succeed. Shared MTU/MPS/credits are filled in from the first
 * successfully created channel.
 */
5920 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5921 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5924 struct l2cap_ecred_conn_req *req = (void *) data;
5926 struct l2cap_ecred_conn_rsp rsp;
5927 __le16 dcid[L2CAP_ECRED_MAX_CID];
5929 struct l2cap_chan *chan, *pchan;
/* The trailing SCID list must be a whole number of u16 entries. */
5939 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5940 result = L2CAP_CR_LE_INVALID_PARAMS;
5944 cmd_len -= sizeof(*req);
5945 num_scid = cmd_len / sizeof(u16);
5947 if (num_scid > ARRAY_SIZE(pdu.dcid)) {
5948 result = L2CAP_CR_LE_INVALID_PARAMS;
5952 mtu = __le16_to_cpu(req->mtu);
5953 mps = __le16_to_cpu(req->mps);
5955 if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
5956 result = L2CAP_CR_LE_UNACCEPT_PARAMS;
5962 BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
5964 memset(&pdu, 0, sizeof(pdu));
5966 /* Check if we have socket listening on psm */
5967 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5968 &conn->hcon->dst, LE_LINK);
5970 result = L2CAP_CR_LE_BAD_PSM;
5974 mutex_lock(&conn->chan_lock);
5975 l2cap_chan_lock(pchan);
5977 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5979 result = L2CAP_CR_LE_AUTHENTICATION;
5983 result = L2CAP_CR_LE_SUCCESS;
/* Try to create one channel per requested SCID. */
5985 for (i = 0; i < num_scid; i++) {
5986 u16 scid = __le16_to_cpu(req->scid[i]);
5988 BT_DBG("scid[%d] 0x%4.4x", i, scid);
/* Default to "refused" for this slot; overwritten on success. */
5990 pdu.dcid[i] = 0x0000;
5991 len += sizeof(*pdu.dcid);
5993 /* Check for valid dynamic CID range */
5994 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5995 result = L2CAP_CR_LE_INVALID_SCID;
5999 /* Check if we already have channel with that dcid */
6000 if (__l2cap_get_chan_by_dcid(conn, scid)) {
6001 result = L2CAP_CR_LE_SCID_IN_USE;
6005 chan = pchan->ops->new_connection(pchan);
6007 result = L2CAP_CR_LE_NO_MEM;
6011 bacpy(&chan->src, &conn->hcon->src);
6012 bacpy(&chan->dst, &conn->hcon->dst);
6013 chan->src_type = bdaddr_src_type(conn->hcon);
6014 chan->dst_type = bdaddr_dst_type(conn->hcon);
6018 chan->remote_mps = mps;
6020 __l2cap_chan_add(conn, chan);
6022 l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
/* Shared response fields come from the first created channel. */
6025 if (!pdu.rsp.credits) {
6026 pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6027 pdu.rsp.mps = cpu_to_le16(chan->mps);
6028 pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6031 pdu.dcid[i] = cpu_to_le16(chan->scid);
6033 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6035 chan->ident = cmd->ident;
6037 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6038 l2cap_state_change(chan, BT_CONNECT2);
6040 chan->ops->defer(chan);
6042 l2cap_chan_ready(chan);
6047 l2cap_chan_unlock(pchan);
6048 mutex_unlock(&conn->chan_lock);
6049 l2cap_chan_put(pchan);
6052 pdu.rsp.result = cpu_to_le16(result);
6057 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6058 sizeof(pdu.rsp) + len, &pdu);
/* Handle an Enhanced Credit Based Connection Response.
 *
 * Walks every channel pending on this command ident (one per requested
 * SCID) and consumes one DCID from the response per channel. Handles
 * per-result outcomes: success readies the channel, security failures
 * bump the security level and retry via SMP, anything else (or a zero
 * DCID slot) refuses the channel.
 */
6063 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6064 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6067 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6068 struct hci_conn *hcon = conn->hcon;
6069 u16 mtu, mps, credits, result;
6070 struct l2cap_chan *chan, *tmp;
6071 int err = 0, sec_level;
6074 if (cmd_len < sizeof(*rsp))
6077 mtu = __le16_to_cpu(rsp->mtu);
6078 mps = __le16_to_cpu(rsp->mps);
6079 credits = __le16_to_cpu(rsp->credits);
6080 result = __le16_to_cpu(rsp->result);
6082 BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6085 mutex_lock(&conn->chan_lock);
/* cmd_len now counts only the trailing DCID list. */
6087 cmd_len -= sizeof(*rsp);
6089 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
/* Only channels still pending on this request's ident. */
6092 if (chan->ident != cmd->ident ||
6093 chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6094 chan->state == BT_CONNECTED)
6097 l2cap_chan_lock(chan);
6099 /* Check that there is a dcid for each pending channel */
6100 if (cmd_len < sizeof(dcid)) {
6101 l2cap_chan_del(chan, ECONNREFUSED);
6102 l2cap_chan_unlock(chan);
6106 dcid = __le16_to_cpu(rsp->dcid[i++]);
6107 cmd_len -= sizeof(u16);
6109 BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6111 /* Check if dcid is already in use */
6112 if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6113 /* If a device receives a
6114 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6115 * already-assigned Destination CID, then both the
6116 * original channel and the new channel shall be
6117 * immediately discarded and not used.
6119 l2cap_chan_del(chan, ECONNREFUSED);
6120 l2cap_chan_unlock(chan);
6121 chan = __l2cap_get_chan_by_dcid(conn, dcid);
6122 l2cap_chan_lock(chan);
6123 l2cap_chan_del(chan, ECONNRESET);
6124 l2cap_chan_unlock(chan);
6129 case L2CAP_CR_LE_AUTHENTICATION:
6130 case L2CAP_CR_LE_ENCRYPTION:
6131 /* If we already have MITM protection we can't do
6134 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6135 l2cap_chan_del(chan, ECONNREFUSED);
/* Raise the security level one step and retry via SMP. */
6139 sec_level = hcon->sec_level + 1;
6140 if (chan->sec_level < sec_level)
6141 chan->sec_level = sec_level;
6143 /* We'll need to send a new Connect Request */
6144 clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6146 smp_conn_security(hcon, chan->sec_level);
6149 case L2CAP_CR_LE_BAD_PSM:
6150 l2cap_chan_del(chan, ECONNREFUSED);
6154 /* If dcid was not set it means channels was refused */
6156 l2cap_chan_del(chan, ECONNREFUSED);
/* Success: record peer parameters and complete the channel. */
6163 chan->remote_mps = mps;
6164 chan->tx_credits = credits;
6165 l2cap_chan_ready(chan);
6169 l2cap_chan_unlock(chan);
6172 mutex_unlock(&conn->chan_lock);
6177 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6178 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6181 struct l2cap_ecred_reconf_req *req = (void *) data;
6182 struct l2cap_ecred_reconf_rsp rsp;
6183 u16 mtu, mps, result;
6184 struct l2cap_chan *chan;
6190 if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
6191 result = L2CAP_CR_LE_INVALID_PARAMS;
6195 mtu = __le16_to_cpu(req->mtu);
6196 mps = __le16_to_cpu(req->mps);
6198 BT_DBG("mtu %u mps %u", mtu, mps);
6200 if (mtu < L2CAP_ECRED_MIN_MTU) {
6201 result = L2CAP_RECONF_INVALID_MTU;
6205 if (mps < L2CAP_ECRED_MIN_MPS) {
6206 result = L2CAP_RECONF_INVALID_MPS;
6210 cmd_len -= sizeof(*req);
6211 num_scid = cmd_len / sizeof(u16);
6212 result = L2CAP_RECONF_SUCCESS;
6214 for (i = 0; i < num_scid; i++) {
6217 scid = __le16_to_cpu(req->scid[i]);
6221 chan = __l2cap_get_chan_by_dcid(conn, scid);
6225 /* If the MTU value is decreased for any of the included
6226 * channels, then the receiver shall disconnect all
6227 * included channels.
6229 if (chan->omtu > mtu) {
6230 BT_ERR("chan %p decreased MTU %u -> %u", chan,
6232 result = L2CAP_RECONF_INVALID_MTU;
6236 chan->remote_mps = mps;
6240 rsp.result = cpu_to_le16(result);
6242 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6248 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6249 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6252 struct l2cap_chan *chan, *tmp;
6253 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6256 if (cmd_len < sizeof(*rsp))
6259 result = __le16_to_cpu(rsp->result);
6261 BT_DBG("result 0x%4.4x", rsp->result);
6266 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6267 if (chan->ident != cmd->ident)
6270 l2cap_chan_del(chan, ECONNRESET);
/* Handle a Command Reject on the LE signaling channel: the peer did
 * not understand our request, so the channel waiting on that ident is
 * refused.
 */
6276 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6277 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6280 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6281 struct l2cap_chan *chan;
6283 if (cmd_len < sizeof(*rej))
6286 mutex_lock(&conn->chan_lock);
/* Find the channel whose outgoing request carried this ident. */
6288 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6292 l2cap_chan_lock(chan);
6293 l2cap_chan_del(chan, ECONNREFUSED);
6294 l2cap_chan_unlock(chan);
6297 mutex_unlock(&conn->chan_lock);
/* Dispatch one LE signaling command to its handler. Unknown opcodes
 * are logged; the caller converts a nonzero return into a Command
 * Reject.
 */
6301 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6302 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6307 switch (cmd->code) {
6308 case L2CAP_COMMAND_REJ:
6309 l2cap_le_command_rej(conn, cmd, cmd_len, data);
6312 case L2CAP_CONN_PARAM_UPDATE_REQ:
6313 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6316 case L2CAP_CONN_PARAM_UPDATE_RSP:
6319 case L2CAP_LE_CONN_RSP:
6320 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6323 case L2CAP_LE_CONN_REQ:
6324 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6327 case L2CAP_LE_CREDITS:
6328 err = l2cap_le_credits(conn, cmd, cmd_len, data);
/* Enhanced Credit Based (ECRED) connection and reconfigure PDUs. */
6331 case L2CAP_ECRED_CONN_REQ:
6332 err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6335 case L2CAP_ECRED_CONN_RSP:
6336 err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6339 case L2CAP_ECRED_RECONF_REQ:
6340 err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6343 case L2CAP_ECRED_RECONF_RSP:
6344 err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6347 case L2CAP_DISCONN_REQ:
6348 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6351 case L2CAP_DISCONN_RSP:
6352 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6356 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the LE signaling channel: parse the
 * single command header, validate link type/length/ident, dispatch,
 * and send a Command Reject if the handler failed.
 */
6364 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6365 struct sk_buff *skb)
6367 struct hci_conn *hcon = conn->hcon;
6368 struct l2cap_cmd_hdr *cmd;
/* LE signaling must arrive on an LE link. */
6372 if (hcon->type != LE_LINK)
6375 if (skb->len < L2CAP_CMD_HDR_SIZE)
6378 cmd = (void *) skb->data;
6379 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6381 len = le16_to_cpu(cmd->len);
6383 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* Exactly one command per LE signaling PDU; ident 0 is reserved. */
6385 if (len != skb->len || !cmd->ident) {
6386 BT_DBG("corrupted command");
6390 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6392 struct l2cap_cmd_rej_unk rej;
6394 BT_ERR("Wrong link type (%d)", err);
6396 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6397 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an skb received on the BR/EDR signaling channel. Unlike LE,
 * one PDU may pack several commands back to back, so loop over the
 * buffer, dispatching each and rejecting failures individually.
 */
6405 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6406 struct sk_buff *skb)
6408 struct hci_conn *hcon = conn->hcon;
6409 struct l2cap_cmd_hdr *cmd;
/* Feed raw signaling traffic to any raw (sniffing) sockets first. */
6412 l2cap_raw_recv(conn, skb);
6414 if (hcon->type != ACL_LINK)
6417 while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6420 cmd = (void *) skb->data;
6421 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6423 len = le16_to_cpu(cmd->len);
6425 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
/* Each command's declared length must fit the remaining data. */
6428 if (len > skb->len || !cmd->ident) {
6429 BT_DBG("corrupted command");
6433 err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6435 struct l2cap_cmd_rej_unk rej;
6437 BT_ERR("Wrong link type (%d)", err);
6439 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6440 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Verify (and strip) the CRC16 frame check sequence of an ERTM or
 * streaming-mode frame. The CRC covers the L2CAP header plus payload,
 * which is why hdr_size bytes before skb->data are included.
 */
6451 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
6453 u16 our_fcs, rcv_fcs;
/* Extended control fields use the larger header. */
6456 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6457 hdr_size = L2CAP_EXT_HDR_SIZE;
6459 hdr_size = L2CAP_ENH_HDR_SIZE;
6461 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim first so skb->data + skb->len points at the received FCS. */
6462 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6463 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6464 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6466 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) by sending the F-bit in whatever frame is most
 * appropriate: RNR when locally busy, pending I-frames if any, or a
 * plain RR as a last resort.
 */
6472 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6474 struct l2cap_ctrl control;
6476 BT_DBG("chan %p", chan);
6478 memset(&control, 0, sizeof(control));
6481 control.reqseq = chan->buffer_seq;
/* Mark that the final bit must be set on the next outgoing frame. */
6482 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6484 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6485 control.super = L2CAP_SUPER_RNR;
6486 l2cap_send_sframe(chan, &control);
/* Remote just cleared busy: restart retransmission timing if frames
 * are still unacknowledged.
 */
6489 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6490 chan->unacked_frames > 0)
6491 __set_retrans_timer(chan);
6493 /* Send pending iframes */
6494 l2cap_ertm_send(chan);
6496 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6497 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6498 /* F-bit wasn't sent in an s-frame or i-frame yet, so
6501 control.super = L2CAP_SUPER_RR;
6502 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list and keep *last_frag pointing at
 * the tail so the next append is O(1). Parent skb accounting (len,
 * data_len, truesize) is updated to include the fragment.
 */
6506 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6507 struct sk_buff **last_frag)
6509 /* skb->len reflects data in skb as well as all fragments
6510 * skb->data_len reflects only data in fragments
6512 if (!skb_has_frag_list(skb))
6513 skb_shinfo(skb)->frag_list = new_frag;
6515 new_frag->next = NULL;
6517 (*last_frag)->next = new_frag;
6518 *last_frag = new_frag;
6520 skb->len += new_frag->len;
6521 skb->data_len += new_frag->len;
6522 skb->truesize += new_frag->truesize;
/* Reassemble ERTM I-frames into SDUs according to the SAR bits.
 *
 * Unsegmented frames go straight to the socket layer; START frames
 * open a new SDU (first two bytes carry the total SDU length);
 * CONTINUE/END frames are appended via the frag_list. Oversized or
 * inconsistent SDUs fall through to the error path that frees the
 * partial SDU.
 */
6525 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6526 struct l2cap_ctrl *control)
6530 switch (control->sar) {
6531 case L2CAP_SAR_UNSEGMENTED:
6535 err = chan->ops->recv(chan, skb);
6538 case L2CAP_SAR_START:
/* The SDU length prefix must be present in the first fragment. */
6542 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
6545 chan->sdu_len = get_unaligned_le16(skb->data);
6546 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* The peer may not announce an SDU larger than our MTU. */
6548 if (chan->sdu_len > chan->imtu) {
6553 if (skb->len >= chan->sdu_len)
6557 chan->sdu_last_frag = skb;
6563 case L2CAP_SAR_CONTINUE:
6567 append_skb_frag(chan->sdu, skb,
6568 &chan->sdu_last_frag);
/* A CONTINUE fragment must not complete or overrun the SDU. */
6571 if (chan->sdu->len >= chan->sdu_len)
6581 append_skb_frag(chan->sdu, skb,
6582 &chan->sdu_last_frag);
/* An END fragment must complete the SDU exactly. */
6585 if (chan->sdu->len != chan->sdu_len)
6588 err = chan->ops->recv(chan, chan->sdu);
6591 /* Reassembly complete */
6593 chan->sdu_last_frag = NULL;
/* Error path: drop any partially reassembled SDU. */
6601 kfree_skb(chan->sdu);
6603 chan->sdu_last_frag = NULL;
/* Placeholder for resegmenting queued TX data after an MTU change
 * (e.g. following a channel move) -- body not shown here; presumably a
 * no-op stub. TODO(review): confirm against the full source.
 */
6610 static int l2cap_resegment(struct l2cap_chan *chan)
/* Socket-layer callback: translate a receive-buffer busy/unbusy
 * transition into the matching ERTM local-busy event. Only meaningful
 * in ERTM mode; other modes ignore it.
 */
6616 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6620 if (chan->mode != L2CAP_MODE_ERTM)
6623 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6624 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue of consecutive in-order frames, reassembling
 * each into the SDU stream, until a sequence gap (or local busy)
 * stops progress. An empty queue means recovery is complete: return
 * to the RECV state and acknowledge.
 */
6627 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6630 /* Pass sequential frames to l2cap_reassemble_sdu()
6631 * until a gap is encountered.
6634 BT_DBG("chan %p", chan);
6636 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6637 struct sk_buff *skb;
6638 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6639 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6641 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6646 skb_unlink(skb, &chan->srej_q);
6647 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6648 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
/* Queue empty: every requested retransmission has arrived. */
6653 if (skb_queue_empty(&chan->srej_q)) {
6654 chan->rx_state = L2CAP_RX_STATE_RECV;
6655 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame, with P/F-bit bookkeeping to avoid double retransmission
 * when the SREJ answers our own poll.
 */
6661 static void l2cap_handle_srej(struct l2cap_chan *chan,
6662 struct l2cap_ctrl *control)
6664 struct sk_buff *skb;
6666 BT_DBG("chan %p, control %p", chan, control);
/* The peer cannot legitimately request a frame we never sent. */
6668 if (control->reqseq == chan->next_tx_seq) {
6669 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6670 l2cap_send_disconn_req(chan, ECONNRESET);
6674 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6677 BT_DBG("Seq %d not available for retransmission",
/* Enforce the max_tx retry limit (0 means unlimited). */
6682 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6683 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6684 l2cap_send_disconn_req(chan, ECONNRESET);
6688 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6690 if (control->poll) {
6691 l2cap_pass_to_tx(chan, control);
/* SREJ with P-bit: retransmit with the F-bit set in reply. */
6693 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6694 l2cap_retransmit(chan, control);
6695 l2cap_ertm_send(chan);
6697 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6698 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6699 chan->srej_save_reqseq = control->reqseq;
6702 l2cap_pass_to_tx_fbit(chan, control);
6704 if (control->final) {
/* Skip the retransmit only if this F-bit SREJ matches the
 * poll-time SREJ we already acted on.
 */
6705 if (chan->srej_save_reqseq != control->reqseq ||
6706 !test_and_clear_bit(CONN_SREJ_ACT,
6708 l2cap_retransmit(chan, control);
6710 l2cap_retransmit(chan, control);
6711 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6712 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6713 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit everything from reqseq
 * onward, guarding against invalid reqseq values and the retry limit.
 */
6719 static void l2cap_handle_rej(struct l2cap_chan *chan,
6720 struct l2cap_ctrl *control)
6722 struct sk_buff *skb;
6724 BT_DBG("chan %p, control %p", chan, control);
/* Rejecting the next (unsent) sequence number is a protocol error. */
6726 if (control->reqseq == chan->next_tx_seq) {
6727 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6728 l2cap_send_disconn_req(chan, ECONNRESET);
6732 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6734 if (chan->max_tx && skb &&
6735 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6736 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6737 l2cap_send_disconn_req(chan, ECONNRESET);
6741 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6743 l2cap_pass_to_tx(chan, control);
6745 if (control->final) {
/* F-bit REJ: retransmit only if we haven't already handled the
 * matching poll-time REJ.
 */
6746 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6747 l2cap_retransmit_all(chan, control);
6749 l2cap_retransmit_all(chan, control);
6750 l2cap_ertm_send(chan);
6751 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6752 set_bit(CONN_REJ_ACT, &chan->conn_state);
6756 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6758 BT_DBG("chan %p, txseq %d", chan, txseq);
6760 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6761 chan->expected_tx_seq);
6763 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6764 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6766 /* See notes below regarding "double poll" and
6769 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6770 BT_DBG("Invalid/Ignore - after SREJ");
6771 return L2CAP_TXSEQ_INVALID_IGNORE;
6773 BT_DBG("Invalid - in window after SREJ sent");
6774 return L2CAP_TXSEQ_INVALID;
6778 if (chan->srej_list.head == txseq) {
6779 BT_DBG("Expected SREJ");
6780 return L2CAP_TXSEQ_EXPECTED_SREJ;
6783 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6784 BT_DBG("Duplicate SREJ - txseq already stored");
6785 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6788 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6789 BT_DBG("Unexpected SREJ - not requested");
6790 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6794 if (chan->expected_tx_seq == txseq) {
6795 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6797 BT_DBG("Invalid - txseq outside tx window");
6798 return L2CAP_TXSEQ_INVALID;
6801 return L2CAP_TXSEQ_EXPECTED;
6805 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6806 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6807 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6808 return L2CAP_TXSEQ_DUPLICATE;
6811 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6812 /* A source of invalid packets is a "double poll" condition,
6813 * where delays cause us to send multiple poll packets. If
6814 * the remote stack receives and processes both polls,
6815 * sequence numbers can wrap around in such a way that a
6816 * resent frame has a sequence number that looks like new data
6817 * with a sequence gap. This would trigger an erroneous SREJ
6820 * Fortunately, this is impossible with a tx window that's
6821 * less than half of the maximum sequence number, which allows
6822 * invalid frames to be safely ignored.
6824 * With tx window sizes greater than half of the tx window
6825 * maximum, the frame is invalid and cannot be ignored. This
6826 * causes a disconnect.
6829 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6830 BT_DBG("Invalid/Ignore - txseq outside tx window");
6831 return L2CAP_TXSEQ_INVALID_IGNORE;
6833 BT_DBG("Invalid - txseq outside tx window");
6834 return L2CAP_TXSEQ_INVALID;
6837 BT_DBG("Unexpected - txseq indicates missing frames");
6838 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine, normal RECV state.
 *
 * I-frames are classified by txseq: expected frames are reassembled
 * and acked; a gap queues the frame and starts SREJ recovery
 * (transition to SREJ_SENT); duplicates/ignorable frames are dropped;
 * invalid frames disconnect. S-frames (RR/RNR/REJ/SREJ) update the TX
 * side. Any skb not queued for later (skb_in_use) is freed on exit.
 */
6842 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6843 struct l2cap_ctrl *control,
6844 struct sk_buff *skb, u8 event)
6847 bool skb_in_use = false;
6849 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6853 case L2CAP_EV_RECV_IFRAME:
6854 switch (l2cap_classify_txseq(chan, control->txseq)) {
6855 case L2CAP_TXSEQ_EXPECTED:
6856 l2cap_pass_to_tx(chan, control);
/* In local busy we can't accept data; the drop will be recovered
 * as a missing frame once busy clears.
 */
6858 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6859 BT_DBG("Busy, discarding expected seq %d",
6864 chan->expected_tx_seq = __next_seq(chan,
6867 chan->buffer_seq = chan->expected_tx_seq;
6870 err = l2cap_reassemble_sdu(chan, skb, control);
6874 if (control->final) {
6875 if (!test_and_clear_bit(CONN_REJ_ACT,
6876 &chan->conn_state)) {
6878 l2cap_retransmit_all(chan, control);
6879 l2cap_ertm_send(chan);
6883 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6884 l2cap_send_ack(chan);
6886 case L2CAP_TXSEQ_UNEXPECTED:
6887 l2cap_pass_to_tx(chan, control);
6889 /* Can't issue SREJ frames in the local busy state.
6890 * Drop this frame, it will be seen as missing
6891 * when local busy is exited.
6893 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6894 BT_DBG("Busy, discarding unexpected seq %d",
6899 /* There was a gap in the sequence, so an SREJ
6900 * must be sent for each missing frame. The
6901 * current frame is stored for later use.
6903 skb_queue_tail(&chan->srej_q, skb);
6905 BT_DBG("Queued %p (queue len %d)", skb,
6906 skb_queue_len(&chan->srej_q));
6908 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6909 l2cap_seq_list_clear(&chan->srej_list);
6910 l2cap_send_srej(chan, control->txseq);
/* Enter SREJ recovery until the gap is filled. */
6912 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6914 case L2CAP_TXSEQ_DUPLICATE:
6915 l2cap_pass_to_tx(chan, control);
6917 case L2CAP_TXSEQ_INVALID_IGNORE:
6919 case L2CAP_TXSEQ_INVALID:
6921 l2cap_send_disconn_req(chan, ECONNRESET);
6925 case L2CAP_EV_RECV_RR:
6926 l2cap_pass_to_tx(chan, control);
6927 if (control->final) {
6928 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6930 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6931 !__chan_is_moving(chan)) {
6933 l2cap_retransmit_all(chan, control);
6936 l2cap_ertm_send(chan);
6937 } else if (control->poll) {
6938 l2cap_send_i_or_rr_or_rnr(chan);
6940 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6941 &chan->conn_state) &&
6942 chan->unacked_frames)
6943 __set_retrans_timer(chan);
6945 l2cap_ertm_send(chan);
6948 case L2CAP_EV_RECV_RNR:
/* Peer is busy: stop retransmitting; answer polls with F-bit. */
6949 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6950 l2cap_pass_to_tx(chan, control);
6951 if (control && control->poll) {
6952 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6953 l2cap_send_rr_or_rnr(chan, 0);
6955 __clear_retrans_timer(chan);
6956 l2cap_seq_list_clear(&chan->retrans_list);
6958 case L2CAP_EV_RECV_REJ:
6959 l2cap_handle_rej(chan, control);
6961 case L2CAP_EV_RECV_SREJ:
6962 l2cap_handle_srej(chan, control);
/* Frames that were not queued above are no longer needed. */
6968 if (skb && !skb_in_use) {
6969 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine, SREJ_SENT (selective-reject recovery) state.
 *
 * Incoming I-frames are queued in srej_q; requested retransmissions
 * pop their entry from the SREJ list and trigger in-order delivery of
 * whatever became contiguous. New gaps generate further SREJs.
 * S-frames are handled as in RECV, with SREJ-tail polling instead of
 * plain acks where appropriate.
 */
6976 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6977 struct l2cap_ctrl *control,
6978 struct sk_buff *skb, u8 event)
6981 u16 txseq = control->txseq;
6982 bool skb_in_use = false;
6984 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6988 case L2CAP_EV_RECV_IFRAME:
6989 switch (l2cap_classify_txseq(chan, txseq)) {
6990 case L2CAP_TXSEQ_EXPECTED:
6991 /* Keep frame for reassembly later */
6992 l2cap_pass_to_tx(chan, control);
6993 skb_queue_tail(&chan->srej_q, skb);
6995 BT_DBG("Queued %p (queue len %d)", skb,
6996 skb_queue_len(&chan->srej_q));
6998 chan->expected_tx_seq = __next_seq(chan, txseq);
/* A frame we asked for via SREJ has arrived. */
7000 case L2CAP_TXSEQ_EXPECTED_SREJ:
7001 l2cap_seq_list_pop(&chan->srej_list);
7003 l2cap_pass_to_tx(chan, control);
7004 skb_queue_tail(&chan->srej_q, skb);
7006 BT_DBG("Queued %p (queue len %d)", skb,
7007 skb_queue_len(&chan->srej_q));
/* Deliver any frames that are now contiguous. */
7009 err = l2cap_rx_queued_iframes(chan);
7014 case L2CAP_TXSEQ_UNEXPECTED:
7015 /* Got a frame that can't be reassembled yet.
7016 * Save it for later, and send SREJs to cover
7017 * the missing frames.
7019 skb_queue_tail(&chan->srej_q, skb);
7021 BT_DBG("Queued %p (queue len %d)", skb,
7022 skb_queue_len(&chan->srej_q));
7024 l2cap_pass_to_tx(chan, control);
7025 l2cap_send_srej(chan, control->txseq);
7027 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7028 /* This frame was requested with an SREJ, but
7029 * some expected retransmitted frames are
7030 * missing. Request retransmission of missing
7033 skb_queue_tail(&chan->srej_q, skb);
7035 BT_DBG("Queued %p (queue len %d)", skb,
7036 skb_queue_len(&chan->srej_q));
7038 l2cap_pass_to_tx(chan, control);
7039 l2cap_send_srej_list(chan, control->txseq);
7041 case L2CAP_TXSEQ_DUPLICATE_SREJ:
7042 /* We've already queued this frame. Drop this copy. */
7043 l2cap_pass_to_tx(chan, control);
7045 case L2CAP_TXSEQ_DUPLICATE:
7046 /* Expecting a later sequence number, so this frame
7047 * was already received. Ignore it completely.
7050 case L2CAP_TXSEQ_INVALID_IGNORE:
7052 case L2CAP_TXSEQ_INVALID:
7054 l2cap_send_disconn_req(chan, ECONNRESET);
7058 case L2CAP_EV_RECV_RR:
7059 l2cap_pass_to_tx(chan, control);
7060 if (control->final) {
7061 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7063 if (!test_and_clear_bit(CONN_REJ_ACT,
7064 &chan->conn_state)) {
7066 l2cap_retransmit_all(chan, control);
7069 l2cap_ertm_send(chan);
7070 } else if (control->poll) {
7071 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7072 &chan->conn_state) &&
7073 chan->unacked_frames) {
7074 __set_retrans_timer(chan);
/* Poll answered by re-sending the most recent SREJ with F-bit. */
7077 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7078 l2cap_send_srej_tail(chan);
7080 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7081 &chan->conn_state) &&
7082 chan->unacked_frames)
7083 __set_retrans_timer(chan);
7085 l2cap_send_ack(chan);
7088 case L2CAP_EV_RECV_RNR:
7089 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7090 l2cap_pass_to_tx(chan, control);
7091 if (control->poll) {
7092 l2cap_send_srej_tail(chan);
/* RNR without poll still needs an RR acknowledgment. */
7094 struct l2cap_ctrl rr_control;
7095 memset(&rr_control, 0, sizeof(rr_control));
7096 rr_control.sframe = 1;
7097 rr_control.super = L2CAP_SUPER_RR;
7098 rr_control.reqseq = chan->buffer_seq;
7099 l2cap_send_sframe(chan, &rr_control);
7103 case L2CAP_EV_RECV_REJ:
7104 l2cap_handle_rej(chan, control);
7106 case L2CAP_EV_RECV_SREJ:
7107 l2cap_handle_srej(chan, control);
/* Frames not queued above are no longer needed. */
7111 if (skb && !skb_in_use) {
7112 BT_DBG("Freeing %p", skb);
/* Finalize an AMP channel move: return RX to the normal state, adopt
 * the MTU of whichever controller now carries the channel, and
 * resegment queued TX data for the new MTU.
 */
7119 static int l2cap_finish_move(struct l2cap_chan *chan)
7121 BT_DBG("chan %p", chan);
7123 chan->rx_state = L2CAP_RX_STATE_RECV;
/* High-speed (AMP) link uses block_mtu, BR/EDR uses acl_mtu. */
7126 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7128 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7130 return l2cap_resegment(chan);
/* RX state used during a channel move while waiting for the peer's
 * poll (P-bit). On receiving it, rewind the TX side to the peer's
 * reqseq, finish the move, answer the poll with an F-bit frame, and
 * re-process the triggering frame in the normal RECV state.
 */
7133 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7134 struct l2cap_ctrl *control,
7135 struct sk_buff *skb, u8 event)
7139 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7145 l2cap_process_reqseq(chan, control->reqseq);
7147 if (!skb_queue_empty(&chan->tx_q))
7148 chan->tx_send_head = skb_peek(&chan->tx_q);
7150 chan->tx_send_head = NULL;
7152 /* Rewind next_tx_seq to the point expected
7155 chan->next_tx_seq = control->reqseq;
7156 chan->unacked_frames = 0;
7158 err = l2cap_finish_move(chan);
7162 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7163 l2cap_send_i_or_rr_or_rnr(chan);
/* I-frames carry data that still must go through normal RX handling. */
7165 if (event == L2CAP_EV_RECV_IFRAME)
7168 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX state used during a channel move while waiting for the peer's
 * final (F-bit) frame. On receiving it, rewind the TX side to reqseq,
 * adopt the new controller's MTU, resegment, and process the frame in
 * the normal RECV state.
 */
7171 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7172 struct l2cap_ctrl *control,
7173 struct sk_buff *skb, u8 event)
/* Ignore everything until the F-bit arrives. */
7177 if (!control->final)
7180 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7182 chan->rx_state = L2CAP_RX_STATE_RECV;
7183 l2cap_process_reqseq(chan, control->reqseq);
7185 if (!skb_queue_empty(&chan->tx_q))
7186 chan->tx_send_head = skb_peek(&chan->tx_q);
7188 chan->tx_send_head = NULL;
7190 /* Rewind next_tx_seq to the point expected
7193 chan->next_tx_seq = control->reqseq;
7194 chan->unacked_frames = 0;
/* High-speed (AMP) link uses block_mtu, BR/EDR uses acl_mtu. */
7197 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7199 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7201 err = l2cap_resegment(chan);
7204 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true when reqseq acknowledges only frames that are actually
 * outstanding, i.e. its modular offset from next_tx_seq does not exceed
 * the offset of expected_ack_seq (the unacked window).
 */
7209 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7211 /* Make sure reqseq is for a packet that has been sent but not acked */
7214 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7215 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM RX dispatch: validate the frame's reqseq against the
 * unacked TX window, then hand the event to the handler for the current
 * rx_state. An out-of-window reqseq is a protocol violation and tears the
 * channel down with ECONNRESET.
 * NOTE(review): break statements between cases were dropped by extraction.
 */
7218 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7219 struct sk_buff *skb, u8 event)
7223 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7224 control, skb, event, chan->rx_state);
7226 if (__valid_reqseq(chan, control->reqseq)) {
7227 switch (chan->rx_state) {
7228 case L2CAP_RX_STATE_RECV:
7229 err = l2cap_rx_state_recv(chan, control, skb, event);
7231 case L2CAP_RX_STATE_SREJ_SENT:
7232 err = l2cap_rx_state_srej_sent(chan, control, skb,
7235 case L2CAP_RX_STATE_WAIT_P:
7236 err = l2cap_rx_state_wait_p(chan, control, skb, event);
7238 case L2CAP_RX_STATE_WAIT_F:
7239 err = l2cap_rx_state_wait_f(chan, control, skb, event);
/* reqseq outside the sent-but-unacked window: disconnect */
7246 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7247 control->reqseq, chan->next_tx_seq,
7248 chan->expected_ack_seq);
7249 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode RX: only in-sequence I-frames are reassembled; anything
 * else is dropped (streaming mode has no retransmission). On a failed
 * reassembly the partial SDU is discarded. Sequence bookkeeping
 * (last_acked_seq / expected_tx_seq) is advanced for every frame.
 */
7255 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7256 struct sk_buff *skb)
7258 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7261 if (l2cap_classify_txseq(chan, control->txseq) ==
7262 L2CAP_TXSEQ_EXPECTED) {
7263 l2cap_pass_to_tx(chan, control);
7265 BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
7266 __next_seq(chan, chan->buffer_seq));
7268 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7270 l2cap_reassemble_sdu(chan, skb, control);
/* Reassembly failed: drop the partially built SDU */
7273 kfree_skb(chan->sdu);
7276 chan->sdu_last_frag = NULL;
/* Unexpected txseq: frame is simply freed (no recovery in streaming) */
7280 BT_DBG("Freeing %p", skb);
7285 chan->last_acked_seq = control->txseq;
7286 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames on a channel. Unpacks the
 * control field, verifies the FCS, validates payload length against MPS,
 * optionally runs the channel's filter hook, then dispatches I-frames and
 * S-frames into the appropriate state machine. Protocol violations
 * (oversize payload, bad F/P bits, trailing bytes in an S-frame) tear the
 * channel down with ECONNRESET.
 * NOTE(review): several drop/goto paths and braces were removed by
 * extraction — verify control flow against upstream before relying on it.
 */
7291 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7293 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7297 __unpack_control(chan, skb);
7302 * We can just drop the corrupted I-frame here.
7303 * Receiver will miss it and start proper recovery
7304 * procedures and ask for retransmission.
7306 if (l2cap_check_fcs(chan, skb))
/* Payload length excludes the SDU-length header of a START fragment */
7309 if (!control->sframe && control->sar == L2CAP_SAR_START)
7310 len -= L2CAP_SDULEN_SIZE;
7312 if (chan->fcs == L2CAP_FCS_CRC16)
7313 len -= L2CAP_FCS_SIZE;
7315 if (len > chan->mps) {
7316 l2cap_send_disconn_req(chan, ECONNRESET);
/* Optional per-channel filter hook (e.g. socket BPF) may drop the skb */
7320 if (chan->ops->filter) {
7321 if (chan->ops->filter(chan, skb))
7325 if (!control->sframe) {
7328 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7329 control->sar, control->reqseq, control->final,
7332 /* Validate F-bit - F=0 always valid, F=1 only
7333 * valid in TX WAIT_F
7335 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7338 if (chan->mode != L2CAP_MODE_STREAMING) {
7339 event = L2CAP_EV_RECV_IFRAME;
7340 err = l2cap_rx(chan, control, skb, event);
7342 err = l2cap_stream_rx(chan, control, skb);
7346 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit S-frame function field to a state-machine event */
7348 const u8 rx_func_to_event[4] = {
7349 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7350 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7353 /* Only I-frames are expected in streaming mode */
7354 if (chan->mode == L2CAP_MODE_STREAMING)
7357 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7358 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; leftover bytes are a protocol error */
7362 BT_ERR("Trailing bytes: %d in sframe", len);
7363 l2cap_send_disconn_req(chan, ECONNRESET);
7367 /* Validate F and P bits */
7368 if (control->final && (control->poll ||
7369 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7372 event = rx_func_to_event[control->super];
7373 if (l2cap_rx(chan, control, skb, event))
7374 l2cap_send_disconn_req(chan, ECONNRESET);
/* Replenish the peer's LE credit-based flow-control credits. The target
 * is enough credits to carry one full SDU (imtu/mps + 1); if the peer
 * already holds at least that many, nothing is sent. Otherwise the
 * difference is added to rx_credits and advertised via an
 * L2CAP_LE_CREDITS signalling packet.
 */
7384 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7386 struct l2cap_conn *conn = chan->conn;
7387 struct l2cap_le_credits pkt;
/* Credits needed so the peer can send a full SDU */
7390 return_credits = (chan->imtu / chan->mps) + 1;
7392 if (chan->rx_credits >= return_credits)
7395 return_credits -= chan->rx_credits;
7397 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7399 chan->rx_credits += return_credits;
7401 pkt.cid = cpu_to_le16(chan->scid);
7402 pkt.credits = cpu_to_le16(return_credits);
7404 chan->ident = l2cap_get_ident(conn);
7406 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Deliver a fully reassembled LE/ECRED SDU to the channel owner, then
 * top up the peer's credits. Delivery happens first so credits are only
 * returned once the upper layer has accepted the data.
 */
7409 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7413 BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7415 /* Wait recv to confirm reception before updating the credits */
7416 err = chan->ops->recv(chan, skb);
7418 /* Update credits whenever an SDU is received */
7419 l2cap_chan_le_send_credits(chan);
/* Receive one LE/ECRED PDU. Enforces credit accounting (a PDU with no
 * credits left is a protocol violation), validates sizes against imtu and
 * the advertised SDU length, and reassembles multi-PDU SDUs in chan->sdu.
 * A complete SDU is handed to l2cap_ecred_recv(). Always returns success
 * to the caller once it has taken ownership of the skb (see the trailing
 * comment) — errors are handled internally by freeing the partial SDU.
 * NOTE(review): the branch splitting first-fragment (SAR start) handling
 * from continuation handling was partly dropped by extraction — verify
 * against upstream l2cap_core.c.
 */
7424 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7428 if (!chan->rx_credits) {
7429 BT_ERR("No credits to receive LE L2CAP data");
7430 l2cap_send_disconn_req(chan, ECONNRESET);
7434 if (chan->imtu < skb->len) {
7435 BT_ERR("Too big LE L2CAP PDU");
7440 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7442 /* Update if remote had run out of credits, this should only happens
7443 * if the remote is not using the entire MPS.
7445 if (!chan->rx_credits)
7446 l2cap_chan_le_send_credits(chan);
/* First PDU of an SDU: starts with a 16-bit SDU length header */
7453 sdu_len = get_unaligned_le16(skb->data);
7454 skb_pull(skb, L2CAP_SDULEN_SIZE);
7456 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7457 sdu_len, skb->len, chan->imtu);
7459 if (sdu_len > chan->imtu) {
7460 BT_ERR("Too big LE L2CAP SDU length received");
7465 if (skb->len > sdu_len) {
7466 BT_ERR("Too much LE L2CAP data received");
/* Single-PDU SDU: deliver immediately */
7471 if (skb->len == sdu_len)
7472 return l2cap_ecred_recv(chan, skb);
7475 chan->sdu_len = sdu_len;
7476 chan->sdu_last_frag = skb;
7478 /* Detect if remote is not able to use the selected MPS */
7479 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7480 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7482 /* Adjust the number of credits */
7483 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7484 chan->mps = mps_len;
7485 l2cap_chan_le_send_credits(chan);
/* Continuation PDU: append to the SDU under reassembly */
7491 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7492 chan->sdu->len, skb->len, chan->sdu_len);
7494 if (chan->sdu->len + skb->len > chan->sdu_len) {
7495 BT_ERR("Too much LE L2CAP data received");
7500 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
7503 if (chan->sdu->len == chan->sdu_len) {
7504 err = l2cap_ecred_recv(chan, chan->sdu);
7507 chan->sdu_last_frag = NULL;
/* Error path: discard the partial SDU */
7515 kfree_skb(chan->sdu);
7517 chan->sdu_last_frag = NULL;
7521 /* We can't return an error here since we took care of the skb
7522 * freeing internally. An error return would cause the caller to
7523 * do a double-free of the skb.
/* Route an inbound data frame to the channel registered for its CID. A
 * frame for an unknown CID (other than the A2MP creation case) is dropped.
 * Fixed channels are marked ready on first data, then the frame is handed
 * to the mode-specific receive path (LE/ECRED credit flow, basic, or
 * ERTM/streaming). Unknown modes just log and fall through to drop.
 * NOTE(review): drop/free paths and break statements were removed by
 * extraction — verify against upstream.
 */
7528 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7529 struct sk_buff *skb)
7531 struct l2cap_chan *chan;
7533 chan = l2cap_get_chan_by_scid(conn, cid);
/* No channel yet: A2MP data may create one on demand */
7535 if (cid == L2CAP_CID_A2MP) {
7536 chan = a2mp_channel_create(conn, skb);
7542 l2cap_chan_lock(chan);
7544 BT_DBG("unknown cid 0x%4.4x", cid);
7545 /* Drop packet and return */
7551 BT_DBG("chan %p, len %d", chan, skb->len);
7553 /* If we receive data on a fixed channel before the info req/rsp
7554 * procedure is done simply assume that the channel is supported
7555 * and mark it as ready.
7557 if (chan->chan_type == L2CAP_CHAN_FIXED)
7558 l2cap_chan_ready(chan);
7560 if (chan->state != BT_CONNECTED)
7563 switch (chan->mode) {
7564 case L2CAP_MODE_LE_FLOWCTL:
7565 case L2CAP_MODE_EXT_FLOWCTL:
7566 if (l2cap_ecred_data_rcv(chan, skb) < 0)
7571 case L2CAP_MODE_BASIC:
7572 /* If socket recv buffers overflows we drop data here
7573 * which is *bad* because L2CAP has to be reliable.
7574 * But we don't have any other choice. L2CAP doesn't
7575 * provide flow control mechanism. */
7577 if (chan->imtu < skb->len) {
7578 BT_ERR("Dropping L2CAP data: receive buffer overflow");
/* ops->recv returning 0 means the skb was consumed */
7582 if (!chan->ops->recv(chan, skb))
7586 case L2CAP_MODE_ERTM:
7587 case L2CAP_MODE_STREAMING:
7588 l2cap_data_rcv(chan, skb);
7592 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7600 l2cap_chan_unlock(chan);
/* Deliver a connectionless (G-frame) packet to a raw channel bound to the
 * given PSM. Only valid on ACL links. The sender's BD_ADDR and PSM are
 * stashed in the skb control block so the socket layer can expose them via
 * msg_name. The channel reference from the global lookup is dropped on
 * every exit path.
 */
7603 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7604 struct sk_buff *skb)
7606 struct hci_conn *hcon = conn->hcon;
7607 struct l2cap_chan *chan;
7609 if (hcon->type != ACL_LINK)
7612 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7617 BT_DBG("chan %p, len %d", chan, skb->len);
7619 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7622 if (chan->imtu < skb->len)
7625 /* Store remote BD_ADDR and PSM for msg_name */
7626 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7627 bt_cb(skb)->l2cap.psm = psm;
7629 if (!chan->ops->recv(chan, skb)) {
7630 l2cap_chan_put(chan);
7635 l2cap_chan_put(chan);
/* Process one complete L2CAP frame. Frames arriving before the HCI link
 * is fully connected are queued on conn->pending_rx (drained later by
 * process_pending_rx). The basic header is stripped and validated, data
 * from reject-listed LE peers is ignored, and the frame is dispatched by
 * CID: signalling, connectionless, LE signalling, or per-channel data.
 */
7640 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7642 struct l2cap_hdr *lh = (void *) skb->data;
7643 struct hci_conn *hcon = conn->hcon;
7647 if (hcon->state != BT_CONNECTED) {
7648 BT_DBG("queueing pending rx skb");
7649 skb_queue_tail(&conn->pending_rx, skb);
7653 skb_pull(skb, L2CAP_HDR_SIZE);
7654 cid = __le16_to_cpu(lh->cid);
7655 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length */
7657 if (len != skb->len) {
7662 /* Since we can't actively block incoming LE connections we must
7663 * at least ensure that we ignore incoming data from them.
7665 if (hcon->type == LE_LINK &&
7666 hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
7667 bdaddr_dst_type(hcon))) {
7672 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7675 case L2CAP_CID_SIGNALING:
7676 l2cap_sig_channel(conn, skb);
7679 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM in the first two bytes */
7680 psm = get_unaligned((__le16 *) skb->data);
7681 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7682 l2cap_conless_channel(conn, psm, skb);
7685 case L2CAP_CID_LE_SIGNALING:
7686 l2cap_le_sig_channel(conn, skb);
7690 l2cap_data_channel(conn, cid, skb);
/* Workqueue handler: drain frames queued while the HCI link was still
 * connecting and run each through the normal receive path.
 */
7695 static void process_pending_rx(struct work_struct *work)
7697 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7699 struct sk_buff *skb;
7703 while ((skb = skb_dequeue(&conn->pending_rx)))
7704 l2cap_recv_frame(conn, skb);
/* Create (or return the existing) l2cap_conn for an HCI connection.
 * Allocates the conn, binds an hci_chan, selects the MTU from the link
 * type (LE MTU when set, else ACL MTU), advertises the locally supported
 * fixed channels (signalling, connectionless, optionally A2MP and BR/EDR
 * SMP), and initialises locks, lists, timers and work items.
 * NOTE(review): early-return on existing conn and error returns were
 * dropped by extraction — verify against upstream.
 */
7707 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7709 struct l2cap_conn *conn = hcon->l2cap_data;
7710 struct hci_chan *hchan;
7715 hchan = hci_chan_create(hcon);
7719 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
/* Allocation failed: release the hci_chan taken above */
7721 hci_chan_del(hchan);
7725 kref_init(&conn->ref);
7726 hcon->l2cap_data = conn;
/* Hold a reference on the hcon for the lifetime of the conn */
7727 conn->hcon = hci_conn_get(hcon);
7728 conn->hchan = hchan;
7730 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7732 switch (hcon->type) {
7734 if (hcon->hdev->le_mtu) {
7735 conn->mtu = hcon->hdev->le_mtu;
7740 conn->mtu = hcon->hdev->acl_mtu;
7744 conn->feat_mask = 0;
7746 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7748 if (hcon->type == ACL_LINK &&
7749 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7750 conn->local_fixed_chan |= L2CAP_FC_A2MP;
7752 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7753 (bredr_sc_enabled(hcon->hdev) ||
7754 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7755 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7757 mutex_init(&conn->ident_lock);
7758 mutex_init(&conn->chan_lock);
7760 INIT_LIST_HEAD(&conn->chan_l);
7761 INIT_LIST_HEAD(&conn->users);
7763 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7765 skb_queue_head_init(&conn->pending_rx);
7766 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7767 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7769 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE PSMs are a single
 * byte (<= 0x00ff); BR/EDR PSMs must have an odd low byte and an even
 * high byte per the L2CAP spec.
 */
7774 static bool is_valid_psm(u16 psm, u8 dst_type)
7779 if (bdaddr_type_is_le(dst_type))
7780 return (psm <= 0x00ff);
7782 /* PSM must be odd and lsb of upper byte must be 0 */
7783 return ((psm & 0x0101) == 0x0001);
/* Iterator context for counting a process's in-flight ECRED connections
 * (fields beyond 'chan' — pid and count — were dropped by extraction).
 */
7786 struct l2cap_chan_data {
7787 struct l2cap_chan *chan;
/* l2cap_chan_list() callback: count other deferred EXT_FLOWCTL channels
 * in BT_CONNECT that share the caller's PID and PSM — used to enforce
 * the per-connect-request SCID limit in l2cap_chan_connect().
 */
7792 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7794 struct l2cap_chan_data *d = data;
/* Skip the channel that initiated the scan */
7797 if (chan == d->chan)
7800 if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7803 pid = chan->ops->get_peer_pid(chan);
7805 /* Only count deferred channels with the same PID/PSM */
7806 if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7807 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
/* Initiate an outgoing L2CAP channel to (dst, dst_type) using psm or a
 * fixed cid. Validates the PSM/CID/mode combination, creates or reuses
 * the HCI connection (LE direct/scan connect or ACL), enforces the ECRED
 * per-PID channel limit, attaches the channel to the conn, and either
 * completes immediately (link already up) or starts the connect timer.
 * Returns 0 on success or a negative errno.
 * NOTE(review): many error-path gotos, 'done' labels and unlock pairs
 * were dropped by extraction — verify lock/ref balance against upstream.
 */
7813 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7814 bdaddr_t *dst, u8 dst_type)
7816 struct l2cap_conn *conn;
7817 struct hci_conn *hcon;
7818 struct hci_dev *hdev;
7821 BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7822 dst, dst_type, __le16_to_cpu(psm), chan->mode);
7824 hdev = hci_get_route(dst, &chan->src, chan->src_type);
7826 return -EHOSTUNREACH;
/* Parameter validation: PSM shape, conn-oriented needs a PSM,
 * fixed channels need a CID */
7830 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7831 chan->chan_type != L2CAP_CHAN_RAW) {
7836 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7841 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7846 switch (chan->mode) {
7847 case L2CAP_MODE_BASIC:
7849 case L2CAP_MODE_LE_FLOWCTL:
7851 case L2CAP_MODE_EXT_FLOWCTL:
/* ECRED mode is gated behind the enable_ecred module parameter */
7852 if (!enable_ecred) {
7857 case L2CAP_MODE_ERTM:
7858 case L2CAP_MODE_STREAMING:
7867 switch (chan->state) {
7871 /* Already connecting */
7876 /* Already connected */
7890 /* Set destination address and psm */
7891 bacpy(&chan->dst, dst);
7892 chan->dst_type = dst_type;
7897 if (bdaddr_type_is_le(dst_type)) {
7898 /* Convert from L2CAP channel address type to HCI address type
7900 if (dst_type == BDADDR_LE_PUBLIC)
7901 dst_type = ADDR_LE_DEV_PUBLIC;
7903 dst_type = ADDR_LE_DEV_RANDOM;
/* While advertising, connect directly as slave; otherwise use the
 * scan-then-connect path */
7905 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7906 hcon = hci_connect_le(hdev, dst, dst_type,
7908 HCI_LE_CONN_TIMEOUT,
7909 HCI_ROLE_SLAVE, NULL);
7911 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7913 HCI_LE_CONN_TIMEOUT,
7914 CONN_REASON_L2CAP_CHAN);
7917 u8 auth_type = l2cap_get_auth_type(chan);
7918 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
7919 CONN_REASON_L2CAP_CHAN);
7923 err = PTR_ERR(hcon);
7927 conn = l2cap_conn_add(hcon);
7929 hci_conn_drop(hcon);
/* Limit concurrent deferred ECRED connects from the same PID/PSM */
7934 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
7935 struct l2cap_chan_data data;
7938 data.pid = chan->ops->get_peer_pid(chan);
7941 l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
7943 /* Check if there isn't too many channels being connected */
7944 if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
7945 hci_conn_drop(hcon);
7951 mutex_lock(&conn->chan_lock);
7952 l2cap_chan_lock(chan);
/* Fixed CID already taken on this conn: abort */
7954 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7955 hci_conn_drop(hcon);
7960 /* Update source addr of the socket */
7961 bacpy(&chan->src, &hcon->src);
7962 chan->src_type = bdaddr_src_type(hcon);
7964 __l2cap_chan_add(conn, chan);
7966 /* l2cap_chan_add takes its own ref so we can drop this one */
7967 hci_conn_drop(hcon);
7969 l2cap_state_change(chan, BT_CONNECT);
7970 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7972 /* Release chan->sport so that it can be reused by other
7973 * sockets (as it's only used for listening sockets).
7975 write_lock(&chan_list_lock);
7977 write_unlock(&chan_list_lock);
7979 if (hcon->state == BT_CONNECTED) {
7980 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7981 __clear_chan_timer(chan);
7982 if (l2cap_chan_check_security(chan, true))
7983 l2cap_state_change(chan, BT_CONNECTED);
7985 l2cap_do_start(chan);
7991 l2cap_chan_unlock(chan);
7992 mutex_unlock(&conn->chan_lock);
7994 hci_dev_unlock(hdev);
7998 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
/* Send an ECRED reconfigure request advertising the channel's current
 * imtu/mps for its scid. The allocated command ident is stored in
 * chan->ident so the response can be matched.
 */
8000 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8002 struct l2cap_conn *conn = chan->conn;
8004 struct l2cap_ecred_reconf_req req;
8008 pdu.req.mtu = cpu_to_le16(chan->imtu);
8009 pdu.req.mps = cpu_to_le16(chan->mps);
8010 pdu.scid = cpu_to_le16(chan->scid);
8012 chan->ident = l2cap_get_ident(conn);
8014 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
/* Public API: raise the channel MTU to 'mtu' and notify the peer via an
 * ECRED reconfigure request. Shrinking the MTU is rejected (the visible
 * guard fires when chan->imtu > mtu; the error return was dropped by
 * extraction — presumably -EINVAL, verify against upstream).
 */
8018 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8020 if (chan->imtu > mtu)
8023 BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8027 l2cap_ecred_reconfigure(chan);
8032 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection from
 * bdaddr. Scans listening channels; lm1 accumulates link-mode bits for
 * sockets bound to this adapter's own address (exact match), lm2 for
 * wildcard (BDADDR_ANY) listeners. Exact-match listeners win.
 */
8034 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8036 int exact = 0, lm1 = 0, lm2 = 0;
8037 struct l2cap_chan *c;
8039 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8041 /* Find listening sockets and check their link_mode */
8042 read_lock(&chan_list_lock);
8043 list_for_each_entry(c, &chan_list, global_l) {
8044 if (c->state != BT_LISTEN)
8047 if (!bacmp(&c->src, &hdev->bdaddr)) {
8048 lm1 |= HCI_LM_ACCEPT;
8049 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8050 lm1 |= HCI_LM_MASTER;
8052 } else if (!bacmp(&c->src, BDADDR_ANY)) {
8053 lm2 |= HCI_LM_ACCEPT;
8054 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8055 lm2 |= HCI_LM_MASTER;
8058 read_unlock(&chan_list_lock);
8060 return exact ? lm1 : lm2;
8063 /* Find the next fixed channel in BT_LISTEN state, continue iteration
8064 * from an existing channel in the list or from the beginning of the
8065 * global list (by passing NULL as first parameter).
8067 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8068 struct hci_conn *hcon)
8070 u8 src_type = bdaddr_src_type(hcon);
8072 read_lock(&chan_list_lock);
/* Resume after 'c' when given, otherwise start at the list head */
8075 c = list_next_entry(c, global_l);
8077 c = list_entry(chan_list.next, typeof(*c), global_l);
8079 list_for_each_entry_from(c, &chan_list, global_l) {
8080 if (c->chan_type != L2CAP_CHAN_FIXED)
8082 if (c->state != BT_LISTEN)
/* Source must match the link's local address or be a wildcard */
8084 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8086 if (src_type != c->src_type)
/* Match found: presumably a reference is taken before returning
 * (l2cap_chan_hold line dropped by extraction — verify upstream) */
8090 read_unlock(&chan_list_lock);
8094 read_unlock(&chan_list_lock);
/* HCI callback invoked when an ACL/LE connection attempt completes. On
 * failure the conn is torn down; on success a conn is created and every
 * listening fixed channel matching this link gets a new child channel
 * attached to the conn (skipping CIDs already claimed by a client).
 * Iteration uses repeated global lookups because the channel list lock
 * cannot be held across the sleeping l2cap_chan_lock().
 */
8099 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8101 struct hci_dev *hdev = hcon->hdev;
8102 struct l2cap_conn *conn;
8103 struct l2cap_chan *pchan;
8106 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8109 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
/* Non-zero HCI status: connection failed, clean up */
8112 l2cap_conn_del(hcon, bt_to_errno(status));
8116 conn = l2cap_conn_add(hcon);
8120 dst_type = bdaddr_dst_type(hcon);
8122 /* If device is blocked, do not create channels for it */
8123 if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
8126 /* Find fixed channels and notify them of the new connection. We
8127 * use multiple individual lookups, continuing each time where
8128 * we left off, because the list lock would prevent calling the
8129 * potentially sleeping l2cap_chan_lock() function.
8131 pchan = l2cap_global_fixed_chan(NULL, hcon);
8133 struct l2cap_chan *chan, *next;
8135 /* Client fixed channels should override server ones */
8136 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8139 l2cap_chan_lock(pchan);
8140 chan = pchan->ops->new_connection(pchan);
8142 bacpy(&chan->src, &hcon->src);
8143 bacpy(&chan->dst, &hcon->dst);
8144 chan->src_type = bdaddr_src_type(hcon);
8145 chan->dst_type = dst_type;
8147 __l2cap_chan_add(conn, chan);
8150 l2cap_chan_unlock(pchan);
/* Advance to the next matching listener, dropping our ref on this one */
8152 next = l2cap_global_fixed_chan(pchan, hcon);
8153 l2cap_chan_put(pchan);
8157 l2cap_conn_ready(conn);
/* HCI callback: report the disconnect reason recorded for this link's
 * l2cap_conn; without a conn, default to remote-user-terminated.
 */
8160 int l2cap_disconn_ind(struct hci_conn *hcon)
8162 struct l2cap_conn *conn = hcon->l2cap_data;
8164 BT_DBG("hcon %p", hcon);
/* No L2CAP state for this link: use the generic reason */
8167 return HCI_ERROR_REMOTE_USER_TERM;
8168 return conn->disc_reason;
/* HCI callback on link disconnection: tear down the L2CAP conn and all
 * its channels, translating the HCI reason to an errno.
 */
8171 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8173 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8176 BT_DBG("hcon %p reason %d", hcon, reason);
8178 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel: when
 * encryption drops, MEDIUM-security channels get a grace timer while
 * HIGH/FIPS channels are closed immediately; when encryption comes up,
 * any pending MEDIUM-security grace timer is cancelled.
 */
8181 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8183 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8186 if (encrypt == 0x00) {
8187 if (chan->sec_level == BT_SECURITY_MEDIUM) {
8188 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8189 } else if (chan->sec_level == BT_SECURITY_HIGH ||
8190 chan->sec_level == BT_SECURITY_FIPS)
8191 l2cap_chan_close(chan, ECONNREFUSED);
8193 if (chan->sec_level == BT_SECURITY_MEDIUM)
8194 __clear_chan_timer(chan);
/* HCI callback after an authentication/encryption procedure. Walks every
 * channel on the conn and advances its state machine: resumes connected/
 * configuring channels, (re)starts connects once security and key size
 * check out, and answers deferred incoming connects (BT_CONNECT2) with
 * success, pending-authorisation, or security-block, kicking off
 * configuration when appropriate.
 */
8198 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8200 struct l2cap_conn *conn = hcon->l2cap_data;
8201 struct l2cap_chan *chan;
8206 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8208 mutex_lock(&conn->chan_lock);
8210 list_for_each_entry(chan, &conn->chan_l, list) {
8211 l2cap_chan_lock(chan);
8213 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8214 state_to_string(chan->state));
/* A2MP channels manage their own security */
8216 if (chan->scid == L2CAP_CID_A2MP) {
8217 l2cap_chan_unlock(chan);
8221 if (!status && encrypt)
8222 chan->sec_level = hcon->sec_level;
8224 if (!__l2cap_no_conn_pending(chan)) {
8225 l2cap_chan_unlock(chan);
8229 if (!status && (chan->state == BT_CONNECTED ||
8230 chan->state == BT_CONFIG)) {
8231 chan->ops->resume(chan);
8232 l2cap_check_encryption(chan, encrypt);
8233 l2cap_chan_unlock(chan);
8237 if (chan->state == BT_CONNECT) {
/* Outgoing connect: proceed only with success and a strong key */
8238 if (!status && l2cap_check_enc_key_size(hcon))
8239 l2cap_start_connection(chan);
8241 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8242 } else if (chan->state == BT_CONNECT2 &&
8243 !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8244 chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8245 struct l2cap_conn_rsp rsp;
8248 if (!status && l2cap_check_enc_key_size(hcon)) {
8249 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
/* Userspace must authorise: keep the peer pending */
8250 res = L2CAP_CR_PEND;
8251 stat = L2CAP_CS_AUTHOR_PEND;
8252 chan->ops->defer(chan);
8254 l2cap_state_change(chan, BT_CONFIG);
8255 res = L2CAP_CR_SUCCESS;
8256 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse and schedule disconnect */
8259 l2cap_state_change(chan, BT_DISCONN);
8260 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8261 res = L2CAP_CR_SEC_BLOCK;
8262 stat = L2CAP_CS_NO_INFO;
8265 rsp.scid = cpu_to_le16(chan->dcid);
8266 rsp.dcid = cpu_to_le16(chan->scid);
8267 rsp.result = cpu_to_le16(res);
8268 rsp.status = cpu_to_le16(stat);
8269 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Accepted and no config exchanged yet: send our config request */
8272 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8273 res == L2CAP_CR_SUCCESS) {
8275 set_bit(CONF_REQ_SENT, &chan->conf_state);
8276 l2cap_send_cmd(conn, l2cap_get_ident(conn),
8278 l2cap_build_conf_req(chan, buf, sizeof(buf)),
8280 chan->num_conf_req++;
8284 l2cap_chan_unlock(chan);
8287 mutex_unlock(&conn->chan_lock);
8290 /* Append fragment into frame respecting the maximum len of rx_skb */
/* Lazily allocates conn->rx_skb on first fragment, then copies up to the
 * requested number of bytes from skb, decrementing conn->rx_len by the
 * amount actually copied. Returns the copied length (error return on
 * allocation failure dropped by extraction — verify upstream).
 */
8291 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
8294 if (!conn->rx_skb) {
8295 /* Allocate skb for the complete frame (with header) */
8296 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8303 /* Copy as much as the rx_skb can hold */
8304 len = min_t(u16, len, skb->len);
8305 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
8307 conn->rx_len -= len;
/* Complete the 16-bit L2CAP length field across fragments. Once the
 * length is known: if the current rx_skb has room for the full frame,
 * just record the expected total in conn->rx_len; otherwise detach the
 * partial rx_skb and re-append it through l2cap_recv_frag() so a
 * correctly sized buffer is allocated.
 */
8312 static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
8314 struct sk_buff *rx_skb;
8317 /* Append just enough to complete the header */
8318 len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);
8320 /* If header could not be read just continue */
8321 if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
8324 rx_skb = conn->rx_skb;
8325 len = get_unaligned_le16(rx_skb->data);
8327 /* Check if rx_skb has enough space to received all fragments */
8328 if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
8329 /* Update expected len */
8330 conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
8331 return L2CAP_LEN_SIZE;
8334 /* Reset conn->rx_skb since it will need to be reallocated in order to
8335 * fit all fragments.
8337 conn->rx_skb = NULL;
8339 /* Reallocates rx_skb using the exact expected length */
8340 len = l2cap_recv_frag(conn, rx_skb,
8341 len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
/* Drop any partially reassembled frame and reset the fragment state
 * (kfree_skb handles a NULL rx_skb safely).
 */
8347 static void l2cap_recv_reset(struct l2cap_conn *conn)
8349 kfree_skb(conn->rx_skb);
8350 conn->rx_skb = NULL;
/* Entry point from HCI for ACL data. Reassembles L2CAP frames from ACL
 * start/continuation fragments: a start fragment either carries a whole
 * frame (processed immediately) or begins reassembly in conn->rx_skb;
 * continuation fragments are appended until conn->rx_len reaches zero,
 * then the completed frame goes to l2cap_recv_frame(). Length mismatches
 * mark the conn unreliable (ECOMM) and reset reassembly.
 * NOTE(review): the switch(flags) statement and several drop labels were
 * removed by extraction — verify against upstream.
 */
8354 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8356 struct l2cap_conn *conn = hcon->l2cap_data;
8359 /* For AMP controller do not create l2cap conn */
8360 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8364 conn = l2cap_conn_add(hcon);
8369 BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
8373 case ACL_START_NO_FLUSH:
/* A start fragment while reassembly is in progress is a protocol
 * error: discard the stale partial frame */
8376 BT_ERR("Unexpected start frame (len %d)", skb->len);
8377 l2cap_recv_reset(conn);
8378 l2cap_conn_unreliable(conn, ECOMM);
8381 /* Start fragment may not contain the L2CAP length so just
8382 * copy the initial byte when that happens and use conn->mtu as
8385 if (skb->len < L2CAP_LEN_SIZE) {
8386 if (l2cap_recv_frag(conn, skb, conn->mtu) < 0)
8391 len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
8393 if (len == skb->len) {
8394 /* Complete frame received */
8395 l2cap_recv_frame(conn, skb);
8399 BT_DBG("Start: total len %d, frag len %u", len, skb->len);
8401 if (skb->len > len) {
8402 BT_ERR("Frame is too long (len %u, expected len %d)",
8404 l2cap_conn_unreliable(conn, ECOMM);
8408 /* Append fragment into frame (with header) */
8409 if (l2cap_recv_frag(conn, skb, len) < 0)
/* ACL_CONT: continuation fragment */
8415 BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);
8417 if (!conn->rx_skb) {
8418 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8419 l2cap_conn_unreliable(conn, ECOMM);
8423 /* Complete the L2CAP length if it has not been read */
8424 if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
8425 if (l2cap_recv_len(conn, skb) < 0) {
8426 l2cap_conn_unreliable(conn, ECOMM);
8430 /* Header still could not be read just continue */
8431 if (conn->rx_skb->len < L2CAP_LEN_SIZE)
8435 if (skb->len > conn->rx_len) {
8436 BT_ERR("Fragment is too long (len %u, expected %u)",
8437 skb->len, conn->rx_len);
8438 l2cap_recv_reset(conn);
8439 l2cap_conn_unreliable(conn, ECOMM);
8443 /* Append fragment into frame (with header) */
8444 l2cap_recv_frag(conn, skb, skb->len);
8446 if (!conn->rx_len) {
8447 /* Complete frame received. l2cap_recv_frame
8448 * takes ownership of the skb so set the global
8449 * rx_skb pointer to NULL first.
8451 struct sk_buff *rx_skb = conn->rx_skb;
8452 conn->rx_skb = NULL;
8453 l2cap_recv_frame(conn, rx_skb);
/* HCI callback registration: hooks L2CAP into connection establishment,
 * disconnection and security-change events from the HCI core.
 */
8462 static struct hci_cb l2cap_cb = {
8464 .connect_cfm = l2cap_connect_cfm,
8465 .disconn_cfm = l2cap_disconn_cfm,
8466 .security_cfm = l2cap_security_cfm,
/* debugfs: dump one line per global L2CAP channel — addresses, state,
 * PSM, CIDs, MTUs, security level and mode.
 */
8469 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8471 struct l2cap_chan *c;
8473 read_lock(&chan_list_lock);
8475 list_for_each_entry(c, &chan_list, global_l) {
8476 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8477 &c->src, c->src_type, &c->dst, c->dst_type,
8478 c->state, __le16_to_cpu(c->psm),
8479 c->scid, c->dcid, c->imtu, c->omtu,
8480 c->sec_level, c->mode);
8483 read_unlock(&chan_list_lock);
/* Generate l2cap_debugfs_fops from the show function above */
8488 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
/* Handle of the debugfs entry, kept for removal in l2cap_exit() */
8490 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket family, hook into HCI, and
 * (when bt_debugfs exists) create the read-only debugfs channel dump.
 */
8492 int __init l2cap_init(void)
8496 err = l2cap_init_sockets();
8500 hci_register_cb(&l2cap_cb);
8502 if (IS_ERR_OR_NULL(bt_debugfs))
8505 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8506 NULL, &l2cap_debugfs_fops);
/* Module teardown: undo l2cap_init() in reverse order. */
8511 void l2cap_exit(void)
8513 debugfs_remove(l2cap_debugfs);
8514 hci_unregister_cb(&l2cap_cb);
8515 l2cap_cleanup_sockets();
/* Runtime-tunable module parameters (mode 0644: root-writable via sysfs) */
8518 module_param(disable_ertm, bool, 0644);
8519 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8521 module_param(enable_ecred, bool, 0644);
8522 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");