2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
44 #define LE_FLOWCTL_MAX_CREDITS 65535
46 #ifdef CONFIG_TIZEN_WIP
47 #ifndef list_next_entry
48 #define list_next_entry(pos, member) \
49 list_entry((pos)->member.next, typeof(*(pos)), member)
53 #ifdef CONFIG_TIZEN_WIP
54 /* #define HCI_BROADCOMM_QOS_PATCH */
56 #ifdef HCI_BROADCOMM_QOS_PATCH
57 #define L2CAP_PSM_AVDTP 25
58 #define HCI_BROADCOM_QOS_CMD 0xFC57 /* For bcm4329/bcm4330/bcm4334 chipsets */
59 #define PRIORITY_NORMAL 0x00 /* Broadcom ACL priority for bcm4334 chipset */
60 #define PRIORITY_HIGH 0x01
62 struct hci_cp_broadcom_cmd {
64 __u8 priority; /* Only for bcm4330/bcm4334 chipsets */
65 } __attribute__ ((__packed__));
70 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
72 static LIST_HEAD(chan_list);
73 static DEFINE_RWLOCK(chan_list_lock);
75 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
76 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
78 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
79 u8 code, u8 ident, u16 dlen, void *data);
80 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
82 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
83 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
85 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
86 struct sk_buff_head *skbs, u8 event);
/* Map an HCI link type + address type pair onto the socket-level
 * BDADDR_* address type constants.
 * NOTE(review): view is elided — the BR/EDR fallthrough return and the
 * closing brace are not visible here; only the LE branch is shown.
 */
88 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
90 if (link_type == LE_LINK) {
/* LE public addresses map directly; any other LE address type is
 * reported as random.
 */
91 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
92 return BDADDR_LE_PUBLIC;
94 return BDADDR_LE_RANDOM;
100 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
102 return bdaddr_type(hcon->type, hcon->src_type);
105 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
107 return bdaddr_type(hcon->type, hcon->dst_type);
110 /* ---- L2CAP channels ---- */
112 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
115 struct l2cap_chan *c;
117 list_for_each_entry(c, &conn->chan_l, list) {
124 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
127 struct l2cap_chan *c;
129 list_for_each_entry(c, &conn->chan_l, list) {
136 /* Find channel with given SCID.
137 * Returns locked channel. */
/* Find a channel on this connection by source CID (SCID).
 * The per-connection channel list is walked under conn->chan_lock.
 * Per the comment above, the channel is returned locked — the
 * l2cap_chan_lock() call on a hit is in an elided line of this view.
 */
138 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
141 struct l2cap_chan *c;
143 mutex_lock(&conn->chan_lock);
144 c = __l2cap_get_chan_by_scid(conn, cid);
147 mutex_unlock(&conn->chan_lock);
152 /* Find channel with given DCID.
153 * Returns locked channel.
/* Find a channel on this connection by destination CID (DCID).
 * Mirror of l2cap_get_chan_by_scid(): list walk happens under
 * conn->chan_lock, and per the comment above the returned channel is
 * locked (the lock call sits in an elided line of this view).
 */
155 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
158 struct l2cap_chan *c;
160 mutex_lock(&conn->chan_lock);
161 c = __l2cap_get_chan_by_dcid(conn, cid);
164 mutex_unlock(&conn->chan_lock);
169 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
172 struct l2cap_chan *c;
174 list_for_each_entry(c, &conn->chan_l, list) {
175 if (c->ident == ident)
181 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
184 struct l2cap_chan *c;
186 mutex_lock(&conn->chan_lock);
187 c = __l2cap_get_chan_by_ident(conn, ident);
190 mutex_unlock(&conn->chan_lock);
195 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
197 struct l2cap_chan *c;
199 list_for_each_entry(c, &chan_list, global_l) {
200 if (c->sport == psm && !bacmp(&c->src, src))
206 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
210 write_lock(&chan_list_lock);
212 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
225 for (p = 0x1001; p < 0x1100; p += 2)
226 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
227 chan->psm = cpu_to_le16(p);
228 chan->sport = cpu_to_le16(p);
235 write_unlock(&chan_list_lock);
238 EXPORT_SYMBOL_GPL(l2cap_add_psm);
240 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
242 write_lock(&chan_list_lock);
244 /* Override the defaults (which are for conn-oriented) */
245 chan->omtu = L2CAP_DEFAULT_MTU;
246 chan->chan_type = L2CAP_CHAN_FIXED;
250 write_unlock(&chan_list_lock);
255 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
259 if (conn->hcon->type == LE_LINK)
260 dyn_end = L2CAP_CID_LE_DYN_END;
262 dyn_end = L2CAP_CID_DYN_END;
264 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
265 if (!__l2cap_get_chan_by_scid(conn, cid))
272 static void l2cap_state_change(struct l2cap_chan *chan, int state)
274 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
275 state_to_string(state));
278 chan->ops->state_change(chan, state, 0);
281 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
285 chan->ops->state_change(chan, chan->state, err);
288 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
290 chan->ops->state_change(chan, chan->state, err);
293 static void __set_retrans_timer(struct l2cap_chan *chan)
295 if (!delayed_work_pending(&chan->monitor_timer) &&
296 chan->retrans_timeout) {
297 l2cap_set_timer(chan, &chan->retrans_timer,
298 msecs_to_jiffies(chan->retrans_timeout));
302 static void __set_monitor_timer(struct l2cap_chan *chan)
304 __clear_retrans_timer(chan);
305 if (chan->monitor_timeout) {
306 l2cap_set_timer(chan, &chan->monitor_timer,
307 msecs_to_jiffies(chan->monitor_timeout));
311 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
316 skb_queue_walk(head, skb) {
317 if (bt_cb(skb)->control.txseq == seq)
324 /* ---- L2CAP sequence number lists ---- */
326 /* For ERTM, ordered lists of sequence numbers must be tracked for
327 * SREJ requests that are received and for frames that are to be
328 * retransmitted. These seq_list functions implement a singly-linked
329 * list in an array, where membership in the list can also be checked
330 * in constant time. Items can also be added to the tail of the list
331 * and removed from the head in constant time, without further memory
/* Allocate and reset an ERTM sequence-number list sized for @size
 * entries. Rounds the backing array up to a power of two so that
 * "seq & mask" indexes it directly.
 * NOTE(review): the kmalloc() failure check and the "return 0" success
 * path are in elided lines of this view.
 */
335 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
337 size_t alloc_size, i;
339 /* Allocated size is a power of 2 to map sequence numbers
340 * (which may be up to 14 bits) in to a smaller array that is
341 * sized for the negotiated ERTM transmit windows.
343 alloc_size = roundup_pow_of_two(size);
345 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
/* Power-of-two size makes (alloc_size - 1) a valid index mask */
349 seq_list->mask = alloc_size - 1;
350 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
351 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Every slot starts CLEAR: "not a member of the list" */
352 for (i = 0; i < alloc_size; i++)
353 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array allocated by l2cap_seq_list_init().
 * kfree(NULL) is a no-op, so this is safe on a never-initialized list.
 */
358 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
360 kfree(seq_list->list);
/* Test list membership in O(1): a slot holds L2CAP_SEQ_LIST_CLEAR only
 * when that sequence number is not currently on the list.
 */
363 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
366 /* Constant-time check for list membership */
367 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove and return the sequence number at the head of the list in
 * O(1). The vacated slot is reset to CLEAR; when the popped entry was
 * the tail (its slot held L2CAP_SEQ_LIST_TAIL) the whole list becomes
 * empty. NOTE(review): the final "return seq" is in an elided line.
 */
370 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
372 u16 seq = seq_list->head;
373 u16 mask = seq_list->mask;
/* Advance head to the next linked entry, then clear the old slot */
375 seq_list->head = seq_list->list[seq & mask];
376 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
378 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
379 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
380 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Empty the list. The early bail-out when head is already CLEAR avoids
 * the O(n) sweep for an empty list; otherwise every slot is reset.
 * NOTE(review): the loop-variable declaration and the "return" of the
 * early-exit branch are in elided lines of this view.
 */
386 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
390 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
393 for (i = 0; i <= seq_list->mask; i++)
394 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
396 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
397 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq to the tail of the list in O(1). A sequence number that
 * is already a member (slot not CLEAR) is not appended twice — the
 * duplicate check's "return" is in an elided line. An empty list gets
 * @seq as its new head; otherwise the old tail's slot is linked to it.
 */
400 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
402 u16 mask = seq_list->mask;
404 /* All appends happen in constant time */
406 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
409 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
410 seq_list->head = seq;
412 seq_list->list[seq_list->tail & mask] = seq;
414 seq_list->tail = seq;
/* New tail's own slot is marked TAIL (end-of-list sentinel) */
415 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer (armed via
 * INIT_DELAYED_WORK in l2cap_chan_create). Closes the channel with an
 * error code derived from its state, then drops the reference the
 * timer held. Locking order: conn->chan_lock, then the channel lock.
 * NOTE(review): the declaration/initialization of "reason" and the
 * remaining else-branch are in elided lines of this view.
 */
418 static void l2cap_chan_timeout(struct work_struct *work)
420 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
422 struct l2cap_conn *conn = chan->conn;
425 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
427 mutex_lock(&conn->chan_lock);
428 l2cap_chan_lock(chan);
/* A timeout on an established/configuring channel is reported to the
 * owner as ECONNREFUSED, as is a timed-out outgoing connect above the
 * SDP security level.
 */
430 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
431 reason = ECONNREFUSED;
432 else if (chan->state == BT_CONNECT &&
433 chan->sec_level != BT_SECURITY_SDP)
434 reason = ECONNREFUSED;
438 l2cap_chan_close(chan, reason);
440 l2cap_chan_unlock(chan);
/* ops->close is called after dropping the channel lock but still
 * under conn->chan_lock.
 */
442 chan->ops->close(chan);
443 mutex_unlock(&conn->chan_lock);
/* Drop the reference that kept the channel alive while queued */
445 l2cap_chan_put(chan);
/* Allocate and minimally initialize a new l2cap_chan: refcount of 1,
 * state BT_OPEN, default lock nesting, registered on the global
 * chan_list, with the channel timer wired to l2cap_chan_timeout.
 * Returns the new channel; the NULL-on-allocation-failure return is in
 * an elided line of this view.
 */
448 struct l2cap_chan *l2cap_chan_create(void)
450 struct l2cap_chan *chan;
452 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
456 mutex_init(&chan->lock);
458 /* Set default lock nesting level */
459 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
/* Make the channel visible on the global list under the writer lock */
461 write_lock(&chan_list_lock);
462 list_add(&chan->global_l, &chan_list);
463 write_unlock(&chan_list_lock);
465 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
467 chan->state = BT_OPEN;
469 kref_init(&chan->kref);
471 /* This flag is cleared in l2cap_chan_ready() */
472 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
474 BT_DBG("chan %p", chan);
478 EXPORT_SYMBOL_GPL(l2cap_chan_create);
/* kref release callback (see l2cap_chan_put): unlink the channel from
 * the global chan_list. The final kfree of the channel itself is in an
 * elided line of this view.
 */
480 static void l2cap_chan_destroy(struct kref *kref)
482 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
484 BT_DBG("chan %p", chan);
486 write_lock(&chan_list_lock);
487 list_del(&chan->global_l);
488 write_unlock(&chan_list_lock);
/* Take a reference on the channel. The kref_get() call is in an elided
 * line of this view; only the debug trace of the prior refcount shows.
 */
493 void l2cap_chan_hold(struct l2cap_chan *c)
495 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a reference on the channel; when the count reaches zero
 * l2cap_chan_destroy() runs and the channel is freed.
 */
500 void l2cap_chan_put(struct l2cap_chan *c)
502 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
504 kref_put(&c->kref, l2cap_chan_destroy);
506 EXPORT_SYMBOL_GPL(l2cap_chan_put);
/* Reset a channel to the stack's default parameters: CRC16 FCS,
 * default ERTM window/retry limits and timeouts, low security, default
 * flush timeout, and a cleared configuration state. Remote window and
 * max-tx start mirrored from the local defaults until configuration
 * negotiates real values.
 */
508 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
510 chan->fcs = L2CAP_FCS_CRC16;
511 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
512 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
513 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
514 chan->remote_max_tx = chan->max_tx;
515 chan->remote_tx_win = chan->tx_win;
516 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
517 chan->sec_level = BT_SECURITY_LOW;
518 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
519 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
520 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
521 chan->conf_state = 0;
/* Keep the ACL link active while this channel has traffic */
523 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
525 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
/* Initialize LE credit-based flow control state: no TX credits until
 * the peer grants some, RX credits from the module default, and an MPS
 * capped at the channel's incoming MTU.
 */
527 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
530 chan->sdu_last_frag = NULL;
532 chan->tx_credits = 0;
533 chan->rx_credits = le_max_credits;
534 chan->mps = min_t(u16, chan->imtu, le_default_mps);
536 skb_queue_head_init(&chan->tx_q);
539 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
541 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
542 __le16_to_cpu(chan->psm), chan->dcid);
544 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
548 switch (chan->chan_type) {
549 case L2CAP_CHAN_CONN_ORIENTED:
550 /* Alloc CID for connection-oriented socket */
551 chan->scid = l2cap_alloc_cid(conn);
552 if (conn->hcon->type == ACL_LINK)
553 chan->omtu = L2CAP_DEFAULT_MTU;
556 case L2CAP_CHAN_CONN_LESS:
557 /* Connectionless socket */
558 chan->scid = L2CAP_CID_CONN_LESS;
559 chan->dcid = L2CAP_CID_CONN_LESS;
560 chan->omtu = L2CAP_DEFAULT_MTU;
563 case L2CAP_CHAN_FIXED:
564 /* Caller will set CID and CID specific MTU values */
568 /* Raw socket can send/recv signalling messages only */
569 chan->scid = L2CAP_CID_SIGNALING;
570 chan->dcid = L2CAP_CID_SIGNALING;
571 chan->omtu = L2CAP_DEFAULT_MTU;
574 chan->local_id = L2CAP_BESTEFFORT_ID;
575 chan->local_stype = L2CAP_SERV_BESTEFFORT;
576 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
577 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
578 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
579 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
581 l2cap_chan_hold(chan);
583 /* Only keep a reference for fixed channels if they requested it */
584 if (chan->chan_type != L2CAP_CHAN_FIXED ||
585 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
586 hci_conn_hold(conn->hcon);
588 list_add(&chan->list, &conn->chan_l);
591 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
593 mutex_lock(&conn->chan_lock);
594 __l2cap_chan_add(conn, chan);
595 mutex_unlock(&conn->chan_lock);
598 void l2cap_chan_del(struct l2cap_chan *chan, int err)
600 struct l2cap_conn *conn = chan->conn;
602 __clear_chan_timer(chan);
604 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
605 state_to_string(chan->state));
607 chan->ops->teardown(chan, err);
610 struct amp_mgr *mgr = conn->hcon->amp_mgr;
611 /* Delete from channel list */
612 list_del(&chan->list);
614 l2cap_chan_put(chan);
618 /* Reference was only held for non-fixed channels or
619 * fixed channels that explicitly requested it using the
620 * FLAG_HOLD_HCI_CONN flag.
622 if (chan->chan_type != L2CAP_CHAN_FIXED ||
623 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
624 hci_conn_drop(conn->hcon);
626 if (mgr && mgr->bredr_chan == chan)
627 mgr->bredr_chan = NULL;
630 if (chan->hs_hchan) {
631 struct hci_chan *hs_hchan = chan->hs_hchan;
633 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
634 amp_disconnect_logical_link(hs_hchan);
637 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
641 case L2CAP_MODE_BASIC:
644 case L2CAP_MODE_LE_FLOWCTL:
645 skb_queue_purge(&chan->tx_q);
648 case L2CAP_MODE_ERTM:
649 __clear_retrans_timer(chan);
650 __clear_monitor_timer(chan);
651 __clear_ack_timer(chan);
653 skb_queue_purge(&chan->srej_q);
655 l2cap_seq_list_free(&chan->srej_list);
656 l2cap_seq_list_free(&chan->retrans_list);
660 case L2CAP_MODE_STREAMING:
661 skb_queue_purge(&chan->tx_q);
667 EXPORT_SYMBOL_GPL(l2cap_chan_del);
669 static void l2cap_conn_update_id_addr(struct work_struct *work)
671 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
672 id_addr_update_work);
673 struct hci_conn *hcon = conn->hcon;
674 struct l2cap_chan *chan;
676 mutex_lock(&conn->chan_lock);
678 list_for_each_entry(chan, &conn->chan_l, list) {
679 l2cap_chan_lock(chan);
680 bacpy(&chan->dst, &hcon->dst);
681 chan->dst_type = bdaddr_dst_type(hcon);
682 l2cap_chan_unlock(chan);
685 mutex_unlock(&conn->chan_lock);
688 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
690 struct l2cap_conn *conn = chan->conn;
691 struct l2cap_le_conn_rsp rsp;
694 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
695 result = L2CAP_CR_AUTHORIZATION;
697 result = L2CAP_CR_BAD_PSM;
699 l2cap_state_change(chan, BT_DISCONN);
701 rsp.dcid = cpu_to_le16(chan->scid);
702 rsp.mtu = cpu_to_le16(chan->imtu);
703 rsp.mps = cpu_to_le16(chan->mps);
704 rsp.credits = cpu_to_le16(chan->rx_credits);
705 rsp.result = cpu_to_le16(result);
707 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
711 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
713 struct l2cap_conn *conn = chan->conn;
714 struct l2cap_conn_rsp rsp;
717 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
718 result = L2CAP_CR_SEC_BLOCK;
720 result = L2CAP_CR_BAD_PSM;
722 l2cap_state_change(chan, BT_DISCONN);
724 rsp.scid = cpu_to_le16(chan->dcid);
725 rsp.dcid = cpu_to_le16(chan->scid);
726 rsp.result = cpu_to_le16(result);
727 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
729 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close a channel according to its current state: tear down, send a
 * Disconnect request, reject a pending incoming connect, or simply
 * delete it from the connection.
 * NOTE(review): the switch's case labels (BT_LISTEN, BT_CONNECTED,
 * BT_CONNECT2, etc.) and break statements are in elided lines of this
 * view — the groupings below are inferred context, confirm against the
 * full source.
 */
732 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
734 struct l2cap_conn *conn = chan->conn;
736 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
738 switch (chan->state) {
740 chan->ops->teardown(chan, 0);
/* Established conn-oriented channel: start the send timeout and ask
 * the peer to disconnect; other channel types are deleted directly.
 */
745 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
746 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
747 l2cap_send_disconn_req(chan, reason);
749 l2cap_chan_del(chan, reason);
/* Pending incoming connect: send the transport-appropriate reject */
753 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
754 if (conn->hcon->type == ACL_LINK)
755 l2cap_chan_connect_reject(chan);
756 else if (conn->hcon->type == LE_LINK)
757 l2cap_chan_le_connect_reject(chan);
760 l2cap_chan_del(chan, reason);
765 l2cap_chan_del(chan, reason);
769 chan->ops->teardown(chan, 0);
775 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
777 switch (chan->chan_type) {
779 switch (chan->sec_level) {
780 case BT_SECURITY_HIGH:
781 case BT_SECURITY_FIPS:
782 return HCI_AT_DEDICATED_BONDING_MITM;
783 case BT_SECURITY_MEDIUM:
784 return HCI_AT_DEDICATED_BONDING;
786 return HCI_AT_NO_BONDING;
789 case L2CAP_CHAN_CONN_LESS:
790 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
791 if (chan->sec_level == BT_SECURITY_LOW)
792 chan->sec_level = BT_SECURITY_SDP;
794 if (chan->sec_level == BT_SECURITY_HIGH ||
795 chan->sec_level == BT_SECURITY_FIPS)
796 return HCI_AT_NO_BONDING_MITM;
798 return HCI_AT_NO_BONDING;
800 case L2CAP_CHAN_CONN_ORIENTED:
801 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
802 if (chan->sec_level == BT_SECURITY_LOW)
803 chan->sec_level = BT_SECURITY_SDP;
805 if (chan->sec_level == BT_SECURITY_HIGH ||
806 chan->sec_level == BT_SECURITY_FIPS)
807 return HCI_AT_NO_BONDING_MITM;
809 return HCI_AT_NO_BONDING;
813 switch (chan->sec_level) {
814 case BT_SECURITY_HIGH:
815 case BT_SECURITY_FIPS:
816 return HCI_AT_GENERAL_BONDING_MITM;
817 case BT_SECURITY_MEDIUM:
818 return HCI_AT_GENERAL_BONDING;
820 return HCI_AT_NO_BONDING;
826 /* Service level security */
827 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
829 struct l2cap_conn *conn = chan->conn;
832 if (conn->hcon->type == LE_LINK)
833 return smp_conn_security(conn->hcon, chan->sec_level);
835 auth_type = l2cap_get_auth_type(chan);
837 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
/* Allocate the next signalling-command identifier for this connection,
 * serialized by conn->ident_lock. Identifiers wrap within the
 * kernel-reserved 1..128 range (the wrap-to-1 assignment and the
 * return of the new ident are in elided lines of this view).
 */
841 static u8 l2cap_get_ident(struct l2cap_conn *conn)
845 /* Get next available identificator.
846 * 1 - 128 are used by kernel.
847 * 129 - 199 are reserved.
848 * 200 - 254 are used by utilities like l2ping, etc.
851 mutex_lock(&conn->ident_lock);
853 if (++conn->tx_ident > 128)
858 mutex_unlock(&conn->ident_lock);
863 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
866 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
869 BT_DBG("code 0x%2.2x", code);
874 /* Use NO_FLUSH if supported or we have an LE link (which does
875 * not support auto-flushing packets) */
876 if (lmp_no_flush_capable(conn->hcon->hdev) ||
877 conn->hcon->type == LE_LINK)
878 flags = ACL_START_NO_FLUSH;
882 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
883 skb->priority = HCI_PRIO_MAX;
885 hci_send_acl(conn->hchan, skb, flags);
888 static bool __chan_is_moving(struct l2cap_chan *chan)
890 return chan->move_state != L2CAP_MOVE_STABLE &&
891 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
894 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
896 struct hci_conn *hcon = chan->conn->hcon;
899 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
902 if (chan->hs_hcon && !__chan_is_moving(chan)) {
904 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
911 /* Use NO_FLUSH for LE links (where this is the only option) or
912 * if the BR/EDR link supports it and flushing has not been
913 * explicitly requested (through FLAG_FLUSHABLE).
915 if (hcon->type == LE_LINK ||
916 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
917 lmp_no_flush_capable(hcon->hdev)))
918 flags = ACL_START_NO_FLUSH;
922 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
923 hci_send_acl(chan->conn->hchan, skb, flags);
926 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
928 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
929 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
931 if (enh & L2CAP_CTRL_FRAME_TYPE) {
934 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
935 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
942 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
943 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
950 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
952 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
953 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
955 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
958 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
959 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
966 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
967 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
974 static inline void __unpack_control(struct l2cap_chan *chan,
977 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
978 __unpack_extended_control(get_unaligned_le32(skb->data),
979 &bt_cb(skb)->control);
980 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
982 __unpack_enhanced_control(get_unaligned_le16(skb->data),
983 &bt_cb(skb)->control);
984 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Serialize an l2cap_ctrl into the 32-bit ERTM extended control field.
 * S-frames carry poll/supervise bits plus the frame-type flag;
 * I-frames carry SAR and TxSeq instead. NOTE(review): the "packed"
 * declaration, the else arm, and "return packed" are in elided lines.
 */
988 static u32 __pack_extended_control(struct l2cap_ctrl *control)
/* ReqSeq and the Final bit are common to both frame types */
992 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
993 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
995 if (control->sframe) {
996 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
997 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
998 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1000 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1001 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Serialize an l2cap_ctrl into the 16-bit ERTM enhanced control field;
 * 16-bit counterpart of __pack_extended_control with the same S-frame
 * vs I-frame layout. NOTE(review): the "packed" declaration, the else
 * arm, and "return packed" are in elided lines of this view.
 */
1007 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1011 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1012 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1014 if (control->sframe) {
1015 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1016 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1017 packed |= L2CAP_CTRL_FRAME_TYPE;
1019 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1020 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1026 static inline void __pack_control(struct l2cap_chan *chan,
1027 struct l2cap_ctrl *control,
1028 struct sk_buff *skb)
1030 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1031 put_unaligned_le32(__pack_extended_control(control),
1032 skb->data + L2CAP_HDR_SIZE);
1034 put_unaligned_le16(__pack_enhanced_control(control),
1035 skb->data + L2CAP_HDR_SIZE);
/* Size of the ERTM PDU header for this channel: extended (32-bit
 * control field) when FLAG_EXT_CTRL is set, otherwise the enhanced
 * (16-bit control field) header.
 */
1039 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1041 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1042 return L2CAP_EXT_HDR_SIZE;
1044 return L2CAP_ENH_HDR_SIZE;
1047 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1050 struct sk_buff *skb;
1051 struct l2cap_hdr *lh;
1052 int hlen = __ertm_hdr_size(chan);
1054 if (chan->fcs == L2CAP_FCS_CRC16)
1055 hlen += L2CAP_FCS_SIZE;
1057 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1060 return ERR_PTR(-ENOMEM);
1062 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1063 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1064 lh->cid = cpu_to_le16(chan->dcid);
1066 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1067 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1069 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1071 if (chan->fcs == L2CAP_FCS_CRC16) {
1072 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1073 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1076 skb->priority = HCI_PRIO_MAX;
1080 static void l2cap_send_sframe(struct l2cap_chan *chan,
1081 struct l2cap_ctrl *control)
1083 struct sk_buff *skb;
1086 BT_DBG("chan %p, control %p", chan, control);
1088 if (!control->sframe)
1091 if (__chan_is_moving(chan))
1094 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1098 if (control->super == L2CAP_SUPER_RR)
1099 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1100 else if (control->super == L2CAP_SUPER_RNR)
1101 set_bit(CONN_RNR_SENT, &chan->conn_state);
1103 if (control->super != L2CAP_SUPER_SREJ) {
1104 chan->last_acked_seq = control->reqseq;
1105 __clear_ack_timer(chan);
1108 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1109 control->final, control->poll, control->super);
1111 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1112 control_field = __pack_extended_control(control);
1114 control_field = __pack_enhanced_control(control);
1116 skb = l2cap_create_sframe_pdu(chan, control_field);
1118 l2cap_do_send(chan, skb);
1121 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1123 struct l2cap_ctrl control;
1125 BT_DBG("chan %p, poll %d", chan, poll);
1127 memset(&control, 0, sizeof(control));
1129 control.poll = poll;
1131 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1132 control.super = L2CAP_SUPER_RNR;
1134 control.super = L2CAP_SUPER_RR;
1136 control.reqseq = chan->buffer_seq;
1137 l2cap_send_sframe(chan, &control);
1140 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1142 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1145 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1148 static bool __amp_capable(struct l2cap_chan *chan)
1150 struct l2cap_conn *conn = chan->conn;
1151 struct hci_dev *hdev;
1152 bool amp_available = false;
1154 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1157 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1160 read_lock(&hci_dev_list_lock);
1161 list_for_each_entry(hdev, &hci_dev_list, list) {
1162 if (hdev->amp_type != AMP_TYPE_BREDR &&
1163 test_bit(HCI_UP, &hdev->flags)) {
1164 amp_available = true;
1168 read_unlock(&hci_dev_list_lock);
1170 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1171 return amp_available;
1176 static bool l2cap_check_efs(struct l2cap_chan *chan)
1178 /* Check EFS parameters */
1182 void l2cap_send_conn_req(struct l2cap_chan *chan)
1184 struct l2cap_conn *conn = chan->conn;
1185 struct l2cap_conn_req req;
1187 req.scid = cpu_to_le16(chan->scid);
1188 req.psm = chan->psm;
1190 chan->ident = l2cap_get_ident(conn);
1192 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1194 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1197 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1199 struct l2cap_create_chan_req req;
1200 req.scid = cpu_to_le16(chan->scid);
1201 req.psm = chan->psm;
1202 req.amp_id = amp_id;
1204 chan->ident = l2cap_get_ident(chan->conn);
1206 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1210 static void l2cap_move_setup(struct l2cap_chan *chan)
1212 struct sk_buff *skb;
1214 BT_DBG("chan %p", chan);
1216 if (chan->mode != L2CAP_MODE_ERTM)
1219 __clear_retrans_timer(chan);
1220 __clear_monitor_timer(chan);
1221 __clear_ack_timer(chan);
1223 chan->retry_count = 0;
1224 skb_queue_walk(&chan->tx_q, skb) {
1225 if (bt_cb(skb)->control.retries)
1226 bt_cb(skb)->control.retries = 1;
1231 chan->expected_tx_seq = chan->buffer_seq;
1233 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1234 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1235 l2cap_seq_list_clear(&chan->retrans_list);
1236 l2cap_seq_list_clear(&chan->srej_list);
1237 skb_queue_purge(&chan->srej_q);
1239 chan->tx_state = L2CAP_TX_STATE_XMIT;
1240 chan->rx_state = L2CAP_RX_STATE_MOVE;
1242 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1245 static void l2cap_move_done(struct l2cap_chan *chan)
1247 u8 move_role = chan->move_role;
1248 BT_DBG("chan %p", chan);
1250 chan->move_state = L2CAP_MOVE_STABLE;
1251 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1253 if (chan->mode != L2CAP_MODE_ERTM)
1256 switch (move_role) {
1257 case L2CAP_MOVE_ROLE_INITIATOR:
1258 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1259 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1261 case L2CAP_MOVE_ROLE_RESPONDER:
1262 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1267 static void l2cap_chan_ready(struct l2cap_chan *chan)
1269 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1270 chan->conf_state = 0;
1271 __clear_chan_timer(chan);
1273 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1274 chan->ops->suspend(chan);
1276 chan->state = BT_CONNECTED;
1278 chan->ops->ready(chan);
1281 static void l2cap_le_connect(struct l2cap_chan *chan)
1283 struct l2cap_conn *conn = chan->conn;
1284 struct l2cap_le_conn_req req;
1286 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1289 req.psm = chan->psm;
1290 req.scid = cpu_to_le16(chan->scid);
1291 req.mtu = cpu_to_le16(chan->imtu);
1292 req.mps = cpu_to_le16(chan->mps);
1293 req.credits = cpu_to_le16(chan->rx_credits);
1295 chan->ident = l2cap_get_ident(conn);
1297 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1301 static void l2cap_le_start(struct l2cap_chan *chan)
1303 struct l2cap_conn *conn = chan->conn;
1305 if (!smp_conn_security(conn->hcon, chan->sec_level))
1309 l2cap_chan_ready(chan);
1313 if (chan->state == BT_CONNECT)
1314 l2cap_le_connect(chan);
1317 static void l2cap_start_connection(struct l2cap_chan *chan)
1319 if (__amp_capable(chan)) {
1320 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1321 a2mp_discover_amp(chan);
1322 } else if (chan->conn->hcon->type == LE_LINK) {
1323 l2cap_le_start(chan);
1325 l2cap_send_conn_req(chan);
1329 static void l2cap_request_info(struct l2cap_conn *conn)
1331 struct l2cap_info_req req;
1333 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1336 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1338 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1339 conn->info_ident = l2cap_get_ident(conn);
1341 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1343 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1347 static void l2cap_do_start(struct l2cap_chan *chan)
1349 struct l2cap_conn *conn = chan->conn;
1351 if (conn->hcon->type == LE_LINK) {
1352 l2cap_le_start(chan);
1356 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1357 l2cap_request_info(conn);
1361 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1364 if (l2cap_chan_check_security(chan, true) &&
1365 __l2cap_no_conn_pending(chan))
1366 l2cap_start_connection(chan);
/* Check whether @mode is supported by both ends: nonzero when the
 * corresponding feature bit is set in both the remote feature mask and
 * our local one. NOTE(review): the condition guarding the ERTM/
 * streaming additions to local_feat_mask, the switch statement itself,
 * and the default return are in elided lines of this view.
 */
1369 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1371 u32 local_feat_mask = l2cap_feat_mask;
1373 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1376 case L2CAP_MODE_ERTM:
1377 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1378 case L2CAP_MODE_STREAMING:
1379 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1385 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1387 struct l2cap_conn *conn = chan->conn;
1388 struct l2cap_disconn_req req;
1393 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1394 __clear_retrans_timer(chan);
1395 __clear_monitor_timer(chan);
1396 __clear_ack_timer(chan);
1399 if (chan->scid == L2CAP_CID_A2MP) {
1400 l2cap_state_change(chan, BT_DISCONN);
1404 req.dcid = cpu_to_le16(chan->dcid);
1405 req.scid = cpu_to_le16(chan->scid);
1406 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1409 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1412 /* ---- L2CAP connections ---- */
1413 static void l2cap_conn_start(struct l2cap_conn *conn)
1415 struct l2cap_chan *chan, *tmp;
1417 BT_DBG("conn %p", conn);
1419 mutex_lock(&conn->chan_lock);
1421 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1422 l2cap_chan_lock(chan);
1424 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1425 l2cap_chan_ready(chan);
1426 l2cap_chan_unlock(chan);
1430 if (chan->state == BT_CONNECT) {
1431 if (!l2cap_chan_check_security(chan, true) ||
1432 !__l2cap_no_conn_pending(chan)) {
1433 l2cap_chan_unlock(chan);
1437 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1438 && test_bit(CONF_STATE2_DEVICE,
1439 &chan->conf_state)) {
1440 l2cap_chan_close(chan, ECONNRESET);
1441 l2cap_chan_unlock(chan);
1445 l2cap_start_connection(chan);
1447 } else if (chan->state == BT_CONNECT2) {
1448 struct l2cap_conn_rsp rsp;
1450 rsp.scid = cpu_to_le16(chan->dcid);
1451 rsp.dcid = cpu_to_le16(chan->scid);
1453 if (l2cap_chan_check_security(chan, false)) {
1454 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1455 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1456 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1457 chan->ops->defer(chan);
1460 l2cap_state_change(chan, BT_CONFIG);
1461 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1462 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1465 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1466 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1469 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1472 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1473 rsp.result != L2CAP_CR_SUCCESS) {
1474 l2cap_chan_unlock(chan);
1478 set_bit(CONF_REQ_SENT, &chan->conf_state);
1479 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1480 l2cap_build_conf_req(chan, buf), buf);
1481 chan->num_conf_req++;
1484 l2cap_chan_unlock(chan);
1487 mutex_unlock(&conn->chan_lock);
1490 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1492 struct hci_conn *hcon = conn->hcon;
1493 struct hci_dev *hdev = hcon->hdev;
1495 BT_DBG("%s conn %p", hdev->name, conn);
1497 /* For outgoing pairing which doesn't necessarily have an
1498 * associated socket (e.g. mgmt_pair_device).
1501 smp_conn_security(hcon, hcon->pending_sec_level);
1503 #ifndef CONFIG_TIZEN_WIP
1504 /* For LE slave connections, make sure the connection interval
1505 * is in the range of the minium and maximum interval that has
1506 * been configured for this connection. If not, then trigger
1507 * the connection update procedure.
1509 if (hcon->role == HCI_ROLE_SLAVE &&
1510 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1511 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1512 struct l2cap_conn_param_update_req req;
1514 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1515 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1516 req.latency = cpu_to_le16(hcon->le_conn_latency);
1517 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1519 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1520 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1524 * Too small supervision timeout causes sudden link loss,
1525 * when remote device has multiple links and it cannot manage those
1528 * To protect such a case, it needs to widen supervision timeout
1530 if (hcon->role == HCI_ROLE_SLAVE &&
1531 hcon->le_supv_timeout < hdev->le_supv_timeout) {
1532 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC &&
1533 hcon->features[0][0] & HCI_LE_CONN_PARAM_REQ_PROC) {
1534 BT_DBG("use hci_le_conn_update");
1535 hci_le_conn_update(hcon,
1536 hcon->le_conn_min_interval,
1537 hcon->le_conn_max_interval,
1538 hcon->le_conn_latency,
1539 hdev->le_supv_timeout);
1541 BT_DBG("use l2cap conn_update");
1542 l2cap_update_connection_param(conn,
1543 hcon->le_conn_min_interval,
1544 hcon->le_conn_max_interval,
1545 hcon->le_conn_latency,
1546 hdev->le_supv_timeout);
1553 static void l2cap_conn_ready(struct l2cap_conn *conn)
1555 struct l2cap_chan *chan;
1556 struct hci_conn *hcon = conn->hcon;
1558 BT_DBG("conn %p", conn);
1560 if (hcon->type == ACL_LINK)
1561 l2cap_request_info(conn);
1563 mutex_lock(&conn->chan_lock);
1565 list_for_each_entry(chan, &conn->chan_l, list) {
1567 l2cap_chan_lock(chan);
1569 if (chan->scid == L2CAP_CID_A2MP) {
1570 l2cap_chan_unlock(chan);
1574 if (hcon->type == LE_LINK) {
1575 l2cap_le_start(chan);
1576 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1577 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1578 l2cap_chan_ready(chan);
1579 } else if (chan->state == BT_CONNECT) {
1580 l2cap_do_start(chan);
1583 l2cap_chan_unlock(chan);
1586 mutex_unlock(&conn->chan_lock);
1588 if (hcon->type == LE_LINK)
1589 l2cap_le_conn_ready(conn);
1591 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1594 /* Notify sockets that we cannot guaranty reliability anymore */
1595 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1597 struct l2cap_chan *chan;
1599 BT_DBG("conn %p", conn);
1601 mutex_lock(&conn->chan_lock);
1603 list_for_each_entry(chan, &conn->chan_l, list) {
1604 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1605 l2cap_chan_set_err(chan, err);
1608 mutex_unlock(&conn->chan_lock);
1611 static void l2cap_info_timeout(struct work_struct *work)
1613 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1616 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1617 conn->info_ident = 0;
1619 l2cap_conn_start(conn);
/*
 * l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can either be explicitly unregistered or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */
1635 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1637 struct hci_dev *hdev = conn->hcon->hdev;
1640 /* We need to check whether l2cap_conn is registered. If it is not, we
1641 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1642 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1643 * relies on the parent hci_conn object to be locked. This itself relies
1644 * on the hci_dev object to be locked. So we must lock the hci device
1649 if (user->list.next || user->list.prev) {
1654 /* conn->hchan is NULL after l2cap_conn_del() was called */
1660 ret = user->probe(conn, user);
1664 list_add(&user->list, &conn->users);
1668 hci_dev_unlock(hdev);
1671 EXPORT_SYMBOL(l2cap_register_user);
1673 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1675 struct hci_dev *hdev = conn->hcon->hdev;
1679 if (!user->list.next || !user->list.prev)
1682 list_del(&user->list);
1683 user->list.next = NULL;
1684 user->list.prev = NULL;
1685 user->remove(conn, user);
1688 hci_dev_unlock(hdev);
1690 EXPORT_SYMBOL(l2cap_unregister_user);
1692 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1694 struct l2cap_user *user;
1696 while (!list_empty(&conn->users)) {
1697 user = list_first_entry(&conn->users, struct l2cap_user, list);
1698 list_del(&user->list);
1699 user->list.next = NULL;
1700 user->list.prev = NULL;
1701 user->remove(conn, user);
1705 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1707 struct l2cap_conn *conn = hcon->l2cap_data;
1708 struct l2cap_chan *chan, *l;
1713 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1715 kfree_skb(conn->rx_skb);
1717 skb_queue_purge(&conn->pending_rx);
1719 /* We can not call flush_work(&conn->pending_rx_work) here since we
1720 * might block if we are running on a worker from the same workqueue
1721 * pending_rx_work is waiting on.
1723 if (work_pending(&conn->pending_rx_work))
1724 cancel_work_sync(&conn->pending_rx_work);
1726 if (work_pending(&conn->id_addr_update_work))
1727 cancel_work_sync(&conn->id_addr_update_work);
1729 l2cap_unregister_all_users(conn);
1731 /* Force the connection to be immediately dropped */
1732 hcon->disc_timeout = 0;
1734 mutex_lock(&conn->chan_lock);
1737 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1738 l2cap_chan_hold(chan);
1739 l2cap_chan_lock(chan);
1741 l2cap_chan_del(chan, err);
1743 l2cap_chan_unlock(chan);
1745 chan->ops->close(chan);
1746 l2cap_chan_put(chan);
1749 mutex_unlock(&conn->chan_lock);
1751 hci_chan_del(conn->hchan);
1753 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1754 cancel_delayed_work_sync(&conn->info_timer);
1756 hcon->l2cap_data = NULL;
1758 l2cap_conn_put(conn);
1761 static void l2cap_conn_free(struct kref *ref)
1763 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1765 hci_conn_put(conn->hcon);
1769 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1771 kref_get(&conn->ref);
1774 EXPORT_SYMBOL(l2cap_conn_get);
1776 void l2cap_conn_put(struct l2cap_conn *conn)
1778 kref_put(&conn->ref, l2cap_conn_free);
1780 EXPORT_SYMBOL(l2cap_conn_put);
/* ---- Socket interface ---- */

/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.
 */
1787 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1792 struct l2cap_chan *c, *c1 = NULL;
1794 read_lock(&chan_list_lock);
1796 list_for_each_entry(c, &chan_list, global_l) {
1797 if (state && c->state != state)
1800 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1803 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1806 if (c->psm == psm) {
1807 int src_match, dst_match;
1808 int src_any, dst_any;
1811 src_match = !bacmp(&c->src, src);
1812 dst_match = !bacmp(&c->dst, dst);
1813 if (src_match && dst_match) {
1815 read_unlock(&chan_list_lock);
1820 src_any = !bacmp(&c->src, BDADDR_ANY);
1821 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1822 if ((src_match && dst_any) || (src_any && dst_match) ||
1823 (src_any && dst_any))
1829 l2cap_chan_hold(c1);
1831 read_unlock(&chan_list_lock);
1836 static void l2cap_monitor_timeout(struct work_struct *work)
1838 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1839 monitor_timer.work);
1841 BT_DBG("chan %p", chan);
1843 l2cap_chan_lock(chan);
1846 l2cap_chan_unlock(chan);
1847 l2cap_chan_put(chan);
1851 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1853 l2cap_chan_unlock(chan);
1854 l2cap_chan_put(chan);
1857 static void l2cap_retrans_timeout(struct work_struct *work)
1859 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1860 retrans_timer.work);
1862 BT_DBG("chan %p", chan);
1864 l2cap_chan_lock(chan);
1867 l2cap_chan_unlock(chan);
1868 l2cap_chan_put(chan);
1872 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1873 l2cap_chan_unlock(chan);
1874 l2cap_chan_put(chan);
1877 static void l2cap_streaming_send(struct l2cap_chan *chan,
1878 struct sk_buff_head *skbs)
1880 struct sk_buff *skb;
1881 struct l2cap_ctrl *control;
1883 BT_DBG("chan %p, skbs %p", chan, skbs);
1885 if (__chan_is_moving(chan))
1888 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1890 while (!skb_queue_empty(&chan->tx_q)) {
1892 skb = skb_dequeue(&chan->tx_q);
1894 bt_cb(skb)->control.retries = 1;
1895 control = &bt_cb(skb)->control;
1897 control->reqseq = 0;
1898 control->txseq = chan->next_tx_seq;
1900 __pack_control(chan, control, skb);
1902 if (chan->fcs == L2CAP_FCS_CRC16) {
1903 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1904 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1907 l2cap_do_send(chan, skb);
1909 BT_DBG("Sent txseq %u", control->txseq);
1911 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1912 chan->frames_sent++;
1916 static int l2cap_ertm_send(struct l2cap_chan *chan)
1918 struct sk_buff *skb, *tx_skb;
1919 struct l2cap_ctrl *control;
1922 BT_DBG("chan %p", chan);
1924 if (chan->state != BT_CONNECTED)
1927 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1930 if (__chan_is_moving(chan))
1933 while (chan->tx_send_head &&
1934 chan->unacked_frames < chan->remote_tx_win &&
1935 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1937 skb = chan->tx_send_head;
1939 bt_cb(skb)->control.retries = 1;
1940 control = &bt_cb(skb)->control;
1942 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1945 control->reqseq = chan->buffer_seq;
1946 chan->last_acked_seq = chan->buffer_seq;
1947 control->txseq = chan->next_tx_seq;
1949 __pack_control(chan, control, skb);
1951 if (chan->fcs == L2CAP_FCS_CRC16) {
1952 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1953 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1956 /* Clone after data has been modified. Data is assumed to be
1957 read-only (for locking purposes) on cloned sk_buffs.
1959 tx_skb = skb_clone(skb, GFP_KERNEL);
1964 __set_retrans_timer(chan);
1966 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1967 chan->unacked_frames++;
1968 chan->frames_sent++;
1971 if (skb_queue_is_last(&chan->tx_q, skb))
1972 chan->tx_send_head = NULL;
1974 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1976 l2cap_do_send(chan, tx_skb);
1977 BT_DBG("Sent txseq %u", control->txseq);
1980 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1981 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1986 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1988 struct l2cap_ctrl control;
1989 struct sk_buff *skb;
1990 struct sk_buff *tx_skb;
1993 BT_DBG("chan %p", chan);
1995 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1998 if (__chan_is_moving(chan))
2001 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2002 seq = l2cap_seq_list_pop(&chan->retrans_list);
2004 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2006 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2011 bt_cb(skb)->control.retries++;
2012 control = bt_cb(skb)->control;
2014 if (chan->max_tx != 0 &&
2015 bt_cb(skb)->control.retries > chan->max_tx) {
2016 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2017 l2cap_send_disconn_req(chan, ECONNRESET);
2018 l2cap_seq_list_clear(&chan->retrans_list);
2022 control.reqseq = chan->buffer_seq;
2023 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2028 if (skb_cloned(skb)) {
2029 /* Cloned sk_buffs are read-only, so we need a
2032 tx_skb = skb_copy(skb, GFP_KERNEL);
2034 tx_skb = skb_clone(skb, GFP_KERNEL);
2038 l2cap_seq_list_clear(&chan->retrans_list);
2042 /* Update skb contents */
2043 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2044 put_unaligned_le32(__pack_extended_control(&control),
2045 tx_skb->data + L2CAP_HDR_SIZE);
2047 put_unaligned_le16(__pack_enhanced_control(&control),
2048 tx_skb->data + L2CAP_HDR_SIZE);
2052 if (chan->fcs == L2CAP_FCS_CRC16) {
2053 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2054 tx_skb->len - L2CAP_FCS_SIZE);
2055 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2059 l2cap_do_send(chan, tx_skb);
2061 BT_DBG("Resent txseq %d", control.txseq);
2063 chan->last_acked_seq = chan->buffer_seq;
2067 static void l2cap_retransmit(struct l2cap_chan *chan,
2068 struct l2cap_ctrl *control)
2070 BT_DBG("chan %p, control %p", chan, control);
2072 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2073 l2cap_ertm_resend(chan);
2076 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2077 struct l2cap_ctrl *control)
2079 struct sk_buff *skb;
2081 BT_DBG("chan %p, control %p", chan, control);
2084 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2086 l2cap_seq_list_clear(&chan->retrans_list);
2088 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2091 if (chan->unacked_frames) {
2092 skb_queue_walk(&chan->tx_q, skb) {
2093 if (bt_cb(skb)->control.txseq == control->reqseq ||
2094 skb == chan->tx_send_head)
2098 skb_queue_walk_from(&chan->tx_q, skb) {
2099 if (skb == chan->tx_send_head)
2102 l2cap_seq_list_append(&chan->retrans_list,
2103 bt_cb(skb)->control.txseq);
2106 l2cap_ertm_resend(chan);
2110 static void l2cap_send_ack(struct l2cap_chan *chan)
2112 struct l2cap_ctrl control;
2113 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2114 chan->last_acked_seq);
2117 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2118 chan, chan->last_acked_seq, chan->buffer_seq);
2120 memset(&control, 0, sizeof(control));
2123 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2124 chan->rx_state == L2CAP_RX_STATE_RECV) {
2125 __clear_ack_timer(chan);
2126 control.super = L2CAP_SUPER_RNR;
2127 control.reqseq = chan->buffer_seq;
2128 l2cap_send_sframe(chan, &control);
2130 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2131 l2cap_ertm_send(chan);
2132 /* If any i-frames were sent, they included an ack */
2133 if (chan->buffer_seq == chan->last_acked_seq)
2137 /* Ack now if the window is 3/4ths full.
2138 * Calculate without mul or div
2140 threshold = chan->ack_win;
2141 threshold += threshold << 1;
2144 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2147 if (frames_to_ack >= threshold) {
2148 __clear_ack_timer(chan);
2149 control.super = L2CAP_SUPER_RR;
2150 control.reqseq = chan->buffer_seq;
2151 l2cap_send_sframe(chan, &control);
2156 __set_ack_timer(chan);
2160 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2161 struct msghdr *msg, int len,
2162 int count, struct sk_buff *skb)
2164 struct l2cap_conn *conn = chan->conn;
2165 struct sk_buff **frag;
2168 #ifdef CONFIG_TIZEN_WIP
2169 if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
2170 msg->msg_iov, count))
2173 if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
2180 /* Continuation fragments (no L2CAP header) */
2181 frag = &skb_shinfo(skb)->frag_list;
2183 struct sk_buff *tmp;
2185 count = min_t(unsigned int, conn->mtu, len);
2187 tmp = chan->ops->alloc_skb(chan, 0, count,
2188 msg->msg_flags & MSG_DONTWAIT);
2190 return PTR_ERR(tmp);
2193 #ifdef CONFIG_TIZEN_WIP
2194 if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
2195 msg->msg_iov, count))
2198 if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
2205 skb->len += (*frag)->len;
2206 skb->data_len += (*frag)->len;
2208 frag = &(*frag)->next;
2214 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2215 struct msghdr *msg, size_t len)
2217 struct l2cap_conn *conn = chan->conn;
2218 struct sk_buff *skb;
2219 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2220 struct l2cap_hdr *lh;
2222 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2223 __le16_to_cpu(chan->psm), len);
2225 count = min_t(unsigned int, (conn->mtu - hlen), len);
2227 skb = chan->ops->alloc_skb(chan, hlen, count,
2228 msg->msg_flags & MSG_DONTWAIT);
2232 /* Create L2CAP header */
2233 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2234 lh->cid = cpu_to_le16(chan->dcid);
2235 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2236 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2238 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2239 if (unlikely(err < 0)) {
2241 return ERR_PTR(err);
2246 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2247 struct msghdr *msg, size_t len)
2249 struct l2cap_conn *conn = chan->conn;
2250 struct sk_buff *skb;
2252 struct l2cap_hdr *lh;
2254 BT_DBG("chan %p len %zu", chan, len);
2256 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2258 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2259 msg->msg_flags & MSG_DONTWAIT);
2263 /* Create L2CAP header */
2264 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2265 lh->cid = cpu_to_le16(chan->dcid);
2266 lh->len = cpu_to_le16(len);
2268 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2269 if (unlikely(err < 0)) {
2271 return ERR_PTR(err);
2276 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2277 struct msghdr *msg, size_t len,
2280 struct l2cap_conn *conn = chan->conn;
2281 struct sk_buff *skb;
2282 int err, count, hlen;
2283 struct l2cap_hdr *lh;
2285 BT_DBG("chan %p len %zu", chan, len);
2288 return ERR_PTR(-ENOTCONN);
2290 hlen = __ertm_hdr_size(chan);
2293 hlen += L2CAP_SDULEN_SIZE;
2295 if (chan->fcs == L2CAP_FCS_CRC16)
2296 hlen += L2CAP_FCS_SIZE;
2298 count = min_t(unsigned int, (conn->mtu - hlen), len);
2300 skb = chan->ops->alloc_skb(chan, hlen, count,
2301 msg->msg_flags & MSG_DONTWAIT);
2305 /* Create L2CAP header */
2306 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2307 lh->cid = cpu_to_le16(chan->dcid);
2308 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2310 /* Control header is populated later */
2311 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2312 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2314 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2317 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2319 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2320 if (unlikely(err < 0)) {
2322 return ERR_PTR(err);
2325 bt_cb(skb)->control.fcs = chan->fcs;
2326 bt_cb(skb)->control.retries = 0;
2330 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2331 struct sk_buff_head *seg_queue,
2332 struct msghdr *msg, size_t len)
2334 struct sk_buff *skb;
2339 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2341 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2342 * so fragmented skbs are not used. The HCI layer's handling
2343 * of fragmented skbs is not compatible with ERTM's queueing.
2346 /* PDU size is derived from the HCI MTU */
2347 pdu_len = chan->conn->mtu;
2349 /* Constrain PDU size for BR/EDR connections */
2351 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2353 /* Adjust for largest possible L2CAP overhead. */
2355 pdu_len -= L2CAP_FCS_SIZE;
2357 pdu_len -= __ertm_hdr_size(chan);
2359 /* Remote device may have requested smaller PDUs */
2360 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2362 if (len <= pdu_len) {
2363 sar = L2CAP_SAR_UNSEGMENTED;
2367 sar = L2CAP_SAR_START;
2372 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2375 __skb_queue_purge(seg_queue);
2376 return PTR_ERR(skb);
2379 bt_cb(skb)->control.sar = sar;
2380 __skb_queue_tail(seg_queue, skb);
2386 if (len <= pdu_len) {
2387 sar = L2CAP_SAR_END;
2390 sar = L2CAP_SAR_CONTINUE;
2397 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2399 size_t len, u16 sdulen)
2401 struct l2cap_conn *conn = chan->conn;
2402 struct sk_buff *skb;
2403 int err, count, hlen;
2404 struct l2cap_hdr *lh;
2406 BT_DBG("chan %p len %zu", chan, len);
2409 return ERR_PTR(-ENOTCONN);
2411 hlen = L2CAP_HDR_SIZE;
2414 hlen += L2CAP_SDULEN_SIZE;
2416 count = min_t(unsigned int, (conn->mtu - hlen), len);
2418 skb = chan->ops->alloc_skb(chan, hlen, count,
2419 msg->msg_flags & MSG_DONTWAIT);
2423 /* Create L2CAP header */
2424 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2425 lh->cid = cpu_to_le16(chan->dcid);
2426 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2429 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2431 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2432 if (unlikely(err < 0)) {
2434 return ERR_PTR(err);
2440 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2441 struct sk_buff_head *seg_queue,
2442 struct msghdr *msg, size_t len)
2444 struct sk_buff *skb;
2448 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2451 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2457 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2459 __skb_queue_purge(seg_queue);
2460 return PTR_ERR(skb);
2463 __skb_queue_tail(seg_queue, skb);
2469 pdu_len += L2CAP_SDULEN_SIZE;
2476 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2478 struct sk_buff *skb;
2480 struct sk_buff_head seg_queue;
2485 /* Connectionless channel */
2486 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2487 skb = l2cap_create_connless_pdu(chan, msg, len);
2489 return PTR_ERR(skb);
2491 /* Channel lock is released before requesting new skb and then
2492 * reacquired thus we need to recheck channel state.
2494 if (chan->state != BT_CONNECTED) {
2499 l2cap_do_send(chan, skb);
2503 switch (chan->mode) {
2504 case L2CAP_MODE_LE_FLOWCTL:
2505 /* Check outgoing MTU */
2506 if (len > chan->omtu)
2509 if (!chan->tx_credits)
2512 __skb_queue_head_init(&seg_queue);
2514 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2516 if (chan->state != BT_CONNECTED) {
2517 __skb_queue_purge(&seg_queue);
2524 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2526 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2527 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2531 if (!chan->tx_credits)
2532 chan->ops->suspend(chan);
2538 case L2CAP_MODE_BASIC:
2539 /* Check outgoing MTU */
2540 if (len > chan->omtu)
2543 /* Create a basic PDU */
2544 skb = l2cap_create_basic_pdu(chan, msg, len);
2546 return PTR_ERR(skb);
2548 /* Channel lock is released before requesting new skb and then
2549 * reacquired thus we need to recheck channel state.
2551 if (chan->state != BT_CONNECTED) {
2556 l2cap_do_send(chan, skb);
2560 case L2CAP_MODE_ERTM:
2561 case L2CAP_MODE_STREAMING:
2562 /* Check outgoing MTU */
2563 if (len > chan->omtu) {
2568 __skb_queue_head_init(&seg_queue);
2570 /* Do segmentation before calling in to the state machine,
2571 * since it's possible to block while waiting for memory
2574 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2576 /* The channel could have been closed while segmenting,
2577 * check that it is still connected.
2579 if (chan->state != BT_CONNECTED) {
2580 __skb_queue_purge(&seg_queue);
2587 if (chan->mode == L2CAP_MODE_ERTM)
2588 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2590 l2cap_streaming_send(chan, &seg_queue);
2594 /* If the skbs were not queued for sending, they'll still be in
2595 * seg_queue and need to be purged.
2597 __skb_queue_purge(&seg_queue);
2601 BT_DBG("bad state %1.1x", chan->mode);
2607 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2609 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2611 struct l2cap_ctrl control;
2614 BT_DBG("chan %p, txseq %u", chan, txseq);
2616 memset(&control, 0, sizeof(control));
2618 control.super = L2CAP_SUPER_SREJ;
2620 for (seq = chan->expected_tx_seq; seq != txseq;
2621 seq = __next_seq(chan, seq)) {
2622 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2623 control.reqseq = seq;
2624 l2cap_send_sframe(chan, &control);
2625 l2cap_seq_list_append(&chan->srej_list, seq);
2629 chan->expected_tx_seq = __next_seq(chan, txseq);
2632 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2634 struct l2cap_ctrl control;
2636 BT_DBG("chan %p", chan);
2638 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2641 memset(&control, 0, sizeof(control));
2643 control.super = L2CAP_SUPER_SREJ;
2644 control.reqseq = chan->srej_list.tail;
2645 l2cap_send_sframe(chan, &control);
2648 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2650 struct l2cap_ctrl control;
2654 BT_DBG("chan %p, txseq %u", chan, txseq);
2656 memset(&control, 0, sizeof(control));
2658 control.super = L2CAP_SUPER_SREJ;
2660 /* Capture initial list head to allow only one pass through the list. */
2661 initial_head = chan->srej_list.head;
2664 seq = l2cap_seq_list_pop(&chan->srej_list);
2665 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2668 control.reqseq = seq;
2669 l2cap_send_sframe(chan, &control);
2670 l2cap_seq_list_append(&chan->srej_list, seq);
2671 } while (chan->srej_list.head != initial_head);
2674 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2676 struct sk_buff *acked_skb;
2679 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2681 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2684 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2685 chan->expected_ack_seq, chan->unacked_frames);
2687 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2688 ackseq = __next_seq(chan, ackseq)) {
2690 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2692 skb_unlink(acked_skb, &chan->tx_q);
2693 kfree_skb(acked_skb);
2694 chan->unacked_frames--;
2698 chan->expected_ack_seq = reqseq;
2700 if (chan->unacked_frames == 0)
2701 __clear_retrans_timer(chan);
2703 BT_DBG("unacked_frames %u", chan->unacked_frames);
2706 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2708 BT_DBG("chan %p", chan);
2710 chan->expected_tx_seq = chan->buffer_seq;
2711 l2cap_seq_list_clear(&chan->srej_list);
2712 skb_queue_purge(&chan->srej_q);
2713 chan->rx_state = L2CAP_RX_STATE_RECV;
2716 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2717 struct l2cap_ctrl *control,
2718 struct sk_buff_head *skbs, u8 event)
2720 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2724 case L2CAP_EV_DATA_REQUEST:
2725 if (chan->tx_send_head == NULL)
2726 chan->tx_send_head = skb_peek(skbs);
2728 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2729 l2cap_ertm_send(chan);
2731 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2732 BT_DBG("Enter LOCAL_BUSY");
2733 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2735 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2736 /* The SREJ_SENT state must be aborted if we are to
2737 * enter the LOCAL_BUSY state.
2739 l2cap_abort_rx_srej_sent(chan);
2742 l2cap_send_ack(chan);
2745 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2746 BT_DBG("Exit LOCAL_BUSY");
2747 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2749 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2750 struct l2cap_ctrl local_control;
2752 memset(&local_control, 0, sizeof(local_control));
2753 local_control.sframe = 1;
2754 local_control.super = L2CAP_SUPER_RR;
2755 local_control.poll = 1;
2756 local_control.reqseq = chan->buffer_seq;
2757 l2cap_send_sframe(chan, &local_control);
2759 chan->retry_count = 1;
2760 __set_monitor_timer(chan);
2761 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2764 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2765 l2cap_process_reqseq(chan, control->reqseq);
2767 case L2CAP_EV_EXPLICIT_POLL:
2768 l2cap_send_rr_or_rnr(chan, 1);
2769 chan->retry_count = 1;
2770 __set_monitor_timer(chan);
2771 __clear_ack_timer(chan);
2772 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2774 case L2CAP_EV_RETRANS_TO:
2775 l2cap_send_rr_or_rnr(chan, 1);
2776 chan->retry_count = 1;
2777 __set_monitor_timer(chan);
2778 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2780 case L2CAP_EV_RECV_FBIT:
2781 /* Nothing to process */
/* ERTM transmit state machine: WAIT_F state handler. The channel has
 * sent a poll (P=1) S-frame and is waiting for an S/I-frame carrying
 * the Final bit. New data is only queued here; receipt of the F-bit
 * moves the channel back to the XMIT state.
 * NOTE(review): this excerpt elides some original lines (breaks and
 * closing braces), so fall-through behavior must be confirmed against
 * the full file.
 */
2788 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2789 struct l2cap_ctrl *control,
2790 struct sk_buff_head *skbs, u8 event)
2792 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2796 case L2CAP_EV_DATA_REQUEST:
2797 if (chan->tx_send_head == NULL)
2798 chan->tx_send_head = skb_peek(skbs);
2799 /* Queue data, but don't send. */
2800 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2802 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2803 BT_DBG("Enter LOCAL_BUSY");
2804 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2806 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2807 /* The SREJ_SENT state must be aborted if we are to
2808 * enter the LOCAL_BUSY state.
2810 l2cap_abort_rx_srej_sent(chan);
2813 l2cap_send_ack(chan);
2816 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2817 BT_DBG("Exit LOCAL_BUSY");
2818 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If an RNR was sent while busy, resend an RR poll and re-arm the
 * monitor timer so the peer learns we can receive again.
 */
2820 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2821 struct l2cap_ctrl local_control;
2822 memset(&local_control, 0, sizeof(local_control));
2823 local_control.sframe = 1;
2824 local_control.super = L2CAP_SUPER_RR;
2825 local_control.poll = 1;
2826 local_control.reqseq = chan->buffer_seq;
2827 l2cap_send_sframe(chan, &local_control);
2829 chan->retry_count = 1;
2830 __set_monitor_timer(chan);
2831 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2834 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2835 l2cap_process_reqseq(chan, control->reqseq);
2839 case L2CAP_EV_RECV_FBIT:
/* Final bit received: stop the monitor timer, restart the
 * retransmission timer if frames are still unacked, and return
 * to the XMIT state.
 */
2840 if (control && control->final) {
2841 __clear_monitor_timer(chan);
2842 if (chan->unacked_frames > 0)
2843 __set_retrans_timer(chan);
2844 chan->retry_count = 0;
2845 chan->tx_state = L2CAP_TX_STATE_XMIT;
2846 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2849 case L2CAP_EV_EXPLICIT_POLL:
2852 case L2CAP_EV_MONITOR_TO:
/* Monitor timeout: retry the poll while under max_tx (0 means
 * unlimited retries), otherwise give up and disconnect.
 */
2853 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2854 l2cap_send_rr_or_rnr(chan, 1);
2855 __set_monitor_timer(chan);
2856 chan->retry_count++;
2858 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch a transmit-path event to the handler for the channel's
 * current ERTM TX state (XMIT or WAIT_F).
 */
2866 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2867 struct sk_buff_head *skbs, u8 event)
2869 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2870 chan, control, skbs, event, chan->tx_state);
2872 switch (chan->tx_state) {
2873 case L2CAP_TX_STATE_XMIT:
2874 l2cap_tx_state_xmit(chan, control, skbs, event);
2876 case L2CAP_TX_STATE_WAIT_F:
2877 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received control field's reqseq/F-bit into the TX state
 * machine (no data skbs involved).
 */
2885 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2886 struct l2cap_ctrl *control)
2888 BT_DBG("chan %p, control %p", chan, control);
2889 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only the Final bit of a received control field into the TX
 * state machine.
 */
2892 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2893 struct l2cap_ctrl *control)
2895 BT_DBG("chan %p, control %p", chan, control);
2896 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2899 /* Copy frame to all raw sockets on that connection */
2900 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2902 struct sk_buff *nskb;
2903 struct l2cap_chan *chan;
2905 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under chan_lock and hand a
 * clone of the skb to every RAW channel except the one the frame
 * arrived on.
 */
2907 mutex_lock(&conn->chan_lock);
2909 list_for_each_entry(chan, &conn->chan_l, list) {
2910 if (chan->chan_type != L2CAP_CHAN_RAW)
2913 /* Don't send frame to the channel it came from */
2914 if (bt_cb(skb)->chan == chan)
2917 nskb = skb_clone(skb, GFP_KERNEL);
/* NOTE(review): ops->recv takes ownership of nskb on success;
 * the failure path (freeing nskb) is elided in this excerpt —
 * confirm against the full file.
 */
2920 if (chan->ops->recv(chan, nskb))
2924 mutex_unlock(&conn->chan_lock);
2927 /* ---- L2CAP signalling commands ---- */
/* Allocate and build an skb for an L2CAP signalling command:
 * L2CAP header + command header + dlen bytes of payload. Payload
 * that does not fit in conn->mtu is carried in continuation
 * fragments chained on frag_list (no L2CAP header on fragments).
 * Returns the skb, or NULL on failure (elided lines).
 */
2928 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2929 u8 ident, u16 dlen, void *data)
2931 struct sk_buff *skb, **frag;
2932 struct l2cap_cmd_hdr *cmd;
2933 struct l2cap_hdr *lh;
2936 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2937 conn, code, ident, dlen);
2939 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2942 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2943 count = min_t(unsigned int, conn->mtu, len);
2945 skb = bt_skb_alloc(count, GFP_KERNEL);
2949 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2950 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links */
2952 if (conn->hcon->type == LE_LINK)
2953 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2955 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2957 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2960 cmd->len = cpu_to_le16(dlen);
2963 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2964 memcpy(skb_put(skb, count), data, count);
2970 /* Continuation fragments (no L2CAP header) */
2971 frag = &skb_shinfo(skb)->frag_list;
2973 count = min_t(unsigned int, conn->mtu, len);
2975 *frag = bt_skb_alloc(count, GFP_KERNEL);
2979 memcpy(skb_put(*frag, count), data, count);
2984 frag = &(*frag)->next;
/* Decode one configuration option at *ptr, returning its total
 * encoded length and writing type/olen/val out. 1-, 2- and 4-byte
 * values are read inline (unaligned-safe); longer values are
 * returned as a pointer cast into *val.
 * NOTE(review): no bounds check against the remaining buffer is
 * visible here — callers appear responsible for length validation;
 * confirm against the full file.
 */
2994 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2997 struct l2cap_conf_opt *opt = *ptr;
3000 len = L2CAP_CONF_OPT_SIZE + opt->len;
3008 *val = *((u8 *) opt->val);
3012 *val = get_unaligned_le16(opt->val);
3016 *val = get_unaligned_le32(opt->val);
3020 *val = (unsigned long) opt->val;
3024 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Serialize one configuration option at *ptr and advance the cursor
 * past it. 1-, 2- and 4-byte values are stored inline; longer
 * values are memcpy'd from the pointer passed in val.
 */
3028 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
3030 struct l2cap_conf_opt *opt = *ptr;
3032 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3039 *((u8 *) opt->val) = val;
3043 put_unaligned_le16(val, opt->val);
3047 put_unaligned_le32(val, opt->val);
3051 memcpy(opt->val, (void *) val, len);
3055 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local parameters. ERTM uses the channel's configured
 * service type; streaming mode forces best-effort.
 */
3058 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3060 struct l2cap_conf_efs efs;
3062 switch (chan->mode) {
3063 case L2CAP_MODE_ERTM:
3064 efs.id = chan->local_id;
3065 efs.stype = chan->local_stype;
3066 efs.msdu = cpu_to_le16(chan->local_msdu);
3067 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3068 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3069 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3072 case L2CAP_MODE_STREAMING:
3074 efs.stype = L2CAP_SERV_BESTEFFORT;
3075 efs.msdu = cpu_to_le16(chan->local_msdu);
3076 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3085 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3086 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if received frames
 * are still unacknowledged (buffer_seq ahead of last_acked_seq),
 * send an RR/RNR to ack them. Drops the channel reference taken
 * when the timer was armed.
 */
3089 static void l2cap_ack_timeout(struct work_struct *work)
3091 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3095 BT_DBG("chan %p", chan);
3097 l2cap_chan_lock(chan);
3099 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3100 chan->last_acked_seq);
3103 l2cap_send_rr_or_rnr(chan, 0);
3105 l2cap_chan_unlock(chan);
3106 l2cap_chan_put(chan);
/* Reset a channel's sequence-number state and queues for ERTM or
 * streaming operation. For non-ERTM modes this stops after the
 * common reset; for ERTM it also initializes the RX/TX state
 * machines, the three delayed-work timers, and the SREJ/retransmit
 * sequence lists. Returns 0 or a negative errno from seq-list
 * allocation (srej_list is freed if retrans_list allocation fails).
 */
3109 int l2cap_ertm_init(struct l2cap_chan *chan)
3113 chan->next_tx_seq = 0;
3114 chan->expected_tx_seq = 0;
3115 chan->expected_ack_seq = 0;
3116 chan->unacked_frames = 0;
3117 chan->buffer_seq = 0;
3118 chan->frames_sent = 0;
3119 chan->last_acked_seq = 0;
3121 chan->sdu_last_frag = NULL;
3124 skb_queue_head_init(&chan->tx_q);
/* AMP move state starts out stable on the BR/EDR controller */
3126 chan->local_amp_id = AMP_ID_BREDR;
3127 chan->move_id = AMP_ID_BREDR;
3128 chan->move_state = L2CAP_MOVE_STABLE;
3129 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3131 if (chan->mode != L2CAP_MODE_ERTM)
3134 chan->rx_state = L2CAP_RX_STATE_RECV;
3135 chan->tx_state = L2CAP_TX_STATE_XMIT;
3137 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3138 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3139 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3141 skb_queue_head_init(&chan->srej_q);
3143 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3147 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3149 l2cap_seq_list_free(&chan->srej_list);
/* Keep the requested ERTM/streaming mode only if the remote feature
 * mask supports it; otherwise fall back to basic mode.
 */
3154 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3157 case L2CAP_MODE_STREAMING:
3158 case L2CAP_MODE_ERTM:
3159 if (l2cap_mode_supported(mode, remote_feat_mask))
3163 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only when we expose the A2MP fixed
 * channel locally and the remote advertises the feature.
 */
3167 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3169 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3170 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
/* Extended Flow Specification is usable only when we expose the A2MP
 * fixed channel locally and the remote advertises the feature.
 */
3173 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3175 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3176 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
/* Fill in RFC retransmission/monitor timeouts. On an AMP link the
 * timeouts are derived from the controller's best-effort flush
 * timeout (converted to ms, scaled, clamped to 16 bits); on BR/EDR
 * the spec default constants are used.
 */
3179 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3180 struct l2cap_conf_rfc *rfc)
3182 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3183 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3185 /* Class 1 devices have must have ERTM timeouts
3186 * exceeding the Link Supervision Timeout. The
3187 * default Link Supervision Timeout for AMP
3188 * controllers is 10 seconds.
3190 * Class 1 devices use 0xffffffff for their
3191 * best-effort flush timeout, so the clamping logic
3192 * will result in a timeout that meets the above
3193 * requirement. ERTM timeouts are 16-bit values, so
3194 * the maximum timeout is 65.535 seconds.
3197 /* Convert timeout to milliseconds and round */
3198 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3200 /* This is the recommended formula for class 2 devices
3201 * that start ERTM timers when packets are sent to the
3204 ertm_to = 3 * ertm_to + 500;
3206 if (ertm_to > 0xffff)
3209 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3210 rfc->monitor_timeout = rfc->retrans_timeout;
3212 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3213 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose the TX window: if the requested window exceeds the default
 * and both sides support Extended Window Size, enable the extended
 * control field; otherwise clamp to the standard default window.
 * ack_win mirrors the chosen tx_win.
 */
3217 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3219 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3220 __l2cap_ews_supported(chan->conn)) {
3221 /* use extended control field */
3222 set_bit(FLAG_EXT_CTRL, &chan->flags);
3223 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3225 chan->tx_win = min_t(u16, chan->tx_win,
3226 L2CAP_DEFAULT_TX_WINDOW);
3227 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3229 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request for the channel into
 * 'data'. Emits MTU (if non-default), an RFC option matching the
 * negotiated mode, and — for ERTM/streaming — optional EFS, EWS and
 * FCS options. Returns the request length (final return elided in
 * this excerpt).
 */
3232 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3234 struct l2cap_conf_req *req = data;
3235 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3236 void *ptr = req->data;
3239 BT_DBG("chan %p", chan);
3241 if (chan->num_conf_req || chan->num_conf_rsp)
/* First request only: downgrade the mode if the remote lacks
 * support, unless this is a state-2 device with a fixed mode.
 */
3244 switch (chan->mode) {
3245 case L2CAP_MODE_STREAMING:
3246 case L2CAP_MODE_ERTM:
3247 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3250 if (__l2cap_efs_supported(chan->conn))
3251 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3255 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3260 if (chan->imtu != L2CAP_DEFAULT_MTU)
3261 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3263 switch (chan->mode) {
3264 case L2CAP_MODE_BASIC:
/* Basic mode: only send an explicit RFC option if the remote
 * supports ERTM or streaming (otherwise it is implied).
 */
3268 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3269 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3272 rfc.mode = L2CAP_MODE_BASIC;
3274 rfc.max_transmit = 0;
3275 rfc.retrans_timeout = 0;
3276 rfc.monitor_timeout = 0;
3277 rfc.max_pdu_size = 0;
3279 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3280 (unsigned long) &rfc);
3283 case L2CAP_MODE_ERTM:
3284 rfc.mode = L2CAP_MODE_ERTM;
3285 rfc.max_transmit = chan->max_tx;
3287 __l2cap_set_ertm_timeouts(chan, &rfc);
/* Cap the PDU size so a max-size PDU plus headers fits the
 * connection MTU.
 */
3289 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3290 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3292 rfc.max_pdu_size = cpu_to_le16(size);
3294 l2cap_txwin_setup(chan);
3296 rfc.txwin_size = min_t(u16, chan->tx_win,
3297 L2CAP_DEFAULT_TX_WINDOW);
3299 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3300 (unsigned long) &rfc);
3302 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3303 l2cap_add_opt_efs(&ptr, chan);
3305 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3306 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3309 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3310 if (chan->fcs == L2CAP_FCS_NONE ||
3311 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3312 chan->fcs = L2CAP_FCS_NONE;
3313 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3318 case L2CAP_MODE_STREAMING:
3319 l2cap_txwin_setup(chan);
3320 rfc.mode = L2CAP_MODE_STREAMING;
3322 rfc.max_transmit = 0;
3323 rfc.retrans_timeout = 0;
3324 rfc.monitor_timeout = 0;
3326 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3327 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3329 rfc.max_pdu_size = cpu_to_le16(size);
3331 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3332 (unsigned long) &rfc);
3334 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3335 l2cap_add_opt_efs(&ptr, chan);
3337 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3338 if (chan->fcs == L2CAP_FCS_NONE ||
3339 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3340 chan->fcs = L2CAP_FCS_NONE;
3341 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3347 req->dcid = cpu_to_le16(chan->dcid);
3348 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configuration Request (chan->conf_req /
 * chan->conf_len) and build our Configuration Response into 'data'.
 * Unknown non-hint options produce L2CAP_CONF_UNKNOWN; incompatible
 * modes or EFS service types can reject the connection outright
 * (-ECONNREFUSED). On success the response echoes/adjusts MTU, RFC,
 * EFS and related options and may return a PENDING result while EFS
 * negotiation completes.
 */
3353 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3355 struct l2cap_conf_rsp *rsp = data;
3356 void *ptr = rsp->data;
3357 void *req = chan->conf_req;
3358 int len = chan->conf_len;
3359 int type, hint, olen;
3361 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3362 struct l2cap_conf_efs efs;
3364 u16 mtu = L2CAP_DEFAULT_MTU;
3365 u16 result = L2CAP_CONF_SUCCESS;
3368 BT_DBG("chan %p", chan);
/* First pass: decode every option the peer sent */
3370 while (len >= L2CAP_CONF_OPT_SIZE) {
3371 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3373 hint = type & L2CAP_CONF_HINT;
3374 type &= L2CAP_CONF_MASK;
3377 case L2CAP_CONF_MTU:
3381 case L2CAP_CONF_FLUSH_TO:
3382 chan->flush_to = val;
3385 case L2CAP_CONF_QOS:
3388 case L2CAP_CONF_RFC:
3389 if (olen == sizeof(rfc))
3390 memcpy(&rfc, (void *) val, olen);
3393 case L2CAP_CONF_FCS:
3394 if (val == L2CAP_FCS_NONE)
3395 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3398 case L2CAP_CONF_EFS:
3400 if (olen == sizeof(efs))
3401 memcpy(&efs, (void *) val, olen);
3404 case L2CAP_CONF_EWS:
/* EWS is only legal when we expose the A2MP fixed channel */
3405 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3406 return -ECONNREFUSED;
3408 set_bit(FLAG_EXT_CTRL, &chan->flags);
3409 set_bit(CONF_EWS_RECV, &chan->conf_state);
3410 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3411 chan->remote_tx_win = val;
3418 result = L2CAP_CONF_UNKNOWN;
3419 *((u8 *) ptr++) = type;
3424 if (chan->num_conf_rsp || chan->num_conf_req > 1)
/* First exchange only: reconcile our mode with the peer's RFC */
3427 switch (chan->mode) {
3428 case L2CAP_MODE_STREAMING:
3429 case L2CAP_MODE_ERTM:
3430 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3431 chan->mode = l2cap_select_mode(rfc.mode,
3432 chan->conn->feat_mask);
3437 if (__l2cap_efs_supported(chan->conn))
3438 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3440 return -ECONNREFUSED;
3443 if (chan->mode != rfc.mode)
3444 return -ECONNREFUSED;
3450 if (chan->mode != rfc.mode) {
3451 result = L2CAP_CONF_UNACCEPT;
3452 rfc.mode = chan->mode;
3454 if (chan->num_conf_rsp == 1)
3455 return -ECONNREFUSED;
3457 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3458 (unsigned long) &rfc);
3461 if (result == L2CAP_CONF_SUCCESS) {
3462 /* Configure output options and let the other side know
3463 * which ones we don't like. */
3465 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3466 result = L2CAP_CONF_UNACCEPT;
3469 set_bit(CONF_MTU_DONE, &chan->conf_state);
3471 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS: a service type mismatch (other than no-traffic) is
 * unacceptable and eventually refuses the connection.
 */
3474 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3475 efs.stype != L2CAP_SERV_NOTRAFIC &&
3476 efs.stype != chan->local_stype) {
3478 result = L2CAP_CONF_UNACCEPT;
3480 if (chan->num_conf_req >= 1)
3481 return -ECONNREFUSED;
3483 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3485 (unsigned long) &efs);
3487 /* Send PENDING Conf Rsp */
3488 result = L2CAP_CONF_PENDING;
3489 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3494 case L2CAP_MODE_BASIC:
3495 chan->fcs = L2CAP_FCS_NONE;
3496 set_bit(CONF_MODE_DONE, &chan->conf_state);
3499 case L2CAP_MODE_ERTM:
3500 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3501 chan->remote_tx_win = rfc.txwin_size;
3503 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3505 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's PDU size to what fits our connection MTU */
3507 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3508 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3509 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3510 rfc.max_pdu_size = cpu_to_le16(size);
3511 chan->remote_mps = size;
3513 __l2cap_set_ertm_timeouts(chan, &rfc);
3515 set_bit(CONF_MODE_DONE, &chan->conf_state);
3517 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3518 sizeof(rfc), (unsigned long) &rfc);
3520 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3521 chan->remote_id = efs.id;
3522 chan->remote_stype = efs.stype;
3523 chan->remote_msdu = le16_to_cpu(efs.msdu);
3524 chan->remote_flush_to =
3525 le32_to_cpu(efs.flush_to);
3526 chan->remote_acc_lat =
3527 le32_to_cpu(efs.acc_lat);
3528 chan->remote_sdu_itime =
3529 le32_to_cpu(efs.sdu_itime);
3530 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3532 (unsigned long) &efs);
3536 case L2CAP_MODE_STREAMING:
3537 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3538 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3539 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3540 rfc.max_pdu_size = cpu_to_le16(size);
3541 chan->remote_mps = size;
3543 set_bit(CONF_MODE_DONE, &chan->conf_state);
3545 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3546 (unsigned long) &rfc);
3551 result = L2CAP_CONF_UNACCEPT;
3553 memset(&rfc, 0, sizeof(rfc));
3554 rfc.mode = chan->mode;
3557 if (result == L2CAP_CONF_SUCCESS)
3558 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3560 rsp->scid = cpu_to_le16(chan->dcid);
3561 rsp->result = cpu_to_le16(result);
3562 rsp->flags = cpu_to_le16(0);
/* Parse the peer's Configuration Response and build a follow-up
 * Configuration Request into 'data', adjusting our parameters to
 * what the peer will accept. May downgrade *result (e.g. MTU below
 * minimum) or refuse outright (-ECONNREFUSED) on mode/EFS conflicts.
 * On SUCCESS/PENDING, commits negotiated ERTM/streaming timeouts,
 * MPS, ack window and EFS values to the channel.
 */
3567 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3568 void *data, u16 *result)
3570 struct l2cap_conf_req *req = data;
3571 void *ptr = req->data;
3574 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3575 struct l2cap_conf_efs efs;
3577 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3579 while (len >= L2CAP_CONF_OPT_SIZE) {
3580 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3583 case L2CAP_CONF_MTU:
3584 if (val < L2CAP_DEFAULT_MIN_MTU) {
3585 *result = L2CAP_CONF_UNACCEPT;
3586 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3589 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3592 case L2CAP_CONF_FLUSH_TO:
3593 chan->flush_to = val;
3594 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3598 case L2CAP_CONF_RFC:
3599 if (olen == sizeof(rfc))
3600 memcpy(&rfc, (void *)val, olen);
/* A state-2 device cannot change mode mid-negotiation */
3602 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3603 rfc.mode != chan->mode)
3604 return -ECONNREFUSED;
3608 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3609 sizeof(rfc), (unsigned long) &rfc);
3612 case L2CAP_CONF_EWS:
3613 chan->ack_win = min_t(u16, val, chan->ack_win);
3614 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3618 case L2CAP_CONF_EFS:
3619 if (olen == sizeof(efs))
3620 memcpy(&efs, (void *)val, olen);
3622 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3623 efs.stype != L2CAP_SERV_NOTRAFIC &&
3624 efs.stype != chan->local_stype)
3625 return -ECONNREFUSED;
3627 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3628 (unsigned long) &efs);
3631 case L2CAP_CONF_FCS:
3632 if (*result == L2CAP_CONF_PENDING)
3633 if (val == L2CAP_FCS_NONE)
3634 set_bit(CONF_RECV_NO_FCS,
3640 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3641 return -ECONNREFUSED;
3643 chan->mode = rfc.mode;
3645 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3647 case L2CAP_MODE_ERTM:
3648 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3649 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3650 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3651 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3652 chan->ack_win = min_t(u16, chan->ack_win,
3655 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3656 chan->local_msdu = le16_to_cpu(efs.msdu);
3657 chan->local_sdu_itime =
3658 le32_to_cpu(efs.sdu_itime);
3659 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3660 chan->local_flush_to =
3661 le32_to_cpu(efs.flush_to);
3665 case L2CAP_MODE_STREAMING:
3666 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3670 req->dcid = cpu_to_le16(chan->dcid);
3671 req->flags = cpu_to_le16(0);
/* Build a minimal Configuration Response (scid/result/flags only,
 * no options) into 'data'. Return value (response length) is elided
 * in this excerpt.
 */
3676 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3677 u16 result, u16 flags)
3679 struct l2cap_conf_rsp *rsp = data;
3680 void *ptr = rsp->data;
3682 BT_DBG("chan %p", chan);
3684 rsp->scid = cpu_to_le16(chan->dcid);
3685 rsp->result = cpu_to_le16(result);
3686 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE credit-based connection response for a channel
 * that was accepted after a DEFER_SETUP hold, advertising our scid,
 * MTU, MPS and initial RX credits with a SUCCESS result.
 */
3691 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3693 struct l2cap_le_conn_rsp rsp;
3694 struct l2cap_conn *conn = chan->conn;
3696 BT_DBG("chan %p", chan);
3698 rsp.dcid = cpu_to_le16(chan->scid);
3699 rsp.mtu = cpu_to_le16(chan->imtu);
3700 rsp.mps = cpu_to_le16(chan->mps);
3701 rsp.credits = cpu_to_le16(chan->rx_credits);
3702 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3704 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR connection response (CONN_RSP or, for AMP,
 * CREATE_CHAN_RSP) with SUCCESS, then — unless a config request was
 * already sent — issue our first Configuration Request. Includes a
 * Tizen/SLP vendor patch raising Broadcom ACL priority for AVDTP to
 * avoid A2DP audio chopping.
 */
3708 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3710 struct l2cap_conn_rsp rsp;
3711 struct l2cap_conn *conn = chan->conn;
3715 rsp.scid = cpu_to_le16(chan->dcid);
3716 rsp.dcid = cpu_to_le16(chan->scid);
3717 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3718 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3721 rsp_code = L2CAP_CREATE_CHAN_RSP;
3723 rsp_code = L2CAP_CONN_RSP;
3725 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3727 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3729 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3731 /* BEGIN SLP_Bluetooth :: fix av chopping issue. */
3732 #ifdef HCI_BROADCOMM_QOS_PATCH
3733 /* To gurantee the A2DP packet*/
3734 if (chan->psm == L2CAP_PSM_AVDTP) {
3735 struct hci_cp_broadcom_cmd cp;
3737 cp.handle = cpu_to_le16(conn->hcon->handle);
3738 cp.priority = PRIORITY_HIGH;
3740 hci_send_cmd(conn->hcon->hdev, HCI_BROADCOM_QOS_CMD,
3744 /* END SLP_Bluetooth */
3746 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3747 l2cap_build_conf_req(chan, buf), buf);
3748 chan->num_conf_req++;
/* Extract RFC (and EWS) values from a successful Configuration
 * Response and commit them to the channel. Sane defaults are
 * preloaded so a remote that omits the RFC/EWS options still leaves
 * the channel usable. Only meaningful for ERTM/streaming channels.
 */
3751 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3755 /* Use sane default values in case a misbehaving remote device
3756 * did not send an RFC or extended window size option.
3758 u16 txwin_ext = chan->ack_win;
3759 struct l2cap_conf_rfc rfc = {
3761 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3762 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3763 .max_pdu_size = cpu_to_le16(chan->imtu),
3764 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3767 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3769 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3772 while (len >= L2CAP_CONF_OPT_SIZE) {
3773 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3776 case L2CAP_CONF_RFC:
3777 if (olen == sizeof(rfc))
3778 memcpy(&rfc, (void *)val, olen);
3780 case L2CAP_CONF_EWS:
3787 case L2CAP_MODE_ERTM:
3788 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3789 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3790 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* ack_win is bounded by the extended window (if EXT_CTRL) or
 * the standard RFC window.
 */
3791 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3792 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3794 chan->ack_win = min_t(u16, chan->ack_win,
3797 case L2CAP_MODE_STREAMING:
3798 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it answers our outstanding
 * Information Request (matching ident), cancel the info timer, mark
 * the feature-mask exchange done anyway, and proceed with starting
 * the pending channels.
 */
3802 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3803 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3806 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3808 if (cmd_len < sizeof(*rej))
3811 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3814 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3815 cmd->ident == conn->info_ident) {
3816 cancel_delayed_work(&conn->info_timer);
3818 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3819 conn->info_ident = 0;
3821 l2cap_conn_start(conn);
/* Handle an incoming Connection Request (or AMP Create Channel
 * Request): locate a listening channel for the PSM, enforce link
 * security (except for SDP), create the new channel, and send the
 * connection response. A pending result with CS_NO_INFO also kicks
 * off the feature-mask Information Request exchange. Returns the new
 * channel (or NULL on the rejection paths, elided here).
 */
3827 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3828 struct l2cap_cmd_hdr *cmd,
3829 u8 *data, u8 rsp_code, u8 amp_id)
3831 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3832 struct l2cap_conn_rsp rsp;
3833 struct l2cap_chan *chan = NULL, *pchan;
3834 int result, status = L2CAP_CS_NO_INFO;
3836 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3837 __le16 psm = req->psm;
3839 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3841 /* Check if we have socket listening on psm */
3842 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3843 &conn->hcon->dst, ACL_LINK);
3845 result = L2CAP_CR_BAD_PSM;
3849 mutex_lock(&conn->chan_lock);
3850 l2cap_chan_lock(pchan);
3852 /* Check if the ACL is secure enough (if not SDP) */
3853 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3854 !hci_conn_check_link_mode(conn->hcon)) {
3855 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3856 result = L2CAP_CR_SEC_BLOCK;
3860 result = L2CAP_CR_NO_MEM;
3862 /* Check if we already have channel with that dcid */
3863 if (__l2cap_get_chan_by_dcid(conn, scid))
3866 chan = pchan->ops->new_connection(pchan);
3870 /* For certain devices (ex: HID mouse), support for authentication,
3871 * pairing and bonding is optional. For such devices, inorder to avoid
3872 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3873 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3875 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3877 bacpy(&chan->src, &conn->hcon->src);
3878 bacpy(&chan->dst, &conn->hcon->dst);
3879 chan->src_type = bdaddr_src_type(conn->hcon);
3880 chan->dst_type = bdaddr_dst_type(conn->hcon);
3883 chan->local_amp_id = amp_id;
3885 __l2cap_chan_add(conn, chan);
3889 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3891 chan->ident = cmd->ident;
3893 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3894 if (l2cap_chan_check_security(chan, false)) {
/* Secure enough: either defer to userspace (DEFER_SETUP)
 * or move to BT_CONFIG / pending depending on AMP id.
 */
3895 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3896 l2cap_state_change(chan, BT_CONNECT2);
3897 result = L2CAP_CR_PEND;
3898 status = L2CAP_CS_AUTHOR_PEND;
3899 chan->ops->defer(chan);
3901 /* Force pending result for AMP controllers.
3902 * The connection will succeed after the
3903 * physical link is up.
3905 if (amp_id == AMP_ID_BREDR) {
3906 l2cap_state_change(chan, BT_CONFIG);
3907 result = L2CAP_CR_SUCCESS;
3909 l2cap_state_change(chan, BT_CONNECT2);
3910 result = L2CAP_CR_PEND;
3912 status = L2CAP_CS_NO_INFO;
3915 l2cap_state_change(chan, BT_CONNECT2);
3916 result = L2CAP_CR_PEND;
3917 status = L2CAP_CS_AUTHEN_PEND;
3920 l2cap_state_change(chan, BT_CONNECT2);
3921 result = L2CAP_CR_PEND;
3922 status = L2CAP_CS_NO_INFO;
3926 l2cap_chan_unlock(pchan);
3927 mutex_unlock(&conn->chan_lock);
3928 l2cap_chan_put(pchan);
3931 rsp.scid = cpu_to_le16(scid);
3932 rsp.dcid = cpu_to_le16(dcid);
3933 rsp.result = cpu_to_le16(result);
3934 rsp.status = cpu_to_le16(status);
3935 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3937 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3938 struct l2cap_info_req info;
3939 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3941 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3942 conn->info_ident = l2cap_get_ident(conn);
3944 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3946 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3947 sizeof(info), &info);
/* Immediate success: start configuration right away */
3950 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3951 result == L2CAP_CR_SUCCESS) {
3953 set_bit(CONF_REQ_SENT, &chan->conf_state);
3954 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3955 l2cap_build_conf_req(chan, buf), buf);
3956 chan->num_conf_req++;
/* Signalling entry point for a Connection Request: validate the
 * command length, notify the management interface of the (newly)
 * connected device, then delegate to l2cap_connect() with the
 * standard CONN_RSP code and the BR/EDR controller id.
 */
3962 static int l2cap_connect_req(struct l2cap_conn *conn,
3963 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3965 struct hci_dev *hdev = conn->hcon->hdev;
3966 struct hci_conn *hcon = conn->hcon;
3968 if (cmd_len < sizeof(struct l2cap_conn_req))
3972 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3973 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3974 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3975 hci_dev_unlock(hdev);
3977 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response / Create Channel Response. Looks the
 * channel up by scid (or by ident when result is pending), then on
 * SUCCESS moves to BT_CONFIG and sends our first Configuration
 * Request (with the Tizen Broadcom QoS vendor patch for AVDTP);
 * other results either leave the channel pending or delete it with
 * ECONNREFUSED.
 */
3981 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3982 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3985 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3986 u16 scid, dcid, result, status;
3987 struct l2cap_chan *chan;
3991 if (cmd_len < sizeof(*rsp))
3994 scid = __le16_to_cpu(rsp->scid);
3995 dcid = __le16_to_cpu(rsp->dcid);
3996 result = __le16_to_cpu(rsp->result);
3997 status = __le16_to_cpu(rsp->status);
3999 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4000 dcid, scid, result, status);
4002 mutex_lock(&conn->chan_lock);
4005 chan = __l2cap_get_chan_by_scid(conn, scid);
4011 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4020 l2cap_chan_lock(chan);
4023 case L2CAP_CR_SUCCESS:
4024 l2cap_state_change(chan, BT_CONFIG);
4027 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4029 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4032 /* BEGIN SLP_Bluetooth :: fix av chopping issue. */
4033 #ifdef HCI_BROADCOMM_QOS_PATCH
4034 /* To gurantee the A2DP packet*/
4035 if (chan->psm == L2CAP_PSM_AVDTP) {
4036 struct hci_cp_broadcom_cmd cp;
4037 cp.handle = cpu_to_le16(conn->hcon->handle);
4038 cp.priority = PRIORITY_HIGH;
4040 hci_send_cmd(conn->hcon->hdev, HCI_BROADCOM_QOS_CMD,
4044 /* END SLP_Bluetooth */
4046 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4047 l2cap_build_conf_req(chan, req), req);
4048 chan->num_conf_req++;
4052 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4056 l2cap_chan_del(chan, ECONNREFUSED);
4060 l2cap_chan_unlock(chan);
4063 mutex_unlock(&conn->chan_lock);
/* Pick the channel's FCS mode after configuration: FCS only applies
 * to ERTM/streaming, and defaults to CRC16 unless the peer asked for
 * no FCS during negotiation.
 */
4068 static inline void set_default_fcs(struct l2cap_chan *chan)
4070 /* FCS is enabled only in ERTM or streaming mode, if one or both
4073 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4074 chan->fcs = L2CAP_FCS_NONE;
4075 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4076 chan->fcs = L2CAP_FCS_CRC16;
/* Conclude EFS negotiation: clear the local-pending flag, mark our
 * output config done, and send a SUCCESS Configuration Response with
 * the given flags.
 */
4079 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4080 u8 ident, u16 flags)
4082 struct l2cap_conn *conn = chan->conn;
4084 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4087 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4088 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4090 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4091 l2cap_build_conf_rsp(chan, data,
4092 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the
 * source/destination CIDs that triggered the rejection.
 */
4095 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4098 struct l2cap_cmd_rej_cid rej;
4100 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4101 rej.scid = __cpu_to_le16(scid);
4102 rej.dcid = __cpu_to_le16(dcid);
4104 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request. Validates the target
 * channel and state, accumulates option data across continuation
 * packets in chan->conf_req (rejecting overflows), and on the final
 * fragment parses the request, sends our response, and — once both
 * directions are configured — finishes ERTM init and readies the
 * channel. Also sends our own Configuration Request if not yet sent.
 */
4107 static inline int l2cap_config_req(struct l2cap_conn *conn,
4108 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4111 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4114 struct l2cap_chan *chan;
4117 if (cmd_len < sizeof(*req))
4120 dcid = __le16_to_cpu(req->dcid);
4121 flags = __le16_to_cpu(req->flags);
4123 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4125 chan = l2cap_get_chan_by_scid(conn, dcid);
4127 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4131 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4132 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4137 /* Reject if config buffer is too small. */
4138 len = cmd_len - sizeof(*req);
4139 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4140 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4141 l2cap_build_conf_rsp(chan, rsp,
4142 L2CAP_CONF_REJECT, flags), rsp);
4147 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4148 chan->conf_len += len;
4150 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4151 /* Incomplete config. Send empty response. */
4152 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4153 l2cap_build_conf_rsp(chan, rsp,
4154 L2CAP_CONF_SUCCESS, flags), rsp);
4158 /* Complete config. */
4159 len = l2cap_parse_conf_req(chan, rsp);
4161 l2cap_send_disconn_req(chan, ECONNRESET);
4165 chan->ident = cmd->ident;
4166 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4167 chan->num_conf_rsp++;
4169 /* Reset config buffer. */
4172 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4175 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4176 set_default_fcs(chan);
4178 if (chan->mode == L2CAP_MODE_ERTM ||
4179 chan->mode == L2CAP_MODE_STREAMING)
4180 err = l2cap_ertm_init(chan);
4183 l2cap_send_disconn_req(chan, -err);
4185 l2cap_chan_ready(chan);
4190 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4192 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4193 l2cap_build_conf_req(chan, buf), buf);
4194 chan->num_conf_req++;
4197 /* Got Conf Rsp PENDING from remote side and assume we sent
4198 Conf Rsp PENDING in the code above */
4199 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4200 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4202 /* check compatibility */
4204 /* Send rsp for BR/EDR channel */
4206 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4208 chan->ident = cmd->ident;
4212 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response. On SUCCESS, commit the
 * negotiated RFC values; on PENDING, parse and either answer (BR/EDR)
 * or create the AMP logical link for EFS; on UNACCEPT, re-negotiate
 * with a fresh request up to L2CAP_CONF_MAX_CONF_RSP attempts; any
 * other result tears the channel down. When both config directions
 * complete, finish ERTM init and mark the channel ready.
 */
4216 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4217 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4220 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4221 u16 scid, flags, result;
4222 struct l2cap_chan *chan;
4223 int len = cmd_len - sizeof(*rsp);
4226 if (cmd_len < sizeof(*rsp))
4229 scid = __le16_to_cpu(rsp->scid);
4230 flags = __le16_to_cpu(rsp->flags);
4231 result = __le16_to_cpu(rsp->result);
4233 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4236 chan = l2cap_get_chan_by_scid(conn, scid);
4241 case L2CAP_CONF_SUCCESS:
4242 l2cap_conf_rfc_get(chan, rsp->data, len);
4243 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4246 case L2CAP_CONF_PENDING:
4247 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4249 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4252 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4255 l2cap_send_disconn_req(chan, ECONNRESET);
4259 if (!chan->hs_hcon) {
4260 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4263 if (l2cap_check_efs(chan)) {
4264 amp_create_logical_link(chan);
4265 chan->ident = cmd->ident;
4271 case L2CAP_CONF_UNACCEPT:
4272 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard the on-stack request buffer before re-parsing */
4275 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4276 l2cap_send_disconn_req(chan, ECONNRESET);
4280 /* throw out any old stored conf requests */
4281 result = L2CAP_CONF_SUCCESS;
4282 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4285 l2cap_send_disconn_req(chan, ECONNRESET);
4289 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4290 L2CAP_CONF_REQ, len, req);
4291 chan->num_conf_req++;
4292 if (result != L2CAP_CONF_SUCCESS)
4298 l2cap_chan_set_err(chan, ECONNRESET);
4300 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4301 l2cap_send_disconn_req(chan, ECONNRESET);
4305 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4308 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4310 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4311 set_default_fcs(chan);
4313 if (chan->mode == L2CAP_MODE_ERTM ||
4314 chan->mode == L2CAP_MODE_STREAMING)
4315 err = l2cap_ertm_init(chan);
4318 l2cap_send_disconn_req(chan, -err);
4320 l2cap_chan_ready(chan);
4324 l2cap_chan_unlock(chan);
/*
 * Handle a peer-initiated L2CAP Disconnect Request: acknowledge with a
 * Disconnect Response (cids echoed back swapped into the peer's view),
 * then shut down and delete the channel.  The hold/put pair keeps the
 * channel object alive across l2cap_chan_del() so the ops->close()
 * callback still has a valid reference.
 */
4328 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4329 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4332 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4333 struct l2cap_disconn_rsp rsp;
4335 struct l2cap_chan *chan;
/* Disconnect requests have a fixed size; anything else is malformed. */
4337 if (cmd_len != sizeof(*req))
4340 scid = __le16_to_cpu(req->scid);
4341 dcid = __le16_to_cpu(req->dcid);
4343 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4345 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid: look the channel up by dcid. */
4347 chan = __l2cap_get_chan_by_scid(conn, dcid);
4349 mutex_unlock(&conn->chan_lock);
/* Unknown cid: spec requires an Invalid CID command reject. */
4350 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4354 l2cap_chan_lock(chan);
4356 rsp.dcid = cpu_to_le16(chan->scid);
4357 rsp.scid = cpu_to_le16(chan->dcid);
4358 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4360 chan->ops->set_shutdown(chan);
/* Extra ref so close() below runs on a live object after chan_del. */
4362 l2cap_chan_hold(chan);
4363 l2cap_chan_del(chan, ECONNRESET);
4365 l2cap_chan_unlock(chan);
4367 chan->ops->close(chan);
4368 l2cap_chan_put(chan);
4370 mutex_unlock(&conn->chan_lock);
/*
 * Handle the peer's Disconnect Response to a disconnect we initiated:
 * find the channel by our scid and finish tearing it down.  The Tizen
 * Broadcom QOS patch additionally restores normal ACL priority when an
 * AVDTP channel goes away (see HCI_BROADCOMM_QOS_PATCH above).
 */
4375 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4376 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4379 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4381 struct l2cap_chan *chan;
4383 if (cmd_len != sizeof(*rsp))
4386 scid = __le16_to_cpu(rsp->scid);
4387 dcid = __le16_to_cpu(rsp->dcid);
4389 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4391 mutex_lock(&conn->chan_lock);
4393 chan = __l2cap_get_chan_by_scid(conn, scid);
4395 mutex_unlock(&conn->chan_lock);
4399 l2cap_chan_lock(chan);
/* Hold keeps chan valid for close()/the QOS patch after chan_del. */
4401 l2cap_chan_hold(chan);
/* err == 0: this is a clean, locally requested disconnect. */
4402 l2cap_chan_del(chan, 0);
4404 l2cap_chan_unlock(chan);
4406 chan->ops->close(chan);
4408 /* BEGIN SLP_Bluetooth :: fix av chopping issue. */
4409 #ifdef HCI_BROADCOMM_QOS_PATCH
4410 /* To reset the QOS back to normal */
/*
 * NOTE(review): chan->psm is read after l2cap_chan_del()/close();
 * this relies on the hold above keeping the struct (and psm) intact
 * — confirm chan_del does not clear psm on this kernel.
 */
4412 if (chan->psm == L2CAP_PSM_AVDTP) {
4413 struct hci_cp_broadcom_cmd cp;
4415 cp.handle = cpu_to_le16(conn->hcon->handle);
4416 cp.priority = PRIORITY_NORMAL;
4418 hci_send_cmd(conn->hcon->hdev, HCI_BROADCOM_QOS_CMD,
4422 /* END SLP_Bluetooth */
4424 l2cap_chan_put(chan);
4426 mutex_unlock(&conn->chan_lock);
/*
 * Answer an L2CAP Information Request.  Supported info types are the
 * extended feature mask and the fixed-channels bitmap; anything else
 * gets a NOTSUPP response.  The advertised feature mask is the compile
 * time l2cap_feat_mask plus ERTM/streaming, and the extended flow/window
 * bits when the A2MP fixed channel is locally enabled.
 */
4431 static inline int l2cap_information_req(struct l2cap_conn *conn,
4432 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4435 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4438 if (cmd_len != sizeof(*req))
4441 type = __le16_to_cpu(req->type);
4443 BT_DBG("type 0x%4.4x", type);
4445 if (type == L2CAP_IT_FEAT_MASK) {
4447 u32 feat_mask = l2cap_feat_mask;
4448 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4449 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4450 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4452 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow spec / window only make sense with AMP support. */
4454 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4455 feat_mask |= L2CAP_FEAT_EXT_FLOW
4456 | L2CAP_FEAT_EXT_WINDOW;
/* Mask is a raw LE32 payload after the info_rsp header. */
4458 put_unaligned_le32(feat_mask, rsp->data);
4459 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4461 } else if (type == L2CAP_IT_FIXED_CHAN) {
4463 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4465 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4466 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel map: first octet ours, rest reserved zero. */
4467 rsp->data[0] = conn->local_fixed_chan;
4468 memset(rsp->data + 1, 0, 7);
4469 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
/* Unknown info type: minimal NOTSUPP response. */
4472 struct l2cap_info_rsp rsp;
4473 rsp.type = cpu_to_le16(type);
4474 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4475 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/*
 * Process an Information Response for a request this connection sent.
 * A FEAT_MASK answer may chain a FIXED_CHAN request; once the info
 * exchange is complete (or failed) the feature-mask-done flag is set,
 * the outstanding ident cleared, and pending channels are started via
 * l2cap_conn_start().
 */
4482 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4483 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4486 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4489 if (cmd_len < sizeof(*rsp))
4492 type = __le16_to_cpu(rsp->type);
4493 result = __le16_to_cpu(rsp->result);
4495 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4497 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
/* Only accept a response matching our outstanding request ident. */
4498 if (cmd->ident != conn->info_ident ||
4499 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4502 cancel_delayed_work(&conn->info_timer);
/* Failure: give up on the info exchange but still start channels. */
4504 if (result != L2CAP_IR_SUCCESS) {
4505 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4506 conn->info_ident = 0;
4508 l2cap_conn_start(conn);
4514 case L2CAP_IT_FEAT_MASK:
4515 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: follow up with a FIXED_CHAN query. */
4517 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4518 struct l2cap_info_req req;
4519 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4521 conn->info_ident = l2cap_get_ident(conn);
4523 l2cap_send_cmd(conn, conn->info_ident,
4524 L2CAP_INFO_REQ, sizeof(req), &req);
4526 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4527 conn->info_ident = 0;
4529 l2cap_conn_start(conn);
4533 case L2CAP_IT_FIXED_CHAN:
/* First octet of the map carries the peer's fixed-channel bits. */
4534 conn->remote_fixed_chan = rsp->data[0];
4535 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4536 conn->info_ident = 0;
4538 l2cap_conn_start(conn);
/*
 * Handle an AMP Create Channel Request.  amp_id 0 (AMP_ID_BREDR) falls
 * back to a normal BR/EDR connect; otherwise the AMP controller id is
 * validated and, on success, the new channel is bound to the AMP
 * physical link.  Invalid controllers are answered with a
 * L2CAP_CR_BAD_AMP Create Channel Response.
 */
4545 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4546 struct l2cap_cmd_hdr *cmd,
4547 u16 cmd_len, void *data)
4549 struct l2cap_create_chan_req *req = data;
4550 struct l2cap_create_chan_rsp rsp;
4551 struct l2cap_chan *chan;
4552 struct hci_dev *hdev;
4555 if (cmd_len != sizeof(*req))
/* A2MP must be locally enabled for create-channel to be valid. */
4558 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4561 psm = le16_to_cpu(req->psm);
4562 scid = le16_to_cpu(req->scid);
4564 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4566 /* For controller id 0 make BR/EDR connection */
4567 if (req->amp_id == AMP_ID_BREDR) {
4568 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4573 /* Validate AMP controller id */
4574 hdev = hci_dev_get(req->amp_id)
4578 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
/* Create the channel, then attach it to the AMP link if one exists. */
4583 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4586 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4587 struct hci_conn *hs_hcon;
4589 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No AMP link to the peer: reject with invalid cid. */
4593 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4598 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4600 mgr->bredr_chan = chan;
4601 chan->hs_hcon = hs_hcon;
/* AMP links carry their own CRC; no L2CAP FCS on top. */
4602 chan->fcs = L2CAP_FCS_NONE;
4603 conn->mtu = hdev->block_mtu;
/* Error path: tell the peer the AMP controller id was bad. */
4612 rsp.scid = cpu_to_le16(scid);
4613 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4614 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4616 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/*
 * Send a Move Channel Request asking the peer to move this channel to
 * the given destination AMP controller, remembering the signalling
 * ident for matching the response and arming the move timeout.
 */
4622 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4624 struct l2cap_move_chan_req req;
4627 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4629 ident = l2cap_get_ident(chan->conn);
4630 chan->ident = ident;
4632 req.icid = cpu_to_le16(chan->scid);
4633 req.dest_amp_id = dest_amp_id;
4635 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4638 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/*
 * Send a Move Channel Response with the given result, echoing the
 * peer's initiator cid (our dcid) and reusing the request's ident.
 */
4641 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4643 struct l2cap_move_chan_rsp rsp;
4645 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4647 rsp.icid = cpu_to_le16(chan->dcid);
4648 rsp.result = cpu_to_le16(result);
4650 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/*
 * Send a Move Channel Confirm (with a fresh ident, since the confirm
 * expects its own response) and arm the move timeout while waiting for
 * the confirm response.
 */
4654 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4656 struct l2cap_move_chan_cfm cfm;
4658 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4660 chan->ident = l2cap_get_ident(chan->conn);
4662 cfm.icid = cpu_to_le16(chan->scid);
4663 cfm.result = cpu_to_le16(result);
4665 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4668 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/*
 * Send an UNCONFIRMED Move Channel Confirm for a bare icid — used when
 * no matching channel could be found, so only the icid is known.
 */
4671 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4673 struct l2cap_move_chan_cfm cfm;
4675 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4677 cfm.icid = cpu_to_le16(icid);
4678 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4680 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/*
 * Acknowledge a Move Channel Confirm with a Confirm Response carrying
 * the same icid, using the confirm's ident.
 */
4684 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4687 struct l2cap_move_chan_cfm_rsp rsp;
4689 BT_DBG("icid 0x%4.4x", icid);
4691 rsp.icid = cpu_to_le16(icid);
4692 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/*
 * Detach the channel from its high-speed (AMP) logical link by clearing
 * the cached hci_chan/hci_conn pointers.  Actual link teardown is still
 * a placeholder in this implementation.
 */
4695 static void __release_logical_link(struct l2cap_chan *chan)
4697 chan->hs_hchan = NULL;
4698 chan->hs_hcon = NULL;
4700 /* Placeholder - release the logical link */
/*
 * React to a failed AMP logical-link setup.  For a channel that never
 * reached BT_CONNECTED this was a create-channel failure and the channel
 * is disconnected; for an established channel the in-progress move is
 * unwound according to our move role.
 */
4703 static void l2cap_logical_fail(struct l2cap_chan *chan)
4705 /* Logical link setup failed */
4706 if (chan->state != BT_CONNECTED) {
4707 /* Create channel failure, disconnect */
4708 l2cap_send_disconn_req(chan, ECONNRESET);
4712 switch (chan->move_role) {
4713 case L2CAP_MOVE_ROLE_RESPONDER:
4714 l2cap_move_done(chan);
/* Tell the initiator we cannot host the channel on this AMP. */
4715 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4717 case L2CAP_MOVE_ROLE_INITIATOR:
4718 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4719 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4720 /* Remote has only sent pending or
4721 * success responses, clean up
4723 l2cap_move_done(chan);
4726 /* Other amp move states imply that the move
4727 * has already aborted
4729 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/*
 * Finish channel creation once the AMP logical link came up: bind the
 * hci_chan, send the deferred EFS configure response, and — if the
 * inbound configuration already completed — initialise ERTM and mark
 * the channel ready.
 */
4734 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4735 struct hci_chan *hchan)
4737 struct l2cap_conf_rsp rsp;
4739 chan->hs_hchan = hchan;
4740 chan->hs_hcon->l2cap_data = chan->conn;
/* chan->ident still holds the configure request ident to answer. */
4742 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4744 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4747 set_default_fcs(chan);
4749 err = l2cap_ertm_init(chan);
4751 l2cap_send_disconn_req(chan, -err);
4753 l2cap_chan_ready(chan);
/*
 * Advance the channel-move state machine after the AMP logical link for
 * a move came up.  Depending on the current move state and role this
 * either waits for the peer's success response, sends the confirm
 * (initiator), or sends the success response (responder).  Any other
 * state means the move already failed, so the link is dropped.
 */
4757 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4758 struct hci_chan *hchan)
4760 chan->hs_hcon = hchan->conn;
4761 chan->hs_hcon->l2cap_data = chan->conn;
4763 BT_DBG("move_state %d", chan->move_state);
4765 switch (chan->move_state) {
4766 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4767 /* Move confirm will be sent after a success
4768 * response is received
4770 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4772 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Local receiver busy: postpone until reassembly drains. */
4773 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4774 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4775 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4776 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4777 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4778 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4779 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4780 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4784 /* Move was not in expected state, free the channel */
4785 __release_logical_link(chan);
4787 chan->move_state = L2CAP_MOVE_STABLE;
4791 /* Call with chan locked */
/*
 * Logical-link completion callback: on failure unwind via
 * l2cap_logical_fail() and drop the link; on success route to the
 * create-completion path (channel not yet connected, and not a plain
 * BR/EDR channel) or the move-completion path (established channel).
 */
4792 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4795 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4798 l2cap_logical_fail(chan);
4799 __release_logical_link(chan);
4803 if (chan->state != BT_CONNECTED) {
4804 /* Ignore logical link if channel is on BR/EDR */
4805 if (chan->local_amp_id != AMP_ID_BREDR)
4806 l2cap_logical_finish_create(chan, hchan);
4808 l2cap_logical_finish_move(chan, hchan);
/*
 * Start moving a channel as the initiator.  From BR/EDR we only move
 * when the channel policy prefers AMP, and must first set up the
 * physical link; from an AMP controller we move straight back to BR/EDR
 * (dest_amp_id 0) and wait for the peer's success response.
 */
4812 void l2cap_move_start(struct l2cap_chan *chan)
4814 BT_DBG("chan %p", chan);
4816 if (chan->local_amp_id == AMP_ID_BREDR) {
4817 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4819 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4820 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4821 /* Placeholder - start physical link setup */
4823 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4824 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4826 l2cap_move_setup(chan);
/* Destination 0 == AMP_ID_BREDR: move back to BR/EDR. */
4827 l2cap_send_move_chan_req(chan, 0);
/*
 * Continue channel creation after AMP physical-link setup finished.
 * Outgoing channels (BT_CONNECT) either proceed with a Create Channel
 * Request on the AMP or fall back to a plain BR/EDR Connect Request.
 * Incoming channels answer the peer's create request and, on success,
 * immediately enter configuration.
 */
4831 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4832 u8 local_amp_id, u8 remote_amp_id)
4834 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4835 local_amp_id, remote_amp_id);
/* AMP links provide their own integrity check; disable L2CAP FCS. */
4837 chan->fcs = L2CAP_FCS_NONE;
4839 /* Outgoing channel on AMP */
4840 if (chan->state == BT_CONNECT) {
4841 if (result == L2CAP_CR_SUCCESS) {
4842 chan->local_amp_id = local_amp_id;
4843 l2cap_send_create_chan_req(chan, remote_amp_id);
4845 /* Revert to BR/EDR connect */
4846 l2cap_send_conn_req(chan);
4852 /* Incoming channel on AMP */
4853 if (__l2cap_no_conn_pending(chan)) {
4854 struct l2cap_conn_rsp rsp;
4856 rsp.scid = cpu_to_le16(chan->dcid);
4857 rsp.dcid = cpu_to_le16(chan->scid);
4859 if (result == L2CAP_CR_SUCCESS) {
4860 /* Send successful response */
4861 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4862 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4864 /* Send negative response */
4865 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4866 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4869 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
/* Accepted: move to CONFIG and fire the first configure request. */
4872 if (result == L2CAP_CR_SUCCESS) {
4873 l2cap_state_change(chan, BT_CONFIG);
4874 set_bit(CONF_REQ_SENT, &chan->conf_state);
4875 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4877 l2cap_build_conf_req(chan, buf), buf);
4878 chan->num_conf_req++;
/*
 * Initiator side of a channel move: quiesce the channel, record the
 * target controller, and send the Move Channel Request.
 */
4883 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4886 l2cap_move_setup(chan);
4887 chan->move_id = local_amp_id;
4888 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4890 l2cap_send_move_chan_req(chan, remote_amp_id);
/*
 * Responder side of a channel move after physical-link setup.  If the
 * logical link is already connected, answer with SUCCESS and complete
 * via l2cap_logical_cfm(); otherwise wait for the logical link, or
 * refuse the move when no link is available.
 *
 * NOTE(review): hchan acquisition is still a placeholder; hchan remains
 * NULL until that is implemented.
 */
4893 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4895 struct hci_chan *hchan = NULL;
4897 /* Placeholder - get hci_chan for logical link */
4900 if (hchan->state == BT_CONNECTED) {
4901 /* Logical link is ready to go */
4902 chan->hs_hcon = hchan->conn;
4903 chan->hs_hcon->l2cap_data = chan->conn;
4904 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4905 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4907 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4909 /* Wait for logical link to be ready */
4910 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4913 /* Logical link not available */
4914 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/*
 * Abort an in-progress channel move.  A responder must still answer the
 * peer's request (BAD_ID for an invalid controller, NOT_ALLOWED
 * otherwise); then the move state is reset and ERTM transmission is
 * resumed.
 */
4918 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4920 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4922 if (result == -EINVAL)
4923 rsp_result = L2CAP_MR_BAD_ID;
4925 rsp_result = L2CAP_MR_NOT_ALLOWED;
4927 l2cap_send_move_chan_rsp(chan, rsp_result);
4930 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4931 chan->move_state = L2CAP_MOVE_STABLE;
4933 /* Restart data transmission */
4934 l2cap_ertm_send(chan);
4937 /* Invoke with locked chan */
/*
 * Physical-link completion callback.  A dying channel is ignored; a not
 * yet connected channel continues creation; a failed result cancels the
 * move; otherwise the move proceeds according to our role.
 */
4938 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4940 u8 local_amp_id = chan->local_amp_id;
4941 u8 remote_amp_id = chan->remote_amp_id;
4943 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4944 chan, result, local_amp_id, remote_amp_id);
/* Channel already going away: nothing to do for the physical link. */
4946 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4947 l2cap_chan_unlock(chan);
4951 if (chan->state != BT_CONNECTED) {
4952 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4953 } else if (result != L2CAP_MR_SUCCESS) {
4954 l2cap_do_move_cancel(chan, result);
4956 switch (chan->move_role) {
4957 case L2CAP_MOVE_ROLE_INITIATOR:
4958 l2cap_do_move_initiate(chan, local_amp_id,
4961 case L2CAP_MOVE_ROLE_RESPONDER:
4962 l2cap_do_move_respond(chan, result);
/* No/unknown role while connected: treat as a cancelled move. */
4965 l2cap_do_move_cancel(chan, result);
/*
 * Handle a peer's Move Channel Request.  Validates that the channel is
 * movable (dynamic cid, not BREDR-only policy, ERTM or streaming mode),
 * that the destination controller differs from the current one and is a
 * live AMP device, and resolves move collisions by bd_addr comparison.
 * On acceptance the channel becomes move responder and either completes
 * immediately (to BR/EDR) or answers PEND while the AMP link is set up.
 */
4971 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4972 struct l2cap_cmd_hdr *cmd,
4973 u16 cmd_len, void *data)
4975 struct l2cap_move_chan_req *req = data;
4976 struct l2cap_move_chan_rsp rsp;
4977 struct l2cap_chan *chan;
4979 u16 result = L2CAP_MR_NOT_ALLOWED;
4981 if (cmd_len != sizeof(*req))
4984 icid = le16_to_cpu(req->icid);
4986 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
/* Moves only make sense when A2MP is enabled locally. */
4988 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4991 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown icid: answer NOT_ALLOWED without a channel. */
4993 rsp.icid = cpu_to_le16(icid);
4994 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4995 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5000 chan->ident = cmd->ident;
/* Only dynamic, non-BREDR-locked, ERTM/streaming channels can move. */
5002 if (chan->scid < L2CAP_CID_DYN_START ||
5003 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5004 (chan->mode != L2CAP_MODE_ERTM &&
5005 chan->mode != L2CAP_MODE_STREAMING)) {
5006 result = L2CAP_MR_NOT_ALLOWED;
5007 goto send_move_response;
/* Moving to the controller we are already on is an error. */
5010 if (chan->local_amp_id == req->dest_amp_id) {
5011 result = L2CAP_MR_SAME_ID;
5012 goto send_move_response;
/* Non-BR/EDR destination must be a live local AMP controller. */
5015 if (req->dest_amp_id != AMP_ID_BREDR) {
5016 struct hci_dev *hdev;
5017 hdev = hci_dev_get(req->dest_amp_id);
5018 if (!hdev || hdev->dev_type != HCI_AMP ||
5019 !test_bit(HCI_UP, &hdev->flags)) {
5023 result = L2CAP_MR_BAD_ID;
5024 goto send_move_response;
5029 /* Detect a move collision. Only send a collision response
5030 * if this side has "lost", otherwise proceed with the move.
5031 * The winner has the larger bd_addr.
5033 if ((__chan_is_moving(chan) ||
5034 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5035 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5036 result = L2CAP_MR_COLLISION;
5037 goto send_move_response;
5040 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5041 l2cap_move_setup(chan);
5042 chan->move_id = req->dest_amp_id;
5045 if (req->dest_amp_id == AMP_ID_BREDR) {
5046 /* Moving to BR/EDR */
5047 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5048 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5049 result = L2CAP_MR_PEND;
5051 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5052 result = L2CAP_MR_SUCCESS;
5055 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5056 /* Placeholder - uncomment when amp functions are available */
5057 /*amp_accept_physical(chan, req->dest_amp_id);*/
5058 result = L2CAP_MR_PEND;
5062 l2cap_send_move_chan_rsp(chan, result);
5064 l2cap_chan_unlock(chan);
/*
 * Continue an initiator-side move after a SUCCESS or PEND Move Channel
 * Response.  The move timer is restarted for PEND, then the current
 * move state decides whether to keep waiting for the logical link, send
 * the confirm, or — when the logical link is already connected — bind
 * it and finish via l2cap_logical_cfm().  Any unexpected state aborts
 * the move with an UNCONFIRMED confirm.
 */
5069 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5071 struct l2cap_chan *chan;
5072 struct hci_chan *hchan = NULL;
5074 chan = l2cap_get_chan_by_scid(conn, icid);
/* No such channel: confirm UNCONFIRMED against the bare icid. */
5076 l2cap_send_move_chan_cfm_icid(conn, icid);
5080 __clear_chan_timer(chan);
/* PEND: restart the (extended) move response timer. */
5081 if (result == L2CAP_MR_PEND)
5082 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5084 switch (chan->move_state) {
5085 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5086 /* Move confirm will be sent when logical link
5089 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5091 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5092 if (result == L2CAP_MR_PEND) {
5094 } else if (test_bit(CONN_LOCAL_BUSY,
5095 &chan->conn_state)) {
5096 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5098 /* Logical link is up or moving to BR/EDR,
5101 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5102 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5105 case L2CAP_MOVE_WAIT_RSP:
5107 if (result == L2CAP_MR_SUCCESS) {
5108 /* Remote is ready, send confirm immediately
5109 * after logical link is ready
5111 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5113 /* Both logical link and move success
5114 * are required to confirm
5116 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5119 /* Placeholder - get hci_chan for logical link */
5121 /* Logical link not available */
5122 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5126 /* If the logical link is not yet connected, do not
5127 * send confirmation.
5129 if (hchan->state != BT_CONNECTED)
5132 /* Logical link is already ready to go */
5134 chan->hs_hcon = hchan->conn;
5135 chan->hs_hcon->l2cap_data = chan->conn;
5137 if (result == L2CAP_MR_SUCCESS) {
5138 /* Can confirm now */
5139 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5141 /* Now only need move success
5144 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5147 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5150 /* Any other amp move state means the move failed. */
5151 chan->move_id = chan->local_amp_id;
5152 l2cap_move_done(chan);
5153 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5156 l2cap_chan_unlock(chan);
/*
 * Handle a failing Move Channel Response.  The channel is looked up by
 * the request's ident (falling back to a bare-icid UNCONFIRMED confirm
 * if not found).  A COLLISION result demotes the initiator to responder
 * and lets the peer drive; any other failure cancels the move, and an
 * UNCONFIRMED confirm is sent either way.
 */
5159 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5162 struct l2cap_chan *chan;
5164 chan = l2cap_get_chan_by_ident(conn, ident);
5166 /* Could not locate channel, icid is best guess */
5167 l2cap_send_move_chan_cfm_icid(conn, icid);
5171 __clear_chan_timer(chan);
5173 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5174 if (result == L2CAP_MR_COLLISION) {
/* We lost the collision: wait for the peer's move request. */
5175 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5177 /* Cleanup - cancel move */
5178 chan->move_id = chan->local_amp_id;
5179 l2cap_move_done(chan);
5183 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5185 l2cap_chan_unlock(chan);
/*
 * Dispatch a Move Channel Response: SUCCESS/PEND continues the move,
 * every other result takes the failure path.
 */
5188 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5189 struct l2cap_cmd_hdr *cmd,
5190 u16 cmd_len, void *data)
5192 struct l2cap_move_chan_rsp *rsp = data;
5195 if (cmd_len != sizeof(*rsp))
5198 icid = le16_to_cpu(rsp->icid);
5199 result = le16_to_cpu(rsp->result);
5201 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5203 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5204 l2cap_move_continue(conn, icid, result);
5206 l2cap_move_fail(conn, cmd->ident, icid, result);
/*
 * Handle a Move Channel Confirm (responder side).  A CONFIRMED result
 * commits the channel to the new controller (releasing the logical link
 * when the new home is BR/EDR); UNCONFIRMED rolls back to the current
 * controller.  A Confirm Response is always sent, even for an unknown
 * icid, as the spec requires.
 */
5211 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5212 struct l2cap_cmd_hdr *cmd,
5213 u16 cmd_len, void *data)
5215 struct l2cap_move_chan_cfm *cfm = data;
5216 struct l2cap_chan *chan;
5219 if (cmd_len != sizeof(*cfm))
5222 icid = le16_to_cpu(cfm->icid);
5223 result = le16_to_cpu(cfm->result);
5225 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5227 chan = l2cap_get_chan_by_dcid(conn, icid);
5229 /* Spec requires a response even if the icid was not found */
5230 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5234 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5235 if (result == L2CAP_MC_CONFIRMED) {
/* Commit: the move target becomes the active controller. */
5236 chan->local_amp_id = chan->move_id;
5237 if (chan->local_amp_id == AMP_ID_BREDR)
5238 __release_logical_link(chan);
/* Rollback: stay on the controller we were on. */
5240 chan->move_id = chan->local_amp_id;
5243 l2cap_move_done(chan);
5246 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5248 l2cap_chan_unlock(chan);
/*
 * Handle the peer's Move Channel Confirm Response (initiator side).
 * This completes the move handshake: the target controller becomes
 * active, the logical link is dropped if we moved back to BR/EDR, and
 * the channel returns to stable operation.
 */
5253 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5254 struct l2cap_cmd_hdr *cmd,
5255 u16 cmd_len, void *data)
5257 struct l2cap_move_chan_cfm_rsp *rsp = data;
5258 struct l2cap_chan *chan;
5261 if (cmd_len != sizeof(*rsp))
5264 icid = le16_to_cpu(rsp->icid);
5266 BT_DBG("icid 0x%4.4x", icid);
5268 chan = l2cap_get_chan_by_scid(conn, icid);
5272 __clear_chan_timer(chan);
5274 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5275 chan->local_amp_id = chan->move_id;
/* Moved back to BR/EDR: the AMP logical link is no longer needed. */
5277 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5278 __release_logical_link(chan);
5280 l2cap_move_done(chan);
5283 l2cap_chan_unlock(chan);
5288 #ifdef CONFIG_TIZEN_WIP
/*
 * Tizen extension: send an LE Connection Parameter Update Request with
 * the given connection interval bounds, slave latency and supervision
 * timeout multiplier (all converted to little-endian on the wire).
 */
5289 int l2cap_update_connection_param(struct l2cap_conn *conn, u16 min, u16 max,
5290 u16 latency, u16 to_multiplier)
5292 struct l2cap_conn_param_update_req req;
5294 req.min = cpu_to_le16(min);
5295 req.max = cpu_to_le16(max);
5296 req.latency = cpu_to_le16(latency);
5297 req.to_multiplier = cpu_to_le16(to_multiplier);
5299 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONN_PARAM_UPDATE_REQ,
/*
 * Handle an LE Connection Parameter Update Request from the slave.
 * Only valid when we are master.  The proposed parameters are validated
 * with hci_check_conn_params(); the accept/reject response is sent
 * first, and accepted parameters are then pushed to the controller and
 * reported to the management interface.
 */
5306 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5307 struct l2cap_cmd_hdr *cmd,
5308 u16 cmd_len, u8 *data)
5310 struct hci_conn *hcon = conn->hcon;
5311 struct l2cap_conn_param_update_req *req;
5312 struct l2cap_conn_param_update_rsp rsp;
5313 u16 min, max, latency, to_multiplier;
/* Per spec only the master may be asked to update parameters. */
5316 if (hcon->role != HCI_ROLE_MASTER)
5319 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5322 req = (struct l2cap_conn_param_update_req *) data;
5323 min = __le16_to_cpu(req->min);
5324 max = __le16_to_cpu(req->max);
5325 latency = __le16_to_cpu(req->latency);
5326 to_multiplier = __le16_to_cpu(req->to_multiplier);
5328 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5329 min, max, latency, to_multiplier);
5331 memset(&rsp, 0, sizeof(rsp));
5333 err = hci_check_conn_params(min, max, latency, to_multiplier);
5335 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5337 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5339 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: apply at the link layer and notify userspace via mgmt. */
5345 store_hint = hci_le_conn_update(hcon, min, max, latency,
5347 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5348 store_hint, min, max, latency,
/*
 * Handle an LE Credit Based Connection Response for a request we sent
 * (matched by signalling ident).  SUCCESS stores the peer's MTU/MPS and
 * initial credits and readies the channel; an authentication/encryption
 * error triggers a security upgrade and a retried connect (unless MITM
 * protection is already in place); anything else refuses the channel.
 */
5356 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5357 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5360 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5361 struct hci_conn *hcon = conn->hcon;
5362 u16 dcid, mtu, mps, credits, result;
5363 struct l2cap_chan *chan;
5366 if (cmd_len < sizeof(*rsp))
5369 dcid = __le16_to_cpu(rsp->dcid);
5370 mtu = __le16_to_cpu(rsp->mtu);
5371 mps = __le16_to_cpu(rsp->mps);
5372 credits = __le16_to_cpu(rsp->credits);
5373 result = __le16_to_cpu(rsp->result);
/* Spec minimum for LE CoC MTU and MPS is 23 octets. */
5375 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5378 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5379 dcid, mtu, mps, credits, result);
5381 mutex_lock(&conn->chan_lock);
/* Response is matched to our pending request by ident, not cid. */
5383 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5391 l2cap_chan_lock(chan);
5394 case L2CAP_CR_SUCCESS:
5398 chan->remote_mps = mps;
5399 chan->tx_credits = credits;
5400 l2cap_chan_ready(chan);
5403 case L2CAP_CR_AUTHENTICATION:
5404 case L2CAP_CR_ENCRYPTION:
5405 /* If we already have MITM protection we can't do
5408 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5409 l2cap_chan_del(chan, ECONNREFUSED);
/* Escalate security one level and retry the connect via SMP. */
5413 sec_level = hcon->sec_level + 1;
5414 if (chan->sec_level < sec_level)
5415 chan->sec_level = sec_level;
5417 /* We'll need to send a new Connect Request */
5418 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5420 smp_conn_security(hcon, chan->sec_level);
/* Any other result: refuse and drop the channel. */
5424 l2cap_chan_del(chan, ECONNREFUSED);
5428 l2cap_chan_unlock(chan);
5431 mutex_unlock(&conn->chan_lock);
/*
 * Dispatch one BR/EDR signalling-channel command to its handler.  Echo
 * requests are answered inline; unknown opcodes are logged and rejected
 * by the caller via the returned error.
 */
5436 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5437 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5442 switch (cmd->code) {
5443 case L2CAP_COMMAND_REJ:
5444 l2cap_command_rej(conn, cmd, cmd_len, data);
5447 case L2CAP_CONN_REQ:
5448 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5451 case L2CAP_CONN_RSP:
5452 case L2CAP_CREATE_CHAN_RSP:
5453 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5456 case L2CAP_CONF_REQ:
5457 err = l2cap_config_req(conn, cmd, cmd_len, data);
5460 case L2CAP_CONF_RSP:
5461 l2cap_config_rsp(conn, cmd, cmd_len, data);
5464 case L2CAP_DISCONN_REQ:
5465 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5468 case L2CAP_DISCONN_RSP:
5469 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5472 case L2CAP_ECHO_REQ:
/* Echo: reflect the payload straight back to the sender. */
5473 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5476 case L2CAP_ECHO_RSP:
5479 case L2CAP_INFO_REQ:
5480 err = l2cap_information_req(conn, cmd, cmd_len, data);
5483 case L2CAP_INFO_RSP:
5484 l2cap_information_rsp(conn, cmd, cmd_len, data);
5487 case L2CAP_CREATE_CHAN_REQ:
5488 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5491 case L2CAP_MOVE_CHAN_REQ:
5492 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5495 case L2CAP_MOVE_CHAN_RSP:
5496 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5499 case L2CAP_MOVE_CHAN_CFM:
5500 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5503 case L2CAP_MOVE_CHAN_CFM_RSP:
5504 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5508 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/*
 * Handle an incoming LE Credit Based Connection Request.  Finds a
 * listening channel for the psm, enforces SMP security, guards against
 * duplicate dcids, then spawns and registers the new channel with the
 * peer's MTU/MPS/credits.  With FLAG_DEFER_SETUP the response is
 * deferred to userspace (signalled internally via L2CAP_CR_PEND);
 * otherwise the accept/reject response is built and sent at the end.
 */
5516 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5517 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5520 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5521 struct l2cap_le_conn_rsp rsp;
5522 struct l2cap_chan *chan, *pchan;
5523 u16 dcid, scid, credits, mtu, mps;
5527 if (cmd_len != sizeof(*req))
5530 scid = __le16_to_cpu(req->scid);
5531 mtu = __le16_to_cpu(req->mtu);
5532 mps = __le16_to_cpu(req->mps);
/* Spec minimum for LE CoC MTU and MPS is 23 octets. */
5537 if (mtu < 23 || mps < 23)
5540 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5543 /* Check if we have socket listening on psm */
5544 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5545 &conn->hcon->dst, LE_LINK);
5547 result = L2CAP_CR_BAD_PSM;
5552 mutex_lock(&conn->chan_lock);
5553 l2cap_chan_lock(pchan);
/* The link must satisfy the listener's security requirements. */
5555 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5557 result = L2CAP_CR_AUTHENTICATION;
5559 goto response_unlock;
5562 /* Check if we already have channel with that dcid */
5563 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5564 result = L2CAP_CR_NO_MEM;
5566 goto response_unlock;
5569 chan = pchan->ops->new_connection(pchan);
5571 result = L2CAP_CR_NO_MEM;
5572 goto response_unlock;
5575 l2cap_le_flowctl_init(chan);
5577 bacpy(&chan->src, &conn->hcon->src);
5578 bacpy(&chan->dst, &conn->hcon->dst);
5579 chan->src_type = bdaddr_src_type(conn->hcon);
5580 chan->dst_type = bdaddr_dst_type(conn->hcon);
5584 chan->remote_mps = mps;
5585 chan->tx_credits = __le16_to_cpu(req->credits);
5587 __l2cap_chan_add(conn, chan);
5589 credits = chan->rx_credits;
5591 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5593 chan->ident = cmd->ident;
5595 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5596 l2cap_state_change(chan, BT_CONNECT2);
5597 /* The following result value is actually not defined
5598 * for LE CoC but we use it to let the function know
5599 * that it should bail out after doing its cleanup
5600 * instead of sending a response.
5602 result = L2CAP_CR_PEND;
5603 chan->ops->defer(chan);
5605 l2cap_chan_ready(chan);
5606 result = L2CAP_CR_SUCCESS;
5610 l2cap_chan_unlock(pchan);
5611 mutex_unlock(&conn->chan_lock);
5612 l2cap_chan_put(pchan);
/* Deferred setup: the response will be sent later by userspace. */
5614 if (result == L2CAP_CR_PEND)
5619 rsp.mtu = cpu_to_le16(chan->imtu);
5620 rsp.mps = cpu_to_le16(chan->mps);
5626 rsp.dcid = cpu_to_le16(dcid);
5627 rsp.credits = cpu_to_le16(credits);
5628 rsp.result = cpu_to_le16(result);
5630 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/*
 * Handle an LE Flow Control Credit packet: add the peer's newly granted
 * credits to the channel's tx budget (disconnecting on overflow past
 * LE_FLOWCTL_MAX_CREDITS), then flush as many queued SDUs as the new
 * budget allows and resume the suspended sender if credits remain.
 */
5635 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5636 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5639 struct l2cap_le_credits *pkt;
5640 struct l2cap_chan *chan;
5641 u16 cid, credits, max_credits;
5643 if (cmd_len != sizeof(*pkt))
5646 pkt = (struct l2cap_le_credits *) data;
5647 cid = __le16_to_cpu(pkt->cid);
5648 credits = __le16_to_cpu(pkt->credits);
5650 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5652 chan = l2cap_get_chan_by_dcid(conn, cid);
/* Peer may never push the running total beyond the 16-bit cap. */
5656 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5657 if (credits > max_credits) {
5658 BT_ERR("LE credits overflow");
5659 l2cap_send_disconn_req(chan, ECONNRESET);
5660 l2cap_chan_unlock(chan);
5662 /* Return 0 so that we don't trigger an unnecessary
5663 * command reject packet.
5668 chan->tx_credits += credits;
/* Drain queued frames while credits last. */
5670 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5671 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5675 if (chan->tx_credits)
5676 chan->ops->resume(chan);
5678 l2cap_chan_unlock(chan);
/*
 * Handle a Command Reject on the LE signalling channel: if it matches a
 * pending request of ours (by ident), the corresponding channel is torn
 * down with ECONNREFUSED.
 */
5683 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5684 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5687 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5688 struct l2cap_chan *chan;
5690 if (cmd_len < sizeof(*rej))
5693 mutex_lock(&conn->chan_lock);
5695 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5699 l2cap_chan_lock(chan);
5700 l2cap_chan_del(chan, ECONNREFUSED);
5701 l2cap_chan_unlock(chan);
5704 mutex_unlock(&conn->chan_lock);
/* Dispatch a single LE signaling command to its handler based on the
 * command code.  Unknown codes log an error (and, per elided code,
 * presumably yield an error so the caller sends a command reject —
 * TODO confirm against full source).
 */
5708 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5709 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5714 switch (cmd->code) {
5715 case L2CAP_COMMAND_REJ:
5716 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5719 case L2CAP_CONN_PARAM_UPDATE_REQ:
5720 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
/* Connection parameter update responses need no local action here. */
5723 case L2CAP_CONN_PARAM_UPDATE_RSP:
5726 case L2CAP_LE_CONN_RSP:
5727 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5730 case L2CAP_LE_CONN_REQ:
5731 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5734 case L2CAP_LE_CREDITS:
5735 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5738 case L2CAP_DISCONN_REQ:
5739 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5742 case L2CAP_DISCONN_RSP:
5743 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5747 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process one inbound PDU on the LE signaling channel: validate link
 * type and header, dispatch via l2cap_le_sig_cmd(), and send a
 * "command not understood" reject back on failure.
 * NOTE(review): elided lines hide the drop paths and the final
 * l2cap_send_cmd() argument list.
 */
5755 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5756 struct sk_buff *skb)
5758 struct hci_conn *hcon = conn->hcon;
5759 struct l2cap_cmd_hdr *cmd;
/* LE signaling is only valid on LE links. */
5763 if (hcon->type != LE_LINK)
5766 if (skb->len < L2CAP_CMD_HDR_SIZE)
5769 cmd = (void *) skb->data;
5770 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5772 len = le16_to_cpu(cmd->len);
5774 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* LE signaling carries exactly one command per PDU; length must match
 * and ident 0 is invalid. */
5776 if (len != skb->len || !cmd->ident) {
5777 BT_DBG("corrupted command");
5781 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5783 struct l2cap_cmd_rej_unk rej;
5785 BT_ERR("Wrong link type (%d)", err);
5787 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5788 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process the BR/EDR signaling channel.  Unlike LE, one PDU may carry
 * multiple commands, so iterate over the buffer, copying each header
 * before dispatching to l2cap_bredr_sig_cmd().  Failures trigger a
 * "command not understood" reject.
 * NOTE(review): loop-advance and drop lines are elided in this view.
 */
5796 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5797 struct sk_buff *skb)
5799 struct hci_conn *hcon = conn->hcon;
5800 u8 *data = skb->data;
5802 struct l2cap_cmd_hdr cmd;
/* Mirror raw signaling traffic to any raw listeners first. */
5805 l2cap_raw_recv(conn, skb);
5807 if (hcon->type != ACL_LINK)
5810 while (len >= L2CAP_CMD_HDR_SIZE) {
/* Copy the header out of the buffer; the buffer may be unaligned. */
5812 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5813 data += L2CAP_CMD_HDR_SIZE;
5814 len -= L2CAP_CMD_HDR_SIZE;
5816 cmd_len = le16_to_cpu(cmd.len);
5818 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* The claimed command length must fit in the remaining buffer and
 * ident 0 is reserved/invalid. */
5821 if (cmd_len > len || !cmd.ident) {
5822 BT_DBG("corrupted command");
5826 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5828 struct l2cap_cmd_rej_unk rej;
5830 BT_ERR("Wrong link type (%d)", err);
5832 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5833 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 frame check sequence of a received ERTM/streaming
 * frame.  The FCS covers the L2CAP header (which has already been
 * pulled, hence skb->data - hdr_size) plus the payload.
 */
5845 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5847 u16 our_fcs, rcv_fcs;
/* Extended control fields use a larger header than enhanced control. */
5850 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5851 hdr_size = L2CAP_EXT_HDR_SIZE;
5853 hdr_size = L2CAP_ENH_HDR_SIZE;
5855 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the trailing FCS bytes, then read them from just past the
 * (now shortened) payload. */
5856 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5857 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5858 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5860 if (our_fcs != rcv_fcs)
/* Respond to a poll (P=1) in ERTM: send an RNR if we are locally busy,
 * otherwise try to carry the F-bit on a pending I-frame, falling back
 * to an RR S-frame if no I-frame consumed the F-bit.
 */
5866 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5868 struct l2cap_ctrl control;
5870 BT_DBG("chan %p", chan);
5872 memset(&control, 0, sizeof(control));
5875 control.reqseq = chan->buffer_seq;
/* The F-bit must be set in whatever frame answers the poll. */
5876 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5878 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5879 control.super = L2CAP_SUPER_RNR;
5880 l2cap_send_sframe(chan, &control);
/* Remote just cleared busy; restart the retransmission timer if
 * frames are still unacknowledged. */
5883 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5884 chan->unacked_frames > 0)
5885 __set_retrans_timer(chan);
5887 /* Send pending iframes */
5888 l2cap_ertm_send(chan);
5890 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5891 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5892 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5895 control.super = L2CAP_SUPER_RR;
5896 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list and update the aggregate length /
 * truesize accounting, tracking the list tail through *last_frag so
 * appends stay O(1).
 */
5900 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5901 struct sk_buff **last_frag)
5903 /* skb->len reflects data in skb as well as all fragments
5904 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list; later ones chain off the
 * remembered tail. */
5906 if (!skb_has_frag_list(skb))
5907 skb_shinfo(skb)->frag_list = new_frag;
5909 new_frag->next = NULL;
5911 (*last_frag)->next = new_frag;
5912 *last_frag = new_frag;
5914 skb->len += new_frag->len;
5915 skb->data_len += new_frag->len;
5916 skb->truesize += new_frag->truesize;
/* Reassemble segmented ERTM/streaming SDUs according to the SAR bits:
 * unsegmented frames go straight to the channel, START frames record
 * the SDU length and seed chan->sdu, CONTINUE/END frames are appended;
 * a completed SDU is delivered via chan->ops->recv().
 * NOTE(review): error/cleanup paths are partially elided in this view.
 */
5919 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5920 struct l2cap_ctrl *control)
5924 switch (control->sar) {
5925 case L2CAP_SAR_UNSEGMENTED:
5929 err = chan->ops->recv(chan, skb);
5932 case L2CAP_SAR_START:
/* SDU length is carried in the first two payload bytes of the
 * START segment. */
5936 chan->sdu_len = get_unaligned_le16(skb->data);
5937 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* An SDU longer than our MTU is a protocol violation. */
5939 if (chan->sdu_len > chan->imtu) {
5944 if (skb->len >= chan->sdu_len)
5948 chan->sdu_last_frag = skb;
5954 case L2CAP_SAR_CONTINUE:
5958 append_skb_frag(chan->sdu, skb,
5959 &chan->sdu_last_frag);
/* A CONTINUE segment must not complete (or overflow) the SDU. */
5962 if (chan->sdu->len >= chan->sdu_len)
5972 append_skb_frag(chan->sdu, skb,
5973 &chan->sdu_last_frag);
/* END segment: total length must match exactly. */
5976 if (chan->sdu->len != chan->sdu_len)
5979 err = chan->ops->recv(chan, chan->sdu);
5982 /* Reassembly complete */
5984 chan->sdu_last_frag = NULL;
/* Error path: discard any partial SDU and reset reassembly state. */
5992 kfree_skb(chan->sdu);
5994 chan->sdu_last_frag = NULL;
/* Re-segment outgoing data after an MTU change (e.g. channel move).
 * Body is elided in this view — presumably a stub or placeholder;
 * TODO confirm against full source. */
6001 static int l2cap_resegment(struct l2cap_chan *chan)
/* Notify the ERTM TX state machine that the local receiver has become
 * busy (busy != 0) or cleared busy.  No-op for non-ERTM channels.
 */
6007 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6011 if (chan->mode != L2CAP_MODE_ERTM)
6014 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6015 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue of I-frames that have become in-sequence,
 * feeding each to l2cap_reassemble_sdu() in buffer_seq order until a
 * gap (or local busy) stops progress.  When the queue empties, return
 * to the RECV state and acknowledge.
 */
6018 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6021 /* Pass sequential frames to l2cap_reassemble_sdu()
6022 * until a gap is encountered.
6025 BT_DBG("chan %p", chan);
6027 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6028 struct sk_buff *skb;
6029 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6030 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6032 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6037 skb_unlink(skb, &chan->srej_q);
6038 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6039 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* All gaps filled: normal reception can resume. */
6044 if (skb_queue_empty(&chan->srej_q)) {
6045 chan->rx_state = L2CAP_RX_STATE_RECV;
6046 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: validate reqseq, enforce the retry
 * limit, then retransmit the requested frame — honoring the P/F bits
 * and SREJ-actioned bookkeeping from the ERTM spec.
 * NOTE(review): some returns/braces are elided in this view.
 */
6052 static void l2cap_handle_srej(struct l2cap_chan *chan,
6053 struct l2cap_ctrl *control)
6055 struct sk_buff *skb;
6057 BT_DBG("chan %p, control %p", chan, control);
/* An SREJ for the next (unsent) sequence number is invalid. */
6059 if (control->reqseq == chan->next_tx_seq) {
6060 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6061 l2cap_send_disconn_req(chan, ECONNRESET);
6065 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6068 BT_DBG("Seq %d not available for retransmission",
/* Give up the connection if the frame already hit max_tx retries. */
6073 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
6074 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6075 l2cap_send_disconn_req(chan, ECONNRESET);
6079 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6081 if (control->poll) {
6082 l2cap_pass_to_tx(chan, control);
/* Poll set: the retransmission must carry the F-bit. */
6084 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6085 l2cap_retransmit(chan, control);
6086 l2cap_ertm_send(chan);
6088 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6089 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6090 chan->srej_save_reqseq = control->reqseq;
6093 l2cap_pass_to_tx_fbit(chan, control);
6095 if (control->final) {
/* Only retransmit if this SREJ wasn't already actioned for the
 * same saved reqseq. */
6096 if (chan->srej_save_reqseq != control->reqseq ||
6097 !test_and_clear_bit(CONN_SREJ_ACT,
6099 l2cap_retransmit(chan, control);
6101 l2cap_retransmit(chan, control);
6102 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6103 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6104 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: validate reqseq and retry count, then
 * retransmit all unacked frames from reqseq (unless this REJ was
 * already actioned, per the F-bit handling).
 * NOTE(review): some returns/braces are elided in this view.
 */
6110 static void l2cap_handle_rej(struct l2cap_chan *chan,
6111 struct l2cap_ctrl *control)
6113 struct sk_buff *skb;
6115 BT_DBG("chan %p, control %p", chan, control);
/* A REJ of the next (unsent) sequence number is invalid. */
6117 if (control->reqseq == chan->next_tx_seq) {
6118 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6119 l2cap_send_disconn_req(chan, ECONNRESET);
6123 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6125 if (chan->max_tx && skb &&
6126 bt_cb(skb)->control.retries >= chan->max_tx) {
6127 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6128 l2cap_send_disconn_req(chan, ECONNRESET);
6132 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6134 l2cap_pass_to_tx(chan, control);
6136 if (control->final) {
/* Skip the retransmission if this REJ was already actioned. */
6137 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6138 l2cap_retransmit_all(chan, control);
6140 l2cap_retransmit_all(chan, control);
6141 l2cap_ertm_send(chan);
6142 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6143 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify the txseq of a received I-frame relative to the RX state:
 * expected, expected-under-SREJ, duplicate, unexpected (gap), or
 * invalid (outside the TX window).  Invalid frames may be safely
 * ignored when the TX window is at most half the sequence space
 * (see the "double poll" discussion below).
 */
6147 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6149 BT_DBG("chan %p, txseq %d", chan, txseq);
6151 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6152 chan->expected_tx_seq);
/* While SREJs are outstanding, extra classifications apply. */
6154 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6155 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6157 /* See notes below regarding "double poll" and
6160 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6161 BT_DBG("Invalid/Ignore - after SREJ");
6162 return L2CAP_TXSEQ_INVALID_IGNORE;
6164 BT_DBG("Invalid - in window after SREJ sent");
6165 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list is the retransmission we asked for. */
6169 if (chan->srej_list.head == txseq) {
6170 BT_DBG("Expected SREJ");
6171 return L2CAP_TXSEQ_EXPECTED_SREJ;
6174 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6175 BT_DBG("Duplicate SREJ - txseq already stored");
6176 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6179 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6180 BT_DBG("Unexpected SREJ - not requested");
6181 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6185 if (chan->expected_tx_seq == txseq) {
6186 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6188 BT_DBG("Invalid - txseq outside tx window");
6189 return L2CAP_TXSEQ_INVALID;
6192 return L2CAP_TXSEQ_EXPECTED;
/* Earlier than expected (modulo the sequence space): duplicate. */
6196 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6197 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6198 BT_DBG("Duplicate - expected_tx_seq later than txseq")
6199 return L2CAP_TXSEQ_DUPLICATE;
6202 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6203 /* A source of invalid packets is a "double poll" condition,
6204 * where delays cause us to send multiple poll packets. If
6205 * the remote stack receives and processes both polls,
6206 * sequence numbers can wrap around in such a way that a
6207 * resent frame has a sequence number that looks like new data
6208 * with a sequence gap. This would trigger an erroneous SREJ
6211 * Fortunately, this is impossible with a tx window that's
6212 * less than half of the maximum sequence number, which allows
6213 * invalid frames to be safely ignored.
6215 * With tx window sizes greater than half of the tx window
6216 * maximum, the frame is invalid and cannot be ignored. This
6217 * causes a disconnect.
6220 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6221 BT_DBG("Invalid/Ignore - txseq outside tx window");
6222 return L2CAP_TXSEQ_INVALID_IGNORE;
6224 BT_DBG("Invalid - txseq outside tx window");
6225 return L2CAP_TXSEQ_INVALID;
6228 BT_DBG("Unexpected - txseq indicates missing frames");
6229 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine: normal RECV state.  Handles incoming I-frames
 * (expected, unexpected → start SREJ recovery, duplicate, invalid) and
 * the RR/RNR/REJ/SREJ S-frames.  Frames not consumed (skb_in_use stays
 * false) are freed at the end.
 * NOTE(review): skb_in_use assignments and some braces are elided.
 */
6233 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6234 struct l2cap_ctrl *control,
6235 struct sk_buff *skb, u8 event)
6238 bool skb_in_use = false;
6240 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6244 case L2CAP_EV_RECV_IFRAME:
6245 switch (l2cap_classify_txseq(chan, control->txseq)) {
6246 case L2CAP_TXSEQ_EXPECTED:
6247 l2cap_pass_to_tx(chan, control);
/* Locally busy: drop in-sequence data; the peer will
 * retransmit once busy clears. */
6249 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6250 BT_DBG("Busy, discarding expected seq %d",
6255 chan->expected_tx_seq = __next_seq(chan,
6258 chan->buffer_seq = chan->expected_tx_seq;
6261 err = l2cap_reassemble_sdu(chan, skb, control);
6265 if (control->final) {
6266 if (!test_and_clear_bit(CONN_REJ_ACT,
6267 &chan->conn_state)) {
6269 l2cap_retransmit_all(chan, control);
6270 l2cap_ertm_send(chan);
6274 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6275 l2cap_send_ack(chan);
6277 case L2CAP_TXSEQ_UNEXPECTED:
6278 l2cap_pass_to_tx(chan, control);
6280 /* Can't issue SREJ frames in the local busy state.
6281 * Drop this frame, it will be seen as missing
6282 * when local busy is exited.
6284 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6285 BT_DBG("Busy, discarding unexpected seq %d",
6290 /* There was a gap in the sequence, so an SREJ
6291 * must be sent for each missing frame. The
6292 * current frame is stored for later use.
6294 skb_queue_tail(&chan->srej_q, skb);
6296 BT_DBG("Queued %p (queue len %d)", skb,
6297 skb_queue_len(&chan->srej_q));
6299 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6300 l2cap_seq_list_clear(&chan->srej_list);
6301 l2cap_send_srej(chan, control->txseq);
6303 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6305 case L2CAP_TXSEQ_DUPLICATE:
6306 l2cap_pass_to_tx(chan, control);
6308 case L2CAP_TXSEQ_INVALID_IGNORE:
6310 case L2CAP_TXSEQ_INVALID:
6312 l2cap_send_disconn_req(chan, ECONNRESET);
6316 case L2CAP_EV_RECV_RR:
6317 l2cap_pass_to_tx(chan, control);
6318 if (control->final) {
6319 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Don't resend while an AMP channel move is in progress. */
6321 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6322 !__chan_is_moving(chan)) {
6324 l2cap_retransmit_all(chan, control);
6327 l2cap_ertm_send(chan);
6328 } else if (control->poll) {
6329 l2cap_send_i_or_rr_or_rnr(chan);
6331 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6332 &chan->conn_state) &&
6333 chan->unacked_frames)
6334 __set_retrans_timer(chan);
6336 l2cap_ertm_send(chan);
6339 case L2CAP_EV_RECV_RNR:
6340 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6341 l2cap_pass_to_tx(chan, control);
6342 if (control && control->poll) {
6343 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6344 l2cap_send_rr_or_rnr(chan, 0);
/* Remote is busy: stop retransmitting until it recovers. */
6346 __clear_retrans_timer(chan);
6347 l2cap_seq_list_clear(&chan->retrans_list);
6349 case L2CAP_EV_RECV_REJ:
6350 l2cap_handle_rej(chan, control);
6352 case L2CAP_EV_RECV_SREJ:
6353 l2cap_handle_srej(chan, control);
/* Any frame not queued or consumed above is released here. */
6359 if (skb && !skb_in_use) {
6360 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine: SREJ_SENT state (selective-reject recovery in
 * progress).  In-sequence and SREJ-requested frames are queued in
 * srej_q for later in-order reassembly; gaps trigger further SREJs.
 * NOTE(review): skb_in_use assignments and some braces are elided.
 */
6367 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6368 struct l2cap_ctrl *control,
6369 struct sk_buff *skb, u8 event)
6372 u16 txseq = control->txseq;
6373 bool skb_in_use = false;
6375 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6379 case L2CAP_EV_RECV_IFRAME:
6380 switch (l2cap_classify_txseq(chan, txseq)) {
6381 case L2CAP_TXSEQ_EXPECTED:
6382 /* Keep frame for reassembly later */
6383 l2cap_pass_to_tx(chan, control);
6384 skb_queue_tail(&chan->srej_q, skb);
6386 BT_DBG("Queued %p (queue len %d)", skb,
6387 skb_queue_len(&chan->srej_q));
6389 chan->expected_tx_seq = __next_seq(chan, txseq);
6391 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This fills the SREJ we were waiting for; pop it and try to
 * drain the queued frames in order. */
6392 l2cap_seq_list_pop(&chan->srej_list);
6394 l2cap_pass_to_tx(chan, control);
6395 skb_queue_tail(&chan->srej_q, skb);
6397 BT_DBG("Queued %p (queue len %d)", skb,
6398 skb_queue_len(&chan->srej_q));
6400 err = l2cap_rx_queued_iframes(chan);
6405 case L2CAP_TXSEQ_UNEXPECTED:
6406 /* Got a frame that can't be reassembled yet.
6407 * Save it for later, and send SREJs to cover
6408 * the missing frames.
6410 skb_queue_tail(&chan->srej_q, skb);
6412 BT_DBG("Queued %p (queue len %d)", skb,
6413 skb_queue_len(&chan->srej_q));
6415 l2cap_pass_to_tx(chan, control);
6416 l2cap_send_srej(chan, control->txseq);
6418 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6419 /* This frame was requested with an SREJ, but
6420 * some expected retransmitted frames are
6421 * missing. Request retransmission of missing
6424 skb_queue_tail(&chan->srej_q, skb);
6426 BT_DBG("Queued %p (queue len %d)", skb,
6427 skb_queue_len(&chan->srej_q));
6429 l2cap_pass_to_tx(chan, control);
6430 l2cap_send_srej_list(chan, control->txseq);
6432 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6433 /* We've already queued this frame. Drop this copy. */
6434 l2cap_pass_to_tx(chan, control);
6436 case L2CAP_TXSEQ_DUPLICATE:
6437 /* Expecting a later sequence number, so this frame
6438 * was already received. Ignore it completely.
6441 case L2CAP_TXSEQ_INVALID_IGNORE:
6443 case L2CAP_TXSEQ_INVALID:
6445 l2cap_send_disconn_req(chan, ECONNRESET);
6449 case L2CAP_EV_RECV_RR:
6450 l2cap_pass_to_tx(chan, control);
6451 if (control->final) {
6452 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6454 if (!test_and_clear_bit(CONN_REJ_ACT,
6455 &chan->conn_state)) {
6457 l2cap_retransmit_all(chan, control);
6460 l2cap_ertm_send(chan);
6461 } else if (control->poll) {
6462 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6463 &chan->conn_state) &&
6464 chan->unacked_frames) {
6465 __set_retrans_timer(chan);
/* Answer the poll with the tail SREJ (carries the F-bit). */
6468 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6469 l2cap_send_srej_tail(chan);
6471 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6472 &chan->conn_state) &&
6473 chan->unacked_frames)
6474 __set_retrans_timer(chan);
6476 l2cap_send_ack(chan);
6479 case L2CAP_EV_RECV_RNR:
6480 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6481 l2cap_pass_to_tx(chan, control);
6482 if (control->poll) {
6483 l2cap_send_srej_tail(chan);
6485 struct l2cap_ctrl rr_control;
6486 memset(&rr_control, 0, sizeof(rr_control));
6487 rr_control.sframe = 1;
6488 rr_control.super = L2CAP_SUPER_RR;
6489 rr_control.reqseq = chan->buffer_seq;
6490 l2cap_send_sframe(chan, &rr_control);
6494 case L2CAP_EV_RECV_REJ:
6495 l2cap_handle_rej(chan, control);
6497 case L2CAP_EV_RECV_SREJ:
6498 l2cap_handle_srej(chan, control);
/* Any frame not queued above is released here. */
6502 if (skb && !skb_in_use) {
6503 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return to the RECV state, adopt the
 * MTU of the link now carrying the channel (AMP block MTU when an HS
 * connection exists, otherwise the BR/EDR ACL MTU), and re-segment.
 */
6510 static int l2cap_finish_move(struct l2cap_chan *chan)
6512 BT_DBG("chan %p", chan);
6514 chan->rx_state = L2CAP_RX_STATE_RECV;
6517 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6519 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6521 return l2cap_resegment(chan);
/* ERTM RX state: waiting for a poll (P=1) frame during a channel move.
 * On poll: sync the TX queue to the peer's reqseq, rewind next_tx_seq,
 * finish the move, and answer the poll; then re-process the event in
 * the RECV state (without the skb for I-frames).
 * NOTE(review): the poll-check guard lines are elided in this view.
 */
6524 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6525 struct l2cap_ctrl *control,
6526 struct sk_buff *skb, u8 event)
6530 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6536 l2cap_process_reqseq(chan, control->reqseq);
6538 if (!skb_queue_empty(&chan->tx_q))
6539 chan->tx_send_head = skb_peek(&chan->tx_q);
6541 chan->tx_send_head = NULL;
6543 /* Rewind next_tx_seq to the point expected
6546 chan->next_tx_seq = control->reqseq;
6547 chan->unacked_frames = 0;
6549 err = l2cap_finish_move(chan);
6553 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6554 l2cap_send_i_or_rr_or_rnr(chan);
6556 if (event == L2CAP_EV_RECV_IFRAME)
6559 return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM RX state: waiting for a final (F=1) frame during a channel move.
 * On final: resync the TX side to the peer's reqseq, adopt the new
 * link's MTU, re-segment, then handle the frame in the RECV state.
 */
6562 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6563 struct l2cap_ctrl *control,
6564 struct sk_buff *skb, u8 event)
/* Ignore everything until the F-bit arrives. */
6568 if (!control->final)
6571 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6573 chan->rx_state = L2CAP_RX_STATE_RECV;
6574 l2cap_process_reqseq(chan, control->reqseq);
6576 if (!skb_queue_empty(&chan->tx_q))
6577 chan->tx_send_head = skb_peek(&chan->tx_q);
6579 chan->tx_send_head = NULL;
6581 /* Rewind next_tx_seq to the point expected
6584 chan->next_tx_seq = control->reqseq;
6585 chan->unacked_frames = 0;
/* Pick the MTU of whichever link now carries the channel. */
6588 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6590 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6592 err = l2cap_resegment(chan);
6595 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true if reqseq acknowledges a frame that has been sent but not
 * yet acked, i.e. it lies within [expected_ack_seq, next_tx_seq] in
 * modular sequence arithmetic.
 */
6600 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6602 /* Make sure reqseq is for a packet that has been sent but not acked */
6605 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6606 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM RX dispatcher: validate the frame's reqseq, then route
 * the event to the handler for the current rx_state.  An out-of-range
 * reqseq is a protocol violation and disconnects the channel.
 */
6609 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6610 struct sk_buff *skb, u8 event)
6614 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6615 control, skb, event, chan->rx_state);
6617 if (__valid_reqseq(chan, control->reqseq)) {
6618 switch (chan->rx_state) {
6619 case L2CAP_RX_STATE_RECV:
6620 err = l2cap_rx_state_recv(chan, control, skb, event);
6622 case L2CAP_RX_STATE_SREJ_SENT:
6623 err = l2cap_rx_state_srej_sent(chan, control, skb,
6626 case L2CAP_RX_STATE_WAIT_P:
6627 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6629 case L2CAP_RX_STATE_WAIT_F:
6630 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6637 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6638 control->reqseq, chan->next_tx_seq,
6639 chan->expected_ack_seq);
6640 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: deliver in-sequence frames, silently drop
 * everything else (streaming mode has no retransmission), and always
 * advance expected_tx_seq past the received txseq.
 * NOTE(review): partial-SDU cleanup braces are elided in this view.
 */
6646 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6647 struct sk_buff *skb)
6651 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6654 if (l2cap_classify_txseq(chan, control->txseq) ==
6655 L2CAP_TXSEQ_EXPECTED) {
6656 l2cap_pass_to_tx(chan, control);
6658 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6659 __next_seq(chan, chan->buffer_seq));
6661 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6663 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: abandon any partial SDU and drop it. */
6666 kfree_skb(chan->sdu);
6669 chan->sdu_last_frag = NULL;
6673 BT_DBG("Freeing %p", skb);
/* Track the peer's sequence regardless of delivery outcome. */
6678 chan->last_acked_seq = control->txseq;
6679 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames: verify FCS, validate the
 * payload length against MPS, then route I-frames to l2cap_rx() /
 * l2cap_stream_rx() and S-frames to l2cap_rx() with the event mapped
 * from the supervisory function bits.
 * NOTE(review): drop paths and the final free are elided in this view.
 */
6684 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6686 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6690 __unpack_control(chan, skb);
6695 * We can just drop the corrupted I-frame here.
6696 * Receiver will miss it and start proper recovery
6697 * procedures and ask for retransmission.
6699 if (l2cap_check_fcs(chan, skb))
/* Account for the SDU-length prefix and trailing FCS when checking
 * the payload against the negotiated MPS. */
6702 if (!control->sframe && control->sar == L2CAP_SAR_START)
6703 len -= L2CAP_SDULEN_SIZE;
6705 if (chan->fcs == L2CAP_FCS_CRC16)
6706 len -= L2CAP_FCS_SIZE;
6708 if (len > chan->mps) {
6709 l2cap_send_disconn_req(chan, ECONNRESET);
6713 if (!control->sframe) {
6716 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6717 control->sar, control->reqseq, control->final,
6720 /* Validate F-bit - F=0 always valid, F=1 only
6721 * valid in TX WAIT_F
6723 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6726 if (chan->mode != L2CAP_MODE_STREAMING) {
6727 event = L2CAP_EV_RECV_IFRAME;
6728 err = l2cap_rx(chan, control, skb, event);
6730 err = l2cap_stream_rx(chan, control, skb);
6734 l2cap_send_disconn_req(chan, ECONNRESET);
6736 const u8 rx_func_to_event[4] = {
6737 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6738 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6741 /* Only I-frames are expected in streaming mode */
6742 if (chan->mode == L2CAP_MODE_STREAMING)
6745 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6746 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; any leftover bytes are bogus. */
6750 BT_ERR("Trailing bytes: %d in sframe", len);
6751 l2cap_send_disconn_req(chan, ECONNRESET);
6755 /* Validate F and P bits */
6756 if (control->final && (control->poll ||
6757 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6760 event = rx_func_to_event[control->super];
6761 if (l2cap_rx(chan, control, skb, event))
6762 l2cap_send_disconn_req(chan, ECONNRESET);
/* Return RX credits to the remote sender once our pool has dropped
 * below half of the initial allotment, topping it back up to
 * le_max_credits via an LE Credits signaling packet.
 */
6772 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6774 struct l2cap_conn *conn = chan->conn;
6775 struct l2cap_le_credits pkt;
6778 /* We return more credits to the sender only after the amount of
6779 * credits falls below half of the initial amount.
6781 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6784 return_credits = le_max_credits - chan->rx_credits;
6786 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
/* Credit the local count first so further receives are accepted. */
6788 chan->rx_credits += return_credits;
6790 pkt.cid = cpu_to_le16(chan->scid);
6791 pkt.credits = cpu_to_le16(return_credits);
6793 chan->ident = l2cap_get_ident(conn);
6795 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive one LE credit based flow control PDU: charge one RX credit,
 * replenish the peer's credits if needed, and reassemble the SDU from
 * its length-prefixed first fragment plus continuation fragments.
 * Returns 0 on every handled/consumed skb (see trailing comment) —
 * drop paths free the skb internally.
 * NOTE(review): drop/free lines are elided in this view.
 */
6798 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* Receiving without credits is a protocol violation by the peer. */
6802 if (!chan->rx_credits) {
6803 BT_ERR("No credits to receive LE L2CAP data");
6804 l2cap_send_disconn_req(chan, ECONNRESET);
6808 if (chan->imtu < skb->len) {
6809 BT_ERR("Too big LE L2CAP PDU");
6814 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6816 l2cap_chan_le_send_credits(chan);
/* First fragment of an SDU: the payload starts with the SDU length. */
6823 sdu_len = get_unaligned_le16(skb->data);
6824 skb_pull(skb, L2CAP_SDULEN_SIZE);
6826 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6827 sdu_len, skb->len, chan->imtu);
6829 if (sdu_len > chan->imtu) {
6830 BT_ERR("Too big LE L2CAP SDU length received");
6835 if (skb->len > sdu_len) {
6836 BT_ERR("Too much LE L2CAP data received");
/* Whole SDU in one PDU: deliver immediately. */
6841 if (skb->len == sdu_len)
6842 return chan->ops->recv(chan, skb);
6845 chan->sdu_len = sdu_len;
6846 chan->sdu_last_frag = skb;
/* Continuation fragment: append and deliver once complete. */
6851 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6852 chan->sdu->len, skb->len, chan->sdu_len);
6854 if (chan->sdu->len + skb->len > chan->sdu_len) {
6855 BT_ERR("Too much LE L2CAP data received");
6860 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6863 if (chan->sdu->len == chan->sdu_len) {
6864 err = chan->ops->recv(chan, chan->sdu);
6867 chan->sdu_last_frag = NULL;
/* Failure path: discard partial SDU and reset reassembly state. */
6875 kfree_skb(chan->sdu);
6877 chan->sdu_last_frag = NULL;
6881 /* We can't return an error here since we took care of the skb
6882 * freeing internally. An error return would cause the caller to
6883 * do a double-free of the skb.
/* Route an inbound data frame to the channel identified by its source
 * CID and hand it to the mode-specific receive path (LE flow control,
 * basic, or ERTM/streaming).  Unknown CIDs are dropped; the A2MP CID
 * may lazily create its channel.
 * NOTE(review): drop/unlock paths are elided in this view.
 */
6888 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6889 struct sk_buff *skb)
6891 struct l2cap_chan *chan;
6893 chan = l2cap_get_chan_by_scid(conn, cid);
/* The A2MP fixed channel is created on first use. */
6895 if (cid == L2CAP_CID_A2MP) {
6896 chan = a2mp_channel_create(conn, skb);
6902 l2cap_chan_lock(chan);
6904 BT_DBG("unknown cid 0x%4.4x", cid);
6905 /* Drop packet and return */
6911 BT_DBG("chan %p, len %d", chan, skb->len);
6913 /* If we receive data on a fixed channel before the info req/rsp
6914 * procdure is done simply assume that the channel is supported
6915 * and mark it as ready.
6917 #ifdef CONFIG_TIZEN_WIP
/* Tizen: IPSP fixed channels get special readiness handling based on
 * connection direction and link type. */
6918 if (chan->chan_type == L2CAP_CHAN_FIXED) {
6919 if (chan->psm == L2CAP_PSM_IPSP) {
6920 struct l2cap_conn *conn = chan->conn;
6922 if (conn->hcon->out)
6923 l2cap_chan_ready(chan);
6924 else if (conn->hcon->type != LE_LINK)
6925 l2cap_chan_ready(chan);
6927 l2cap_chan_ready(chan);
6932 if (chan->state != BT_CONNECTED)
6935 switch (chan->mode) {
6936 case L2CAP_MODE_LE_FLOWCTL:
6937 if (l2cap_le_data_rcv(chan, skb) < 0)
6942 case L2CAP_MODE_BASIC:
6943 /* If socket recv buffers overflows we drop data here
6944 * which is *bad* because L2CAP has to be reliable.
6945 * But we don't have any other choice. L2CAP doesn't
6946 * provide flow control mechanism. */
6948 if (chan->imtu < skb->len) {
6949 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6953 if (!chan->ops->recv(chan, skb))
6957 case L2CAP_MODE_ERTM:
6958 case L2CAP_MODE_STREAMING:
6959 l2cap_data_rcv(chan, skb);
6963 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6971 l2cap_chan_unlock(chan);
/* Deliver a connectionless (PSM-addressed) frame to a matching global
 * channel.  Records the remote address and PSM in the skb's control
 * block so recvmsg() can populate msg_name.
 * NOTE(review): drop/free lines are elided in this view.
 */
6974 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6975 struct sk_buff *skb)
6977 struct hci_conn *hcon = conn->hcon;
6978 struct l2cap_chan *chan;
/* Connectionless data is only defined for BR/EDR links. */
6980 if (hcon->type != ACL_LINK)
6983 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6988 BT_DBG("chan %p, len %d", chan, skb->len);
6990 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6993 if (chan->imtu < skb->len)
6996 /* Store remote BD_ADDR and PSM for msg_name */
6997 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6998 bt_cb(skb)->psm = psm;
7000 if (!chan->ops->recv(chan, skb)) {
7001 l2cap_chan_put(chan);
7006 l2cap_chan_put(chan);
/* Demultiplex one complete L2CAP frame by CID: signaling channels,
 * connectionless data, or a data channel.  Frames arriving before the
 * HCI connection is fully up are queued on conn->pending_rx and
 * replayed later by process_pending_rx().
 * NOTE(review): drop lines are elided in this view.
 */
7011 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7013 struct l2cap_hdr *lh = (void *) skb->data;
7014 struct hci_conn *hcon = conn->hcon;
7018 if (hcon->state != BT_CONNECTED) {
7019 BT_DBG("queueing pending rx skb");
7020 skb_queue_tail(&conn->pending_rx, skb);
7024 skb_pull(skb, L2CAP_HDR_SIZE);
7025 cid = __le16_to_cpu(lh->cid);
7026 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length. */
7028 if (len != skb->len) {
7033 /* Since we can't actively block incoming LE connections we must
7034 * at least ensure that we ignore incoming data from them.
7036 if (hcon->type == LE_LINK &&
7037 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7038 bdaddr_dst_type(hcon))) {
7043 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7046 case L2CAP_CID_SIGNALING:
7047 l2cap_sig_channel(conn, skb);
7050 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM as the first payload bytes. */
7051 psm = get_unaligned((__le16 *) skb->data);
7052 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7053 l2cap_conless_channel(conn, psm, skb);
7056 case L2CAP_CID_LE_SIGNALING:
7057 l2cap_le_sig_channel(conn, skb);
7061 l2cap_data_channel(conn, cid, skb);
/* Workqueue handler: replay frames that arrived before the HCI link
 * reached BT_CONNECTED (queued by l2cap_recv_frame()).
 */
7066 static void process_pending_rx(struct work_struct *work)
7068 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7070 struct sk_buff *skb;
7074 while ((skb = skb_dequeue(&conn->pending_rx)))
7075 l2cap_recv_frame(conn, skb);
/* Allocate and initialize the l2cap_conn for an HCI connection: create
 * the HCI channel, pick an MTU from the link type, advertise the local
 * fixed channels, and set up locks, lists, timers and work items.
 * Returns the existing conn if one is already attached (early-return
 * path elided in this view).
 */
7078 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7080 struct l2cap_conn *conn = hcon->l2cap_data;
7081 struct hci_chan *hchan;
7086 hchan = hci_chan_create(hcon);
7090 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan we just created. */
7092 hci_chan_del(hchan);
7096 kref_init(&conn->ref);
7097 hcon->l2cap_data = conn;
7098 conn->hcon = hci_conn_get(hcon);
7099 conn->hchan = hchan;
7101 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* LE links use the controller's LE MTU when it advertises one;
 * everything else uses the ACL MTU. */
7103 switch (hcon->type) {
7105 if (hcon->hdev->le_mtu) {
7106 conn->mtu = hcon->hdev->le_mtu;
7111 conn->mtu = hcon->hdev->acl_mtu;
7115 conn->feat_mask = 0;
7117 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7119 if (hcon->type == ACL_LINK &&
7120 test_bit(HCI_HS_ENABLED, &hcon->hdev->dev_flags))
7121 conn->local_fixed_chan |= L2CAP_FC_A2MP;
/* BR/EDR SMP fixed channel requires LE + Secure Connections support
 * (or the debug force flag). */
7123 if (test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags) &&
7124 (bredr_sc_enabled(hcon->hdev) ||
7125 test_bit(HCI_FORCE_BREDR_SMP, &hcon->hdev->dbg_flags)))
7126 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7128 mutex_init(&conn->ident_lock);
7129 mutex_init(&conn->chan_lock);
7131 INIT_LIST_HEAD(&conn->chan_l);
7132 INIT_LIST_HEAD(&conn->users);
7134 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7136 skb_queue_head_init(&conn->pending_rx);
7137 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7138 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7140 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE PSMs fit in one
 * byte; BR/EDR PSMs must be odd with bit 8 clear (Core Spec PSM
 * encoding rules).
 */
7145 static bool is_valid_psm(u16 psm, u8 dst_type) {
7149 if (bdaddr_type_is_le(dst_type))
7150 return (psm <= 0x00ff);
7152 /* PSM must be odd and lsb of upper byte must be 0 */
7153 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP channel connection: validate PSM/CID and
 * channel mode, resolve a route, create (or reuse) the HCI connection
 * of the right type, attach the channel to the l2cap_conn, and kick
 * off the connect/security procedure if the link is already up.
 * Returns 0 on success or a negative errno.
 * NOTE(review): error gotos / returns are elided in this view.
 */
7156 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7157 bdaddr_t *dst, u8 dst_type)
7159 struct l2cap_conn *conn;
7160 struct hci_conn *hcon;
7161 struct hci_dev *hdev;
7164 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7165 dst_type, __le16_to_cpu(psm));
7167 hdev = hci_get_route(dst, &chan->src);
7169 return -EHOSTUNREACH;
/* Raw channels bypass PSM validation; everything else needs a valid
 * PSM or an explicit CID. */
7173 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7174 chan->chan_type != L2CAP_CHAN_RAW) {
7179 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7184 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7189 switch (chan->mode) {
7190 case L2CAP_MODE_BASIC:
7192 case L2CAP_MODE_LE_FLOWCTL:
7193 l2cap_le_flowctl_init(chan);
7195 case L2CAP_MODE_ERTM:
7196 case L2CAP_MODE_STREAMING:
7205 switch (chan->state) {
7209 /* Already connecting */
7214 /* Already connected */
7228 /* Set destination address and psm */
7229 bacpy(&chan->dst, dst);
7230 chan->dst_type = dst_type;
7235 if (bdaddr_type_is_le(dst_type)) {
7238 /* Convert from L2CAP channel address type to HCI address type
7240 if (dst_type == BDADDR_LE_PUBLIC)
7241 dst_type = ADDR_LE_DEV_PUBLIC;
7243 dst_type = ADDR_LE_DEV_RANDOM;
/* While advertising we must connect as slave. */
7245 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7246 role = HCI_ROLE_SLAVE;
7248 role = HCI_ROLE_MASTER;
7250 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
7251 HCI_LE_CONN_TIMEOUT, role);
7253 u8 auth_type = l2cap_get_auth_type(chan);
7254 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7258 err = PTR_ERR(hcon);
7262 conn = l2cap_conn_add(hcon);
7264 hci_conn_drop(hcon);
7269 mutex_lock(&conn->chan_lock);
7270 l2cap_chan_lock(chan);
/* Destination CID already in use on this connection. */
7272 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7273 hci_conn_drop(hcon);
7278 /* Update source addr of the socket */
7279 bacpy(&chan->src, &hcon->src);
7280 chan->src_type = bdaddr_src_type(hcon);
7282 __l2cap_chan_add(conn, chan);
7284 /* l2cap_chan_add takes its own ref so we can drop this one */
7285 hci_conn_drop(hcon);
7287 l2cap_state_change(chan, BT_CONNECT);
7288 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7290 /* Release chan->sport so that it can be reused by other
7291 * sockets (as it's only used for listening sockets).
7293 write_lock(&chan_list_lock);
7295 write_unlock(&chan_list_lock);
7297 if (hcon->state == BT_CONNECTED) {
/* Link already up: fixed/raw channels connect immediately once
 * security passes; connection-oriented ones start config. */
7298 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7299 __clear_chan_timer(chan);
7300 if (l2cap_chan_check_security(chan, true))
7301 l2cap_state_change(chan, BT_CONNECTED);
7303 l2cap_do_start(chan);
7309 l2cap_chan_unlock(chan);
7310 mutex_unlock(&conn->chan_lock);
7312 hci_dev_unlock(hdev);
7318 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming BR/EDR connection request: scan the
 * global channel list for listeners on this adapter and report the
 * accept/role-switch link mode to use.
 */
7320 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7322 int exact = 0, lm1 = 0, lm2 = 0;
7323 struct l2cap_chan *c;
7325 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7327 /* Find listening sockets and check their link_mode */
7328 read_lock(&chan_list_lock);
7329 list_for_each_entry(c, &chan_list, global_l) {
7330 if (c->state != BT_LISTEN)
/* lm1 accumulates modes of listeners bound to this exact adapter */
7333 if (!bacmp(&c->src, &hdev->bdaddr)) {
7334 lm1 |= HCI_LM_ACCEPT;
7335 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7336 lm1 |= HCI_LM_MASTER;
/* lm2 accumulates modes of wildcard (BDADDR_ANY) listeners */
7338 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7339 lm2 |= HCI_LM_ACCEPT;
7340 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7341 lm2 |= HCI_LM_MASTER;
7344 read_unlock(&chan_list_lock);
/* Prefer the exact-address listener's mode over the wildcard's.
 * NOTE(review): the line that sets "exact" is elided in this chunk.
 */
7346 return exact ? lm1 : lm2;
7349 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7350 * from an existing channel in the list or from the beginning of the
7351 * global list (by passing NULL as first parameter).
/* See the comment above: iterate BT_LISTEN fixed channels matching
 * @hcon's local address/type, resuming after @c (or from the list
 * head when @c is NULL).
 */
7353 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7354 struct hci_conn *hcon)
7356 u8 src_type = bdaddr_src_type(hcon);
7358 read_lock(&chan_list_lock);
/* Resume after the caller-supplied entry, or start at the head */
7361 c = list_next_entry(c, global_l);
7363 c = list_entry(chan_list.next, typeof(*c), global_l);
7365 list_for_each_entry_from(c, &chan_list, global_l) {
7366 if (c->chan_type != L2CAP_CHAN_FIXED)
7368 if (c->state != BT_LISTEN)
/* Source must be this link's local address or the wildcard */
7370 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7372 if (src_type != c->src_type)
/* Match found -- presumably a reference on the channel is taken on an
 * elided line before returning; confirm against the full source.
 */
7376 read_unlock(&chan_list_lock);
/* No match: drop the lock (elided return NULL presumably follows) */
7380 read_unlock(&chan_list_lock);
/* HCI callback: the ACL/LE link to hcon->dst is now established
 * (status == 0) or failed.  On success, create the l2cap_conn and
 * offer the new link to every listening fixed channel.
 */
7385 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7387 struct hci_dev *hdev = hcon->hdev;
7388 struct l2cap_conn *conn;
7389 struct l2cap_chan *pchan;
7392 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
/* On failure tear down any pending L2CAP state for this link */
7395 l2cap_conn_del(hcon, bt_to_errno(status));
7399 conn = l2cap_conn_add(hcon);
7403 dst_type = bdaddr_dst_type(hcon);
7405 /* If device is blocked, do not create channels for it */
7406 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7409 /* Find fixed channels and notify them of the new connection. We
7410 * use multiple individual lookups, continuing each time where
7411 * we left off, because the list lock would prevent calling the
7412 * potentially sleeping l2cap_chan_lock() function.
7414 pchan = l2cap_global_fixed_chan(NULL, hcon);
7416 struct l2cap_chan *chan, *next;
7418 /* Client fixed channels should override server ones */
7419 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7422 l2cap_chan_lock(pchan);
7423 chan = pchan->ops->new_connection(pchan);
/* Clone addressing info into the child channel and attach it */
7425 bacpy(&chan->src, &hcon->src);
7426 bacpy(&chan->dst, &hcon->dst);
7427 chan->src_type = bdaddr_src_type(hcon);
7428 chan->dst_type = dst_type;
7430 __l2cap_chan_add(conn, chan);
7433 l2cap_chan_unlock(pchan);
/* Advance the iteration before dropping our ref on the current entry */
7435 next = l2cap_global_fixed_chan(pchan, hcon);
7436 l2cap_chan_put(pchan);
7440 l2cap_conn_ready(conn);
/* HCI callback: report the disconnect reason recorded for this link.
 * Falls back to HCI_ERROR_REMOTE_USER_TERM when no l2cap_conn exists
 * (the NULL check is on an elided line in this chunk).
 */
7443 int l2cap_disconn_ind(struct hci_conn *hcon)
7445 struct l2cap_conn *conn = hcon->l2cap_data;
7447 BT_DBG("hcon %p", hcon);
7450 return HCI_ERROR_REMOTE_USER_TERM;
7451 return conn->disc_reason;
/* HCI callback: the link is gone -- tear down all L2CAP state for it,
 * converting the HCI reason code to an errno for the channels.
 */
7454 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7456 BT_DBG("hcon %p reason %d", hcon, reason);
7458 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * on encryption loss, MEDIUM-security channels get a grace timer
 * while HIGH/FIPS channels are closed immediately; on encryption
 * gain, the MEDIUM-security grace timer is cancelled.
 */
7461 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7463 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7466 if (encrypt == 0x00) {
7467 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7468 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7469 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7470 chan->sec_level == BT_SECURITY_FIPS)
7471 l2cap_chan_close(chan, ECONNREFUSED);
/* else branch (elided): encryption came up */
7473 if (chan->sec_level == BT_SECURITY_MEDIUM)
7474 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption on the link completed with
 * @status; walk every channel on the connection and advance or abort
 * its state machine accordingly.
 *
 * NOTE(review): several lines (locals, "continue" statements, braces)
 * are elided in this chunk; comments cover only the visible code.
 */
7478 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7480 struct l2cap_conn *conn = hcon->l2cap_data;
7481 struct l2cap_chan *chan;
7486 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7488 mutex_lock(&conn->chan_lock);
7490 list_for_each_entry(chan, &conn->chan_l, list) {
7491 l2cap_chan_lock(chan);
7493 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7494 state_to_string(chan->state));
/* A2MP channels manage their own security -- skip them */
7496 if (chan->scid == L2CAP_CID_A2MP) {
7497 l2cap_chan_unlock(chan);
/* Security succeeded with encryption: adopt the link's level */
7501 if (!status && encrypt)
7502 chan->sec_level = hcon->sec_level;
7504 if (!__l2cap_no_conn_pending(chan)) {
7505 l2cap_chan_unlock(chan);
/* Already-established channels just resume and re-check encryption */
7509 if (!status && (chan->state == BT_CONNECTED ||
7510 chan->state == BT_CONFIG)) {
7511 chan->ops->resume(chan);
7512 l2cap_check_encryption(chan, encrypt);
7513 l2cap_chan_unlock(chan);
/* Outgoing connect waiting on security: proceed or start disconnect */
7517 if (chan->state == BT_CONNECT) {
7519 l2cap_start_connection(chan);
7521 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Incoming connect waiting on security (BR/EDR only): build the
 * connect response now that the security outcome is known.
 */
7522 } else if (chan->state == BT_CONNECT2 &&
7523 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7524 struct l2cap_conn_rsp rsp;
/* Security OK: either defer to userspace authorization ... */
7528 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7529 res = L2CAP_CR_PEND;
7530 stat = L2CAP_CS_AUTHOR_PEND;
7531 chan->ops->defer(chan);
/* ... or accept straight into configuration */
7533 l2cap_state_change(chan, BT_CONFIG);
7534 res = L2CAP_CR_SUCCESS;
7535 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the connection and schedule disconnect */
7538 l2cap_state_change(chan, BT_DISCONN);
7539 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7540 res = L2CAP_CR_SEC_BLOCK;
7541 stat = L2CAP_CS_NO_INFO;
/* scid/dcid are swapped in the response: ours is the peer's dcid */
7544 rsp.scid = cpu_to_le16(chan->dcid);
7545 rsp.dcid = cpu_to_le16(chan->scid);
7546 rsp.result = cpu_to_le16(res);
7547 rsp.status = cpu_to_le16(stat);
7548 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* On acceptance, immediately fire the first configuration request */
7551 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7552 res == L2CAP_CR_SUCCESS) {
7554 set_bit(CONF_REQ_SENT, &chan->conf_state);
7555 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7557 l2cap_build_conf_req(chan, buf),
7559 chan->num_conf_req++;
7563 l2cap_chan_unlock(chan);
7566 mutex_unlock(&conn->chan_lock);
/* Entry point for inbound ACL data from HCI: reassemble fragmented
 * L2CAP frames (ACL start + continuation packets) into conn->rx_skb
 * and hand complete frames to l2cap_recv_frame().
 *
 * NOTE(review): several lines (flag switch arms, "goto drop" cleanup,
 * allocation-failure checks) are elided in this chunk.
 */
7571 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7573 struct l2cap_conn *conn = hcon->l2cap_data;
7574 struct l2cap_hdr *hdr;
7577 /* For AMP controller do not create l2cap conn */
7578 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7582 conn = l2cap_conn_add(hcon);
7587 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
/* Start fragment: any leftover partial frame is a protocol error */
7591 case ACL_START_NO_FLUSH:
7594 BT_ERR("Unexpected start frame (len %d)", skb->len);
7595 kfree_skb(conn->rx_skb);
7596 conn->rx_skb = NULL;
7598 l2cap_conn_unreliable(conn, ECOMM);
7601 /* Start fragment always begin with Basic L2CAP header */
7602 if (skb->len < L2CAP_HDR_SIZE) {
7603 BT_ERR("Frame is too short (len %d)", skb->len);
7604 l2cap_conn_unreliable(conn, ECOMM);
/* Total frame length = L2CAP payload length + basic header */
7608 hdr = (struct l2cap_hdr *) skb->data;
7609 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7611 if (len == skb->len) {
7612 /* Complete frame received */
7613 l2cap_recv_frame(conn, skb);
7617 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
/* First fragment may not exceed the advertised total length */
7619 if (skb->len > len) {
7620 BT_ERR("Frame is too long (len %d, expected len %d)",
7622 l2cap_conn_unreliable(conn, ECOMM);
7626 /* Allocate skb for the complete frame (with header) */
7627 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7631 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes are still outstanding */
7633 conn->rx_len = len - skb->len;
/* Continuation fragment path below */
7637 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7639 if (!conn->rx_len) {
7640 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7641 l2cap_conn_unreliable(conn, ECOMM);
7645 if (skb->len > conn->rx_len) {
7646 BT_ERR("Fragment is too long (len %d, expected %d)",
7647 skb->len, conn->rx_len);
7648 kfree_skb(conn->rx_skb);
7649 conn->rx_skb = NULL;
7651 l2cap_conn_unreliable(conn, ECOMM);
/* Append this fragment to the reassembly buffer */
7655 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7657 conn->rx_len -= skb->len;
7659 if (!conn->rx_len) {
7660 /* Complete frame received. l2cap_recv_frame
7661 * takes ownership of the skb so set the global
7662 * rx_skb pointer to NULL first.
7664 struct sk_buff *rx_skb = conn->rx_skb;
7665 conn->rx_skb = NULL;
7666 l2cap_recv_frame(conn, rx_skb);
/* debugfs seq_file callback: dump one line per global L2CAP channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode).
 */
7676 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7678 struct l2cap_chan *c;
7680 read_lock(&chan_list_lock);
7682 list_for_each_entry(c, &chan_list, global_l) {
7683 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7684 &c->src, c->src_type, &c->dst, c->dst_type,
7685 c->state, __le16_to_cpu(c->psm),
7686 c->scid, c->dcid, c->imtu, c->omtu,
7687 c->sec_level, c->mode);
7690 read_unlock(&chan_list_lock);
/* debugfs open: bind the seq_file single-show helper to our dumper */
7695 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7697 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (read-only seq_file;
 * the .read line is elided in this chunk).
 */
7700 static const struct file_operations l2cap_debugfs_fops = {
7701 .open = l2cap_debugfs_open,
7703 .llseek = seq_lseek,
7704 .release = single_release,
/* Handle to the "l2cap" debugfs file, created in l2cap_init() */
7707 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and, when the Bluetooth
 * debugfs root exists, create the "l2cap" dump file plus the LE
 * flow-control tunables.  (Error-return lines are elided here.)
 */
7709 int __init l2cap_init(void)
7713 err = l2cap_init_sockets();
/* Skip debugfs entries when the bt_debugfs root was not created */
7717 if (IS_ERR_OR_NULL(bt_debugfs))
7720 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7721 NULL, &l2cap_debugfs_fops);
7723 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7725 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
/* Module exit: remove the debugfs entry and unregister the sockets */
7731 void l2cap_exit(void)
7733 debugfs_remove(l2cap_debugfs);
7734 l2cap_cleanup_sockets();
/* Module parameter: lets ERTM be disabled at load time or via sysfs */
7737 module_param(disable_ertm, bool, 0644);
7738 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");