2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
79 static inline void chan_hold(struct l2cap_chan *c)
81 atomic_inc(&c->refcnt);
84 static inline void chan_put(struct l2cap_chan *c)
86 if (atomic_dec_and_test(&c->refcnt))
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
94 list_for_each_entry(c, &conn->chan_l, list) {
101 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 struct l2cap_chan *c;
105 list_for_each_entry(c, &conn->chan_l, list) {
112 /* Find channel with given SCID.
113 * Returns locked socket */
114 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
116 struct l2cap_chan *c;
118 read_lock(&conn->chan_lock);
119 c = __l2cap_get_chan_by_scid(conn, cid);
122 read_unlock(&conn->chan_lock);
126 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
128 struct l2cap_chan *c;
130 list_for_each_entry(c, &conn->chan_l, list) {
131 if (c->ident == ident)
137 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
139 struct l2cap_chan *c;
141 read_lock(&conn->chan_lock);
142 c = __l2cap_get_chan_by_ident(conn, ident);
145 read_unlock(&conn->chan_lock);
149 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
151 struct l2cap_chan *c;
153 list_for_each_entry(c, &chan_list, global_l) {
154 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
160 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
164 write_lock_bh(&chan_list_lock);
166 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
179 for (p = 0x1001; p < 0x1100; p += 2)
180 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
181 chan->psm = cpu_to_le16(p);
182 chan->sport = cpu_to_le16(p);
189 write_unlock_bh(&chan_list_lock);
193 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
195 write_lock_bh(&chan_list_lock);
199 write_unlock_bh(&chan_list_lock);
204 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
206 u16 cid = L2CAP_CID_DYN_START;
208 for (; cid < L2CAP_CID_DYN_END; cid++) {
209 if (!__l2cap_get_chan_by_scid(conn, cid))
216 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
218 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
220 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
224 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
226 BT_DBG("chan %p state %d", chan, chan->state);
228 if (timer_pending(timer) && del_timer(timer))
232 static void l2cap_state_change(struct l2cap_chan *chan, int state)
235 chan->ops->state_change(chan->data, state);
/* Channel timer expiry: tear the channel down with an error that depends
 * on how far connection setup had progressed. */
static void l2cap_chan_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
	struct sock *sk = chan->sk;
	int reason;

	BT_DBG("chan %p state %d", chan, chan->state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		bh_unlock_sock(sk);
		chan_put(chan);
		return;
	}

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
			chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	chan_put(chan);
}
272 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
274 struct l2cap_chan *chan;
276 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
282 write_lock_bh(&chan_list_lock);
283 list_add(&chan->global_l, &chan_list);
284 write_unlock_bh(&chan_list_lock);
286 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
288 chan->state = BT_OPEN;
290 atomic_set(&chan->refcnt, 1);
292 BT_DBG("sk %p chan %p", sk, chan);
297 void l2cap_chan_destroy(struct l2cap_chan *chan)
299 write_lock_bh(&chan_list_lock);
300 list_del(&chan->global_l);
301 write_unlock_bh(&chan_list_lock);
306 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
308 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
309 chan->psm, chan->dcid);
311 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
315 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
316 if (conn->hcon->type == LE_LINK) {
318 chan->omtu = L2CAP_LE_DEFAULT_MTU;
319 chan->scid = L2CAP_CID_LE_DATA;
320 chan->dcid = L2CAP_CID_LE_DATA;
322 /* Alloc CID for connection-oriented socket */
323 chan->scid = l2cap_alloc_cid(conn);
324 chan->omtu = L2CAP_DEFAULT_MTU;
326 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
327 /* Connectionless socket */
328 chan->scid = L2CAP_CID_CONN_LESS;
329 chan->dcid = L2CAP_CID_CONN_LESS;
330 chan->omtu = L2CAP_DEFAULT_MTU;
332 /* Raw socket can send/recv signalling messages only */
333 chan->scid = L2CAP_CID_SIGNALING;
334 chan->dcid = L2CAP_CID_SIGNALING;
335 chan->omtu = L2CAP_DEFAULT_MTU;
338 chan->local_id = L2CAP_BESTEFFORT_ID;
339 chan->local_stype = L2CAP_SERV_BESTEFFORT;
340 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
341 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
342 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
343 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
347 list_add(&chan->list, &conn->chan_l);
351 * Must be called on the locked socket. */
352 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
354 struct sock *sk = chan->sk;
355 struct l2cap_conn *conn = chan->conn;
356 struct sock *parent = bt_sk(sk)->parent;
358 __clear_chan_timer(chan);
360 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
363 /* Delete from channel list */
364 write_lock_bh(&conn->chan_lock);
365 list_del(&chan->list);
366 write_unlock_bh(&conn->chan_lock);
370 hci_conn_put(conn->hcon);
373 l2cap_state_change(chan, BT_CLOSED);
374 sock_set_flag(sk, SOCK_ZAPPED);
380 bt_accept_unlink(sk);
381 parent->sk_data_ready(parent, 0);
383 sk->sk_state_change(sk);
385 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
386 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
389 skb_queue_purge(&chan->tx_q);
391 if (chan->mode == L2CAP_MODE_ERTM) {
392 struct srej_list *l, *tmp;
394 __clear_retrans_timer(chan);
395 __clear_monitor_timer(chan);
396 __clear_ack_timer(chan);
398 skb_queue_purge(&chan->srej_q);
400 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
407 static void l2cap_chan_cleanup_listen(struct sock *parent)
411 BT_DBG("parent %p", parent);
413 /* Close not yet accepted channels */
414 while ((sk = bt_accept_dequeue(parent, NULL))) {
415 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
416 __clear_chan_timer(chan);
418 l2cap_chan_close(chan, ECONNRESET);
420 chan->ops->close(chan->data);
424 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
426 struct l2cap_conn *conn = chan->conn;
427 struct sock *sk = chan->sk;
429 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
431 switch (chan->state) {
433 l2cap_chan_cleanup_listen(sk);
435 l2cap_state_change(chan, BT_CLOSED);
436 sock_set_flag(sk, SOCK_ZAPPED);
441 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
442 conn->hcon->type == ACL_LINK) {
443 __clear_chan_timer(chan);
444 __set_chan_timer(chan, sk->sk_sndtimeo);
445 l2cap_send_disconn_req(conn, chan, reason);
447 l2cap_chan_del(chan, reason);
451 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
452 conn->hcon->type == ACL_LINK) {
453 struct l2cap_conn_rsp rsp;
456 if (bt_sk(sk)->defer_setup)
457 result = L2CAP_CR_SEC_BLOCK;
459 result = L2CAP_CR_BAD_PSM;
460 l2cap_state_change(chan, BT_DISCONN);
462 rsp.scid = cpu_to_le16(chan->dcid);
463 rsp.dcid = cpu_to_le16(chan->scid);
464 rsp.result = cpu_to_le16(result);
465 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
466 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
470 l2cap_chan_del(chan, reason);
475 l2cap_chan_del(chan, reason);
479 sock_set_flag(sk, SOCK_ZAPPED);
484 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
486 if (chan->chan_type == L2CAP_CHAN_RAW) {
487 switch (chan->sec_level) {
488 case BT_SECURITY_HIGH:
489 return HCI_AT_DEDICATED_BONDING_MITM;
490 case BT_SECURITY_MEDIUM:
491 return HCI_AT_DEDICATED_BONDING;
493 return HCI_AT_NO_BONDING;
495 } else if (chan->psm == cpu_to_le16(0x0001)) {
496 if (chan->sec_level == BT_SECURITY_LOW)
497 chan->sec_level = BT_SECURITY_SDP;
499 if (chan->sec_level == BT_SECURITY_HIGH)
500 return HCI_AT_NO_BONDING_MITM;
502 return HCI_AT_NO_BONDING;
504 switch (chan->sec_level) {
505 case BT_SECURITY_HIGH:
506 return HCI_AT_GENERAL_BONDING_MITM;
507 case BT_SECURITY_MEDIUM:
508 return HCI_AT_GENERAL_BONDING;
510 return HCI_AT_NO_BONDING;
515 /* Service level security */
516 int l2cap_chan_check_security(struct l2cap_chan *chan)
518 struct l2cap_conn *conn = chan->conn;
521 auth_type = l2cap_get_auth_type(chan);
523 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
526 static u8 l2cap_get_ident(struct l2cap_conn *conn)
530 /* Get next available identificator.
531 * 1 - 128 are used by kernel.
532 * 129 - 199 are reserved.
533 * 200 - 254 are used by utilities like l2ping, etc.
536 spin_lock_bh(&conn->lock);
538 if (++conn->tx_ident > 128)
543 spin_unlock_bh(&conn->lock);
548 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
550 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
553 BT_DBG("code 0x%2.2x", code);
558 if (lmp_no_flush_capable(conn->hcon->hdev))
559 flags = ACL_START_NO_FLUSH;
563 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
564 skb->priority = HCI_PRIO_MAX;
566 hci_send_acl(conn->hchan, skb, flags);
569 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
571 struct hci_conn *hcon = chan->conn->hcon;
574 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
577 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
578 lmp_no_flush_capable(hcon->hdev))
579 flags = ACL_START_NO_FLUSH;
583 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
584 hci_send_acl(chan->conn->hchan, skb, flags);
587 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
590 struct l2cap_hdr *lh;
591 struct l2cap_conn *conn = chan->conn;
594 if (chan->state != BT_CONNECTED)
597 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
598 hlen = L2CAP_EXT_HDR_SIZE;
600 hlen = L2CAP_ENH_HDR_SIZE;
602 if (chan->fcs == L2CAP_FCS_CRC16)
603 hlen += L2CAP_FCS_SIZE;
605 BT_DBG("chan %p, control 0x%8.8x", chan, control);
607 count = min_t(unsigned int, conn->mtu, hlen);
609 control |= __set_sframe(chan);
611 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
612 control |= __set_ctrl_final(chan);
614 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
615 control |= __set_ctrl_poll(chan);
617 skb = bt_skb_alloc(count, GFP_ATOMIC);
621 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
622 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
623 lh->cid = cpu_to_le16(chan->dcid);
625 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
627 if (chan->fcs == L2CAP_FCS_CRC16) {
628 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
629 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
632 skb->priority = HCI_PRIO_MAX;
633 l2cap_do_send(chan, skb);
636 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
638 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
639 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
640 set_bit(CONN_RNR_SENT, &chan->conn_state);
642 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
644 control |= __set_reqseq(chan, chan->buffer_seq);
646 l2cap_send_sframe(chan, control);
649 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
651 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
654 static void l2cap_do_start(struct l2cap_chan *chan)
656 struct l2cap_conn *conn = chan->conn;
658 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
659 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
662 if (l2cap_chan_check_security(chan) &&
663 __l2cap_no_conn_pending(chan)) {
664 struct l2cap_conn_req req;
665 req.scid = cpu_to_le16(chan->scid);
668 chan->ident = l2cap_get_ident(conn);
669 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
671 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
675 struct l2cap_info_req req;
676 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
678 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
679 conn->info_ident = l2cap_get_ident(conn);
681 mod_timer(&conn->info_timer, jiffies +
682 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
684 l2cap_send_cmd(conn, conn->info_ident,
685 L2CAP_INFO_REQ, sizeof(req), &req);
689 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
691 u32 local_feat_mask = l2cap_feat_mask;
693 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
696 case L2CAP_MODE_ERTM:
697 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
698 case L2CAP_MODE_STREAMING:
699 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
705 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
708 struct l2cap_disconn_req req;
715 if (chan->mode == L2CAP_MODE_ERTM) {
716 __clear_retrans_timer(chan);
717 __clear_monitor_timer(chan);
718 __clear_ack_timer(chan);
721 req.dcid = cpu_to_le16(chan->dcid);
722 req.scid = cpu_to_le16(chan->scid);
723 l2cap_send_cmd(conn, l2cap_get_ident(conn),
724 L2CAP_DISCONN_REQ, sizeof(req), &req);
726 l2cap_state_change(chan, BT_DISCONN);
730 /* ---- L2CAP connections ---- */
731 static void l2cap_conn_start(struct l2cap_conn *conn)
733 struct l2cap_chan *chan, *tmp;
735 BT_DBG("conn %p", conn);
737 read_lock(&conn->chan_lock);
739 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
740 struct sock *sk = chan->sk;
744 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
749 if (chan->state == BT_CONNECT) {
750 struct l2cap_conn_req req;
752 if (!l2cap_chan_check_security(chan) ||
753 !__l2cap_no_conn_pending(chan)) {
758 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
759 && test_bit(CONF_STATE2_DEVICE,
760 &chan->conf_state)) {
761 /* l2cap_chan_close() calls list_del(chan)
762 * so release the lock */
763 read_unlock(&conn->chan_lock);
764 l2cap_chan_close(chan, ECONNRESET);
765 read_lock(&conn->chan_lock);
770 req.scid = cpu_to_le16(chan->scid);
773 chan->ident = l2cap_get_ident(conn);
774 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
776 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
779 } else if (chan->state == BT_CONNECT2) {
780 struct l2cap_conn_rsp rsp;
782 rsp.scid = cpu_to_le16(chan->dcid);
783 rsp.dcid = cpu_to_le16(chan->scid);
785 if (l2cap_chan_check_security(chan)) {
786 if (bt_sk(sk)->defer_setup) {
787 struct sock *parent = bt_sk(sk)->parent;
788 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
789 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
791 parent->sk_data_ready(parent, 0);
794 l2cap_state_change(chan, BT_CONFIG);
795 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
796 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
799 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
800 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
803 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
806 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
807 rsp.result != L2CAP_CR_SUCCESS) {
812 set_bit(CONF_REQ_SENT, &chan->conf_state);
813 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
814 l2cap_build_conf_req(chan, buf), buf);
815 chan->num_conf_req++;
821 read_unlock(&conn->chan_lock);
824 /* Find socket with cid and source bdaddr.
825 * Returns closest match, locked.
827 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
829 struct l2cap_chan *c, *c1 = NULL;
831 read_lock(&chan_list_lock);
833 list_for_each_entry(c, &chan_list, global_l) {
834 struct sock *sk = c->sk;
836 if (state && c->state != state)
839 if (c->scid == cid) {
841 if (!bacmp(&bt_sk(sk)->src, src)) {
842 read_unlock(&chan_list_lock);
847 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
852 read_unlock(&chan_list_lock);
857 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
859 struct sock *parent, *sk;
860 struct l2cap_chan *chan, *pchan;
864 /* Check if we have socket listening on cid */
865 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
872 bh_lock_sock(parent);
874 /* Check for backlog size */
875 if (sk_acceptq_is_full(parent)) {
876 BT_DBG("backlog full %d", parent->sk_ack_backlog);
880 chan = pchan->ops->new_connection(pchan->data);
886 write_lock_bh(&conn->chan_lock);
888 hci_conn_hold(conn->hcon);
890 bacpy(&bt_sk(sk)->src, conn->src);
891 bacpy(&bt_sk(sk)->dst, conn->dst);
893 bt_accept_enqueue(parent, sk);
895 __l2cap_chan_add(conn, chan);
897 __set_chan_timer(chan, sk->sk_sndtimeo);
899 l2cap_state_change(chan, BT_CONNECTED);
900 parent->sk_data_ready(parent, 0);
902 write_unlock_bh(&conn->chan_lock);
905 bh_unlock_sock(parent);
908 static void l2cap_chan_ready(struct sock *sk)
910 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
911 struct sock *parent = bt_sk(sk)->parent;
913 BT_DBG("sk %p, parent %p", sk, parent);
915 chan->conf_state = 0;
916 __clear_chan_timer(chan);
918 l2cap_state_change(chan, BT_CONNECTED);
919 sk->sk_state_change(sk);
922 parent->sk_data_ready(parent, 0);
925 static void l2cap_conn_ready(struct l2cap_conn *conn)
927 struct l2cap_chan *chan;
929 BT_DBG("conn %p", conn);
931 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
932 l2cap_le_conn_ready(conn);
934 if (conn->hcon->out && conn->hcon->type == LE_LINK)
935 smp_conn_security(conn, conn->hcon->pending_sec_level);
937 read_lock(&conn->chan_lock);
939 list_for_each_entry(chan, &conn->chan_l, list) {
940 struct sock *sk = chan->sk;
944 if (conn->hcon->type == LE_LINK) {
945 if (smp_conn_security(conn, chan->sec_level))
946 l2cap_chan_ready(sk);
948 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
949 __clear_chan_timer(chan);
950 l2cap_state_change(chan, BT_CONNECTED);
951 sk->sk_state_change(sk);
953 } else if (chan->state == BT_CONNECT)
954 l2cap_do_start(chan);
959 read_unlock(&conn->chan_lock);
962 /* Notify sockets that we cannot guaranty reliability anymore */
963 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
965 struct l2cap_chan *chan;
967 BT_DBG("conn %p", conn);
969 read_lock(&conn->chan_lock);
971 list_for_each_entry(chan, &conn->chan_l, list) {
972 struct sock *sk = chan->sk;
974 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
978 read_unlock(&conn->chan_lock);
981 static void l2cap_info_timeout(unsigned long arg)
983 struct l2cap_conn *conn = (void *) arg;
985 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
986 conn->info_ident = 0;
988 l2cap_conn_start(conn);
991 static void l2cap_conn_del(struct hci_conn *hcon, int err)
993 struct l2cap_conn *conn = hcon->l2cap_data;
994 struct l2cap_chan *chan, *l;
1000 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1002 kfree_skb(conn->rx_skb);
1005 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1008 l2cap_chan_del(chan, err);
1010 chan->ops->close(chan->data);
1013 hci_chan_del(conn->hchan);
1015 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1016 del_timer_sync(&conn->info_timer);
1018 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1019 del_timer(&conn->security_timer);
1020 smp_chan_destroy(conn);
1023 hcon->l2cap_data = NULL;
1027 static void security_timeout(unsigned long arg)
1029 struct l2cap_conn *conn = (void *) arg;
1031 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1034 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1036 struct l2cap_conn *conn = hcon->l2cap_data;
1037 struct hci_chan *hchan;
1042 hchan = hci_chan_create(hcon);
1046 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1048 hci_chan_del(hchan);
1052 hcon->l2cap_data = conn;
1054 conn->hchan = hchan;
1056 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1058 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1059 conn->mtu = hcon->hdev->le_mtu;
1061 conn->mtu = hcon->hdev->acl_mtu;
1063 conn->src = &hcon->hdev->bdaddr;
1064 conn->dst = &hcon->dst;
1066 conn->feat_mask = 0;
1068 spin_lock_init(&conn->lock);
1069 rwlock_init(&conn->chan_lock);
1071 INIT_LIST_HEAD(&conn->chan_l);
1073 if (hcon->type == LE_LINK)
1074 setup_timer(&conn->security_timer, security_timeout,
1075 (unsigned long) conn);
1077 setup_timer(&conn->info_timer, l2cap_info_timeout,
1078 (unsigned long) conn);
1080 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1085 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1087 write_lock_bh(&conn->chan_lock);
1088 __l2cap_chan_add(conn, chan);
1089 write_unlock_bh(&conn->chan_lock);
1092 /* ---- Socket interface ---- */
1094 /* Find socket with psm and source bdaddr.
1095 * Returns closest match.
1097 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1099 struct l2cap_chan *c, *c1 = NULL;
1101 read_lock(&chan_list_lock);
1103 list_for_each_entry(c, &chan_list, global_l) {
1104 struct sock *sk = c->sk;
1106 if (state && c->state != state)
1109 if (c->psm == psm) {
1111 if (!bacmp(&bt_sk(sk)->src, src)) {
1112 read_unlock(&chan_list_lock);
1117 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1122 read_unlock(&chan_list_lock);
1127 int l2cap_chan_connect(struct l2cap_chan *chan)
1129 struct sock *sk = chan->sk;
1130 bdaddr_t *src = &bt_sk(sk)->src;
1131 bdaddr_t *dst = &bt_sk(sk)->dst;
1132 struct l2cap_conn *conn;
1133 struct hci_conn *hcon;
1134 struct hci_dev *hdev;
1138 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1141 hdev = hci_get_route(dst, src);
1143 return -EHOSTUNREACH;
1145 hci_dev_lock_bh(hdev);
1147 auth_type = l2cap_get_auth_type(chan);
1149 if (chan->dcid == L2CAP_CID_LE_DATA)
1150 hcon = hci_connect(hdev, LE_LINK, dst,
1151 chan->sec_level, auth_type);
1153 hcon = hci_connect(hdev, ACL_LINK, dst,
1154 chan->sec_level, auth_type);
1157 err = PTR_ERR(hcon);
1161 conn = l2cap_conn_add(hcon, 0);
1168 /* Update source addr of the socket */
1169 bacpy(src, conn->src);
1171 l2cap_chan_add(conn, chan);
1173 l2cap_state_change(chan, BT_CONNECT);
1174 __set_chan_timer(chan, sk->sk_sndtimeo);
1176 if (hcon->state == BT_CONNECTED) {
1177 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1178 __clear_chan_timer(chan);
1179 if (l2cap_chan_check_security(chan))
1180 l2cap_state_change(chan, BT_CONNECTED);
1182 l2cap_do_start(chan);
1188 hci_dev_unlock_bh(hdev);
1193 int __l2cap_wait_ack(struct sock *sk)
1195 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1196 DECLARE_WAITQUEUE(wait, current);
1200 add_wait_queue(sk_sleep(sk), &wait);
1201 set_current_state(TASK_INTERRUPTIBLE);
1202 while (chan->unacked_frames > 0 && chan->conn) {
1206 if (signal_pending(current)) {
1207 err = sock_intr_errno(timeo);
1212 timeo = schedule_timeout(timeo);
1214 set_current_state(TASK_INTERRUPTIBLE);
1216 err = sock_error(sk);
1220 set_current_state(TASK_RUNNING);
1221 remove_wait_queue(sk_sleep(sk), &wait);
1225 static void l2cap_monitor_timeout(unsigned long arg)
1227 struct l2cap_chan *chan = (void *) arg;
1228 struct sock *sk = chan->sk;
1230 BT_DBG("chan %p", chan);
1233 if (chan->retry_count >= chan->remote_max_tx) {
1234 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1239 chan->retry_count++;
1240 __set_monitor_timer(chan);
1242 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1246 static void l2cap_retrans_timeout(unsigned long arg)
1248 struct l2cap_chan *chan = (void *) arg;
1249 struct sock *sk = chan->sk;
1251 BT_DBG("chan %p", chan);
1254 chan->retry_count = 1;
1255 __set_monitor_timer(chan);
1257 set_bit(CONN_WAIT_F, &chan->conn_state);
1259 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1263 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1265 struct sk_buff *skb;
1267 while ((skb = skb_peek(&chan->tx_q)) &&
1268 chan->unacked_frames) {
1269 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1272 skb = skb_dequeue(&chan->tx_q);
1275 chan->unacked_frames--;
1278 if (!chan->unacked_frames)
1279 __clear_retrans_timer(chan);
1282 static void l2cap_streaming_send(struct l2cap_chan *chan)
1284 struct sk_buff *skb;
1288 while ((skb = skb_dequeue(&chan->tx_q))) {
1289 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1290 control |= __set_txseq(chan, chan->next_tx_seq);
1291 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1293 if (chan->fcs == L2CAP_FCS_CRC16) {
1294 fcs = crc16(0, (u8 *)skb->data,
1295 skb->len - L2CAP_FCS_SIZE);
1296 put_unaligned_le16(fcs,
1297 skb->data + skb->len - L2CAP_FCS_SIZE);
1300 l2cap_do_send(chan, skb);
1302 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1306 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1308 struct sk_buff *skb, *tx_skb;
1312 skb = skb_peek(&chan->tx_q);
1316 while (bt_cb(skb)->tx_seq != tx_seq) {
1317 if (skb_queue_is_last(&chan->tx_q, skb))
1320 skb = skb_queue_next(&chan->tx_q, skb);
1323 if (chan->remote_max_tx &&
1324 bt_cb(skb)->retries == chan->remote_max_tx) {
1325 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1329 tx_skb = skb_clone(skb, GFP_ATOMIC);
1330 bt_cb(skb)->retries++;
1332 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1333 control &= __get_sar_mask(chan);
1335 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1336 control |= __set_ctrl_final(chan);
1338 control |= __set_reqseq(chan, chan->buffer_seq);
1339 control |= __set_txseq(chan, tx_seq);
1341 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1343 if (chan->fcs == L2CAP_FCS_CRC16) {
1344 fcs = crc16(0, (u8 *)tx_skb->data,
1345 tx_skb->len - L2CAP_FCS_SIZE);
1346 put_unaligned_le16(fcs,
1347 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1350 l2cap_do_send(chan, tx_skb);
1353 static int l2cap_ertm_send(struct l2cap_chan *chan)
1355 struct sk_buff *skb, *tx_skb;
1360 if (chan->state != BT_CONNECTED)
1363 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1365 if (chan->remote_max_tx &&
1366 bt_cb(skb)->retries == chan->remote_max_tx) {
1367 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1371 tx_skb = skb_clone(skb, GFP_ATOMIC);
1373 bt_cb(skb)->retries++;
1375 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1376 control &= __get_sar_mask(chan);
1378 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1379 control |= __set_ctrl_final(chan);
1381 control |= __set_reqseq(chan, chan->buffer_seq);
1382 control |= __set_txseq(chan, chan->next_tx_seq);
1384 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1386 if (chan->fcs == L2CAP_FCS_CRC16) {
1387 fcs = crc16(0, (u8 *)skb->data,
1388 tx_skb->len - L2CAP_FCS_SIZE);
1389 put_unaligned_le16(fcs, skb->data +
1390 tx_skb->len - L2CAP_FCS_SIZE);
1393 l2cap_do_send(chan, tx_skb);
1395 __set_retrans_timer(chan);
1397 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1399 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1401 if (bt_cb(skb)->retries == 1)
1402 chan->unacked_frames++;
1404 chan->frames_sent++;
1406 if (skb_queue_is_last(&chan->tx_q, skb))
1407 chan->tx_send_head = NULL;
1409 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1417 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1421 if (!skb_queue_empty(&chan->tx_q))
1422 chan->tx_send_head = chan->tx_q.next;
1424 chan->next_tx_seq = chan->expected_ack_seq;
1425 ret = l2cap_ertm_send(chan);
1429 static void l2cap_send_ack(struct l2cap_chan *chan)
1433 control |= __set_reqseq(chan, chan->buffer_seq);
1435 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1436 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1437 set_bit(CONN_RNR_SENT, &chan->conn_state);
1438 l2cap_send_sframe(chan, control);
1442 if (l2cap_ertm_send(chan) > 0)
1445 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1446 l2cap_send_sframe(chan, control);
1449 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1451 struct srej_list *tail;
1454 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1455 control |= __set_ctrl_final(chan);
1457 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1458 control |= __set_reqseq(chan, tail->tx_seq);
1460 l2cap_send_sframe(chan, control);
1463 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1465 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1466 struct sk_buff **frag;
1469 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1475 /* Continuation fragments (no L2CAP header) */
1476 frag = &skb_shinfo(skb)->frag_list;
1478 count = min_t(unsigned int, conn->mtu, len);
1480 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1483 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1486 (*frag)->priority = skb->priority;
1491 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the user payload copied from @msg.  Returns the
 * skb or an ERR_PTR on allocation/copy failure. */
1497 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1498 struct msghdr *msg, size_t len,
1501 struct sock *sk = chan->sk;
1502 struct l2cap_conn *conn = chan->conn;
1503 struct sk_buff *skb;
/* Header = basic L2CAP header + PSM field. */
1504 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1505 struct l2cap_hdr *lh;
1507 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
/* Head skb holds at most one MTU's worth; rest goes to frag_list. */
1509 count = min_t(unsigned int, (conn->mtu - hlen), len);
1510 skb = bt_skb_send_alloc(sk, count + hlen,
1511 msg->msg_flags & MSG_DONTWAIT, &err);
1513 return ERR_PTR(err);
1515 skb->priority = priority;
1517 /* Create L2CAP header */
1518 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1519 lh->cid = cpu_to_le16(chan->dcid);
1520 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* NOTE(review): chan->psm is typically stored little-endian already;
 * put_unaligned_le16() would then byte-swap it twice on BE hosts —
 * verify against the declared type of chan->psm. */
1521 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1523 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1524 if (unlikely(err < 0)) {
1526 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus the user
 * payload from @msg.  Returns the skb or an ERR_PTR on failure. */
1531 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1532 struct msghdr *msg, size_t len,
1535 struct sock *sk = chan->sk;
1536 struct l2cap_conn *conn = chan->conn;
1537 struct sk_buff *skb;
1538 int err, count, hlen = L2CAP_HDR_SIZE;
1539 struct l2cap_hdr *lh;
1541 BT_DBG("sk %p len %d", sk, (int)len);
1543 count = min_t(unsigned int, (conn->mtu - hlen), len);
1544 skb = bt_skb_send_alloc(sk, count + hlen,
1545 msg->msg_flags & MSG_DONTWAIT, &err);
1547 return ERR_PTR(err);
1549 skb->priority = priority;
1551 /* Create L2CAP header */
1552 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1553 lh->cid = cpu_to_le16(chan->dcid);
1554 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1556 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1557 if (unlikely(err < 0)) {
1559 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: L2CAP header, enhanced or extended
 * control field, optional SDU-length field (only when @sdulen is
 * carried, i.e. the SAR START fragment), payload, and a zeroed FCS
 * placeholder when CRC16 is enabled.  Returns skb or ERR_PTR. */
1564 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1565 struct msghdr *msg, size_t len,
1566 u32 control, u16 sdulen)
1568 struct sock *sk = chan->sk;
1569 struct l2cap_conn *conn = chan->conn;
1570 struct sk_buff *skb;
1571 int err, count, hlen;
1572 struct l2cap_hdr *lh;
1574 BT_DBG("sk %p len %d", sk, (int)len);
1577 return ERR_PTR(-ENOTCONN);
/* Extended control field (4 bytes) vs enhanced (2 bytes). */
1579 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1580 hlen = L2CAP_EXT_HDR_SIZE;
1582 hlen = L2CAP_ENH_HDR_SIZE;
1585 hlen += L2CAP_SDULEN_SIZE;
1587 if (chan->fcs == L2CAP_FCS_CRC16)
1588 hlen += L2CAP_FCS_SIZE;
1590 count = min_t(unsigned int, (conn->mtu - hlen), len);
1591 skb = bt_skb_send_alloc(sk, count + hlen,
1592 msg->msg_flags & MSG_DONTWAIT, &err);
1594 return ERR_PTR(err);
1596 /* Create L2CAP header */
1597 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1598 lh->cid = cpu_to_le16(chan->dcid);
1599 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Control field width depends on FLAG_EXT_CTRL (__ctrl_size). */
1601 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1604 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1606 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1607 if (unlikely(err < 0)) {
1609 return ERR_PTR(err);
/* FCS is filled in later at transmit time; reserve space now. */
1612 if (chan->fcs == L2CAP_FCS_CRC16)
1613 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1615 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than remote_mps into a START fragment (which
 * carries the total SDU length), zero or more CONTINUE fragments, and
 * a final END fragment.  Fragments are staged on a local queue so a
 * mid-stream allocation failure can purge everything atomically, then
 * spliced onto the channel's tx queue. */
1619 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1621 struct sk_buff *skb;
1622 struct sk_buff_head sar_queue;
1626 skb_queue_head_init(&sar_queue);
/* First fragment: SAR=START, sdulen = full SDU length. */
1627 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1628 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1630 return PTR_ERR(skb);
1632 __skb_queue_tail(&sar_queue, skb);
1633 len -= chan->remote_mps;
1634 size += chan->remote_mps;
/* More than one MPS left -> CONTINUE; otherwise this is the END. */
1639 if (len > chan->remote_mps) {
1640 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1641 buflen = chan->remote_mps;
1643 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1647 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure drop every fragment built so far. */
1649 skb_queue_purge(&sar_queue);
1650 return PTR_ERR(skb);
1653 __skb_queue_tail(&sar_queue, skb);
1657 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1658 if (chan->tx_send_head == NULL)
1659 chan->tx_send_head = sar_queue.next;
/* Entry point for outbound user data on a channel.  Dispatches by
 * channel type / mode:
 *   - connectionless channels: single G-frame, sent immediately;
 *   - basic mode: single B-frame after an outgoing-MTU check;
 *   - ERTM/streaming: queue one unsegmented I-frame, or SAR-segment,
 *     then kick the appropriate transmit engine.
 * Returns bytes accepted or a negative errno. */
1664 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1667 struct sk_buff *skb;
1671 /* Connectionless channel */
1672 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1673 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1675 return PTR_ERR(skb);
1677 l2cap_do_send(chan, skb);
1681 switch (chan->mode) {
1682 case L2CAP_MODE_BASIC:
1683 /* Check outgoing MTU */
1684 if (len > chan->omtu)
1687 /* Create a basic PDU */
1688 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1690 return PTR_ERR(skb);
1692 l2cap_do_send(chan, skb);
1696 case L2CAP_MODE_ERTM:
1697 case L2CAP_MODE_STREAMING:
1698 /* Entire SDU fits into one PDU */
1699 if (len <= chan->remote_mps) {
1700 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1701 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1704 return PTR_ERR(skb);
1706 __skb_queue_tail(&chan->tx_q, skb);
1708 if (chan->tx_send_head == NULL)
1709 chan->tx_send_head = skb;
1712 /* Segment SDU into multiples PDUs */
1713 err = l2cap_sar_segment_sdu(chan, msg, len);
1718 if (chan->mode == L2CAP_MODE_STREAMING) {
1719 l2cap_streaming_send(chan);
/* In ERTM, hold off while remote is busy or we await an F-bit. */
1724 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1725 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1730 err = l2cap_ertm_send(chan);
1737 BT_DBG("bad state %1.1x", chan->mode);
1744 /* Copy frame to all raw sockets on that connection */
1745 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1747 struct sk_buff *nskb;
1748 struct l2cap_chan *chan;
1750 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under the read lock; clone the
 * skb for every RAW channel and hand the clone to its recv callback. */
1752 read_lock(&conn->chan_lock);
1753 list_for_each_entry(chan, &conn->chan_l, list) {
1754 struct sock *sk = chan->sk;
1755 if (chan->chan_type != L2CAP_CHAN_RAW)
1758 /* Don't send frame to the socket it came from */
1761 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv() takes ownership of nskb on success; non-zero means it was
 * rejected (clone freed on the elided error path). */
1765 if (chan->ops->recv(chan->data, nskb))
1768 read_unlock(&conn->chan_lock);
1771 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling-command skb: L2CAP header (CID =
 * signalling channel, LE or BR/EDR depending on link type), command
 * header (code/ident/len), then @dlen bytes of @data — fragmented over
 * frag_list skbs when it exceeds the connection MTU.  Returns NULL on
 * allocation failure (elided error paths). */
1772 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1773 u8 code, u8 ident, u16 dlen, void *data)
1775 struct sk_buff *skb, **frag;
1776 struct l2cap_cmd_hdr *cmd;
1777 struct l2cap_hdr *lh;
1780 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1781 conn, code, ident, dlen);
1783 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1784 count = min_t(unsigned int, conn->mtu, len);
1786 skb = bt_skb_alloc(count, GFP_ATOMIC);
1790 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1791 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the dedicated LE signalling CID. */
1793 if (conn->hcon->type == LE_LINK)
1794 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1796 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1798 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1801 cmd->len = cpu_to_le16(dlen);
1804 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1805 memcpy(skb_put(skb, count), data, count);
1811 /* Continuation fragments (no L2CAP header) */
1812 frag = &skb_shinfo(skb)->frag_list;
1814 count = min_t(unsigned int, conn->mtu, len);
1816 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1820 memcpy(skb_put(*frag, count), data, count);
1825 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: return its total length
 * (header + value) so the caller can walk the option list, and output
 * the option type, value length and value.  1/2/4-byte values are read
 * inline (unaligned-safe); anything else is returned as a pointer. */
1835 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1837 struct l2cap_conf_opt *opt = *ptr;
1840 len = L2CAP_CONF_OPT_SIZE + opt->len;
1848 *val = *((u8 *) opt->val);
1852 *val = get_unaligned_le16(opt->val);
1856 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer into the buffer. */
1860 *val = (unsigned long) opt->val;
1864 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it.  Mirrors l2cap_get_conf_opt: 1/2/4-byte values are
 * stored inline (little-endian, unaligned-safe), larger values are
 * memcpy'd from the pointer passed in @val. */
1868 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1870 struct l2cap_conf_opt *opt = *ptr;
1872 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1879 *((u8 *) opt->val) = val;
1883 put_unaligned_le16(val, opt->val);
1887 put_unaligned_le32(val, opt->val);
1891 memcpy(opt->val, (void *) val, len);
1895 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification option from the channel's local
 * QoS parameters and append it at *ptr.  ERTM uses the channel's own
 * service type and default latency/flush values; streaming forces
 * best-effort.  Multi-byte fields are stored little-endian. */
1898 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1900 struct l2cap_conf_efs efs;
1902 switch (chan->mode) {
1903 case L2CAP_MODE_ERTM:
1904 efs.id = chan->local_id;
1905 efs.stype = chan->local_stype;
1906 efs.msdu = cpu_to_le16(chan->local_msdu);
1907 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1908 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1909 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1912 case L2CAP_MODE_STREAMING:
1914 efs.stype = L2CAP_SERV_BESTEFFORT;
1915 efs.msdu = cpu_to_le16(chan->local_msdu);
1916 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1925 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1926 (unsigned long) &efs);
/* ERTM ack timer callback: when the timer fires, send a pending
 * acknowledgement for the channel.  Runs in timer (softirq) context,
 * hence the bh socket lock. */
1929 static void l2cap_ack_timeout(unsigned long arg)
1931 struct l2cap_chan *chan = (void *) arg;
1933 bh_lock_sock(chan->sk);
1934 l2cap_send_ack(chan);
1935 bh_unlock_sock(chan->sk);
/* Initialise per-channel ERTM state once configuration completes:
 * reset the sequence counters, arm the retransmission / monitor / ack
 * timers, prepare the SREJ queue and list, and install the ERTM
 * backlog receive handler on the socket. */
1938 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1940 struct sock *sk = chan->sk;
1942 chan->expected_ack_seq = 0;
1943 chan->unacked_frames = 0;
1944 chan->buffer_seq = 0;
1945 chan->num_acked = 0;
1946 chan->frames_sent = 0;
1948 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1949 (unsigned long) chan);
1950 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1951 (unsigned long) chan);
1952 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1954 skb_queue_head_init(&chan->srej_q);
1956 INIT_LIST_HEAD(&chan->srej_l);
/* Frames arriving while the socket is user-locked go through this. */
1959 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1962 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1965 case L2CAP_MODE_STREAMING:
1966 case L2CAP_MODE_ERTM:
1967 if (l2cap_mode_supported(mode, remote_feat_mask))
1971 return L2CAP_MODE_BASIC;
1975 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1977 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
1980 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
1982 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Decide the tx window representation: if the requested window exceeds
 * the default and extended windows are supported, switch the channel
 * to the 4-byte extended control field; otherwise clamp the window to
 * the enhanced-control default. */
1985 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1987 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1988 __l2cap_ews_supported(chan)) {
1989 /* use extended control field */
1990 set_bit(FLAG_EXT_CTRL, &chan->flags);
1991 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
1993 chan->tx_win = min_t(u16, chan->tx_win,
1994 L2CAP_DEFAULT_TX_WINDOW);
1995 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request for @chan into @data: MTU option
 * (when non-default), an RFC option describing the chosen mode, and —
 * for ERTM/streaming — optional EFS, FCS and EWS options.  On the
 * first request the mode may be downgraded via l2cap_select_mode()
 * based on the remote feature mask.  Returns the request length
 * (elided return). */
1999 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2001 struct l2cap_conf_req *req = data;
2002 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2003 void *ptr = req->data;
2006 BT_DBG("chan %p", chan);
/* Only negotiate the mode down on the very first exchange. */
2008 if (chan->num_conf_req || chan->num_conf_rsp)
2011 switch (chan->mode) {
2012 case L2CAP_MODE_STREAMING:
2013 case L2CAP_MODE_ERTM:
2014 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2017 if (__l2cap_efs_supported(chan))
2018 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2022 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2027 if (chan->imtu != L2CAP_DEFAULT_MTU)
2028 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2030 switch (chan->mode) {
2031 case L2CAP_MODE_BASIC:
/* Skip the RFC option entirely if the remote knows neither mode. */
2032 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2033 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2036 rfc.mode = L2CAP_MODE_BASIC;
2038 rfc.max_transmit = 0;
2039 rfc.retrans_timeout = 0;
2040 rfc.monitor_timeout = 0;
2041 rfc.max_pdu_size = 0;
2043 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2044 (unsigned long) &rfc);
2047 case L2CAP_MODE_ERTM:
2048 rfc.mode = L2CAP_MODE_ERTM;
2049 rfc.max_transmit = chan->max_tx;
2050 rfc.retrans_timeout = 0;
2051 rfc.monitor_timeout = 0;
/* MPS must fit the connection MTU minus worst-case overhead. */
2053 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2054 L2CAP_EXT_HDR_SIZE -
2057 rfc.max_pdu_size = cpu_to_le16(size);
2059 l2cap_txwin_setup(chan);
2061 rfc.txwin_size = min_t(u16, chan->tx_win,
2062 L2CAP_DEFAULT_TX_WINDOW);
2064 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2065 (unsigned long) &rfc);
2067 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2068 l2cap_add_opt_efs(&ptr, chan);
2070 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2073 if (chan->fcs == L2CAP_FCS_NONE ||
2074 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2075 chan->fcs = L2CAP_FCS_NONE;
2076 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2079 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2080 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2084 case L2CAP_MODE_STREAMING:
2085 rfc.mode = L2CAP_MODE_STREAMING;
2087 rfc.max_transmit = 0;
2088 rfc.retrans_timeout = 0;
2089 rfc.monitor_timeout = 0;
2091 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2092 L2CAP_EXT_HDR_SIZE -
2095 rfc.max_pdu_size = cpu_to_le16(size);
2097 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2098 (unsigned long) &rfc);
2100 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2101 l2cap_add_opt_efs(&ptr, chan);
2103 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2106 if (chan->fcs == L2CAP_FCS_NONE ||
2107 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2108 chan->fcs = L2CAP_FCS_NONE;
2109 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2114 req->dcid = cpu_to_le16(chan->dcid);
2115 req->flags = cpu_to_le16(0);
/* Parse the peer's buffered Configure Request (chan->conf_req /
 * conf_len) and build our Configure Response into @data.  First pass
 * walks the options (MTU, flush timeout, QoS, RFC, FCS, EFS, EWS);
 * unknown non-hint options are echoed back with CONF_UNKNOWN.  Then
 * the requested mode is reconciled with ours (-ECONNREFUSED when
 * irreconcilable) and per-mode output options (MTU, RFC, EFS) are
 * appended.  Returns the response length (elided return) or a
 * negative errno. */
2120 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2122 struct l2cap_conf_rsp *rsp = data;
2123 void *ptr = rsp->data;
2124 void *req = chan->conf_req;
2125 int len = chan->conf_len;
2126 int type, hint, olen;
2128 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2129 struct l2cap_conf_efs efs;
2131 u16 mtu = L2CAP_DEFAULT_MTU;
2132 u16 result = L2CAP_CONF_SUCCESS;
2135 BT_DBG("chan %p", chan);
2137 while (len >= L2CAP_CONF_OPT_SIZE) {
2138 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* The hint bit marks options we may silently ignore. */
2140 hint = type & L2CAP_CONF_HINT;
2141 type &= L2CAP_CONF_MASK;
2144 case L2CAP_CONF_MTU:
2148 case L2CAP_CONF_FLUSH_TO:
2149 chan->flush_to = val;
2152 case L2CAP_CONF_QOS:
2155 case L2CAP_CONF_RFC:
2156 if (olen == sizeof(rfc))
2157 memcpy(&rfc, (void *) val, olen);
2160 case L2CAP_CONF_FCS:
2161 if (val == L2CAP_FCS_NONE)
2162 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2165 case L2CAP_CONF_EFS:
2167 if (olen == sizeof(efs))
2168 memcpy(&efs, (void *) val, olen);
2171 case L2CAP_CONF_EWS:
/* EWS requires high-speed support (elided check above). */
2173 return -ECONNREFUSED;
2175 set_bit(FLAG_EXT_CTRL, &chan->flags);
2176 set_bit(CONF_EWS_RECV, &chan->conf_state);
2177 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2178 chan->remote_tx_win = val;
/* Unknown mandatory option: list its type in the response. */
2185 result = L2CAP_CONF_UNKNOWN;
2186 *((u8 *) ptr++) = type;
2191 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2194 switch (chan->mode) {
2195 case L2CAP_MODE_STREAMING:
2196 case L2CAP_MODE_ERTM:
2197 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2198 chan->mode = l2cap_select_mode(rfc.mode,
2199 chan->conn->feat_mask);
2204 if (__l2cap_efs_supported(chan))
2205 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2207 return -ECONNREFUSED;
2210 if (chan->mode != rfc.mode)
2211 return -ECONNREFUSED;
2217 if (chan->mode != rfc.mode) {
2218 result = L2CAP_CONF_UNACCEPT;
2219 rfc.mode = chan->mode;
/* Second disagreement on mode -> give up on the channel. */
2221 if (chan->num_conf_rsp == 1)
2222 return -ECONNREFUSED;
2224 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2225 sizeof(rfc), (unsigned long) &rfc);
2228 if (result == L2CAP_CONF_SUCCESS) {
2229 /* Configure output options and let the other side know
2230 * which ones we don't like. */
2232 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2233 result = L2CAP_CONF_UNACCEPT;
2236 set_bit(CONF_MTU_DONE, &chan->conf_state);
2238 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2241 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2242 efs.stype != L2CAP_SERV_NOTRAFIC &&
2243 efs.stype != chan->local_stype) {
2245 result = L2CAP_CONF_UNACCEPT;
2247 if (chan->num_conf_req >= 1)
2248 return -ECONNREFUSED;
2250 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2252 (unsigned long) &efs);
2254 /* Send PENDING Conf Rsp */
2255 result = L2CAP_CONF_PENDING;
2256 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2261 case L2CAP_MODE_BASIC:
2262 chan->fcs = L2CAP_FCS_NONE;
2263 set_bit(CONF_MODE_DONE, &chan->conf_state);
2266 case L2CAP_MODE_ERTM:
2267 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2268 chan->remote_tx_win = rfc.txwin_size;
2270 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2272 chan->remote_max_tx = rfc.max_transmit;
2274 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2276 L2CAP_EXT_HDR_SIZE -
2279 rfc.max_pdu_size = cpu_to_le16(size);
2280 chan->remote_mps = size;
/* NOTE(review): le16_to_cpu() applied to host-order constants that
 * are then written into a wire structure looks inverted — the
 * conventional direction here is cpu_to_le16() (same byte-swap on
 * either endianness, but wrong for sparse/annotation purposes).
 * TODO confirm against the L2CAP_DEFAULT_*_TO definitions. */
2282 rfc.retrans_timeout =
2283 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2284 rfc.monitor_timeout =
2285 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2287 set_bit(CONF_MODE_DONE, &chan->conf_state);
2289 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2290 sizeof(rfc), (unsigned long) &rfc);
2292 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2293 chan->remote_id = efs.id;
2294 chan->remote_stype = efs.stype;
2295 chan->remote_msdu = le16_to_cpu(efs.msdu);
2296 chan->remote_flush_to =
2297 le32_to_cpu(efs.flush_to);
2298 chan->remote_acc_lat =
2299 le32_to_cpu(efs.acc_lat);
2300 chan->remote_sdu_itime =
2301 le32_to_cpu(efs.sdu_itime);
2302 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2303 sizeof(efs), (unsigned long) &efs);
2307 case L2CAP_MODE_STREAMING:
2308 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2310 L2CAP_EXT_HDR_SIZE -
2313 rfc.max_pdu_size = cpu_to_le16(size);
2314 chan->remote_mps = size;
2316 set_bit(CONF_MODE_DONE, &chan->conf_state);
2318 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2319 sizeof(rfc), (unsigned long) &rfc);
2324 result = L2CAP_CONF_UNACCEPT;
2326 memset(&rfc, 0, sizeof(rfc));
2327 rfc.mode = chan->mode;
2330 if (result == L2CAP_CONF_SUCCESS)
2331 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2333 rsp->scid = cpu_to_le16(chan->dcid);
2334 rsp->result = cpu_to_le16(result);
2335 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response (@rsp/@len) and build a fresh
 * Configure Request into @data that accepts the peer's adjustments
 * (MTU, flush timeout, RFC, EWS, EFS).  May rewrite *result; returns
 * the new request length (elided return) or -ECONNREFUSED when the
 * peer's answer conflicts with our mode or EFS service type. */
2340 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2342 struct l2cap_conf_req *req = data;
2343 void *ptr = req->data;
2346 struct l2cap_conf_rfc rfc;
2347 struct l2cap_conf_efs efs;
2349 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2351 while (len >= L2CAP_CONF_OPT_SIZE) {
2352 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2355 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: reject, clamp. */
2356 if (val < L2CAP_DEFAULT_MIN_MTU) {
2357 *result = L2CAP_CONF_UNACCEPT;
2358 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2361 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2364 case L2CAP_CONF_FLUSH_TO:
2365 chan->flush_to = val;
2366 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2370 case L2CAP_CONF_RFC:
2371 if (olen == sizeof(rfc))
2372 memcpy(&rfc, (void *)val, olen);
/* A state-2 device may not be talked out of its mode. */
2374 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2375 rfc.mode != chan->mode)
2376 return -ECONNREFUSED;
2380 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2381 sizeof(rfc), (unsigned long) &rfc);
2384 case L2CAP_CONF_EWS:
2385 chan->tx_win = min_t(u16, val,
2386 L2CAP_DEFAULT_EXT_WINDOW);
2387 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2391 case L2CAP_CONF_EFS:
2392 if (olen == sizeof(efs))
2393 memcpy(&efs, (void *)val, olen);
2395 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2396 efs.stype != L2CAP_SERV_NOTRAFIC &&
2397 efs.stype != chan->local_stype)
2398 return -ECONNREFUSED;
2400 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2401 sizeof(efs), (unsigned long) &efs);
2406 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2407 return -ECONNREFUSED;
2409 chan->mode = rfc.mode;
/* Adopt the peer's timers/MPS (and EFS values) once agreed. */
2411 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2413 case L2CAP_MODE_ERTM:
2414 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2415 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2416 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2418 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2419 chan->local_msdu = le16_to_cpu(efs.msdu);
2420 chan->local_sdu_itime =
2421 le32_to_cpu(efs.sdu_itime);
2422 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2423 chan->local_flush_to =
2424 le32_to_cpu(efs.flush_to);
2428 case L2CAP_MODE_STREAMING:
2429 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2433 req->dcid = cpu_to_le16(chan->dcid);
2434 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal Configure Response header (scid/result/flags) into
 * @data and return its length (elided return uses ptr - data). */
2439 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2441 struct l2cap_conf_rsp *rsp = data;
2442 void *ptr = rsp->data;
2444 BT_DBG("chan %p", chan);
2446 rsp->scid = cpu_to_le16(chan->dcid);
2447 rsp->result = cpu_to_le16(result);
2448 rsp->flags = cpu_to_le16(flags);
/* Complete a connection that was deferred for authorisation: send the
 * success Connect Response now, then kick off configuration with our
 * first Configure Request (unless one was already sent). */
2453 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2455 struct l2cap_conn_rsp rsp;
2456 struct l2cap_conn *conn = chan->conn;
2459 rsp.scid = cpu_to_le16(chan->dcid);
2460 rsp.dcid = cpu_to_le16(chan->scid);
2461 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2462 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reuse the ident saved from the original Connect Request. */
2463 l2cap_send_cmd(conn, chan->ident,
2464 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2466 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2469 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2470 l2cap_build_conf_req(chan, buf), buf);
2471 chan->num_conf_req++;
/* Extract the RFC option out of a successful Configure Response and
 * apply its timers/MPS to the channel.  No-op for channels that are
 * neither ERTM nor streaming. */
2474 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2478 struct l2cap_conf_rfc rfc;
2480 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2482 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2485 while (len >= L2CAP_CONF_OPT_SIZE) {
2486 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2489 case L2CAP_CONF_RFC:
2490 if (olen == sizeof(rfc))
2491 memcpy(&rfc, (void *)val, olen);
2498 case L2CAP_MODE_ERTM:
2499 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2500 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2501 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2503 case L2CAP_MODE_STREAMING:
2504 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject from the peer.  If it rejects our pending
 * Information Request (matched by ident), give up on the feature-mask
 * exchange, mark it done and start the queued channels anyway. */
2508 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2510 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2512 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2515 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2516 cmd->ident == conn->info_ident) {
2517 del_timer(&conn->info_timer);
2519 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2520 conn->info_ident = 0;
2522 l2cap_conn_start(conn);
/* Handle an incoming Connect Request: find a listening channel for the
 * PSM, enforce link security (SDP is exempt), check backlog and
 * duplicate-CID, create the child channel, pick a result/status
 * (success, pending on auth/deferred-setup, or an error), send the
 * Connect Response, and if the feature exchange is still pending kick
 * off an Information Request — otherwise start configuration. */
2528 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2530 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2531 struct l2cap_conn_rsp rsp;
2532 struct l2cap_chan *chan = NULL, *pchan;
2533 struct sock *parent, *sk = NULL;
2534 int result, status = L2CAP_CS_NO_INFO;
2536 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2537 __le16 psm = req->psm;
2539 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2541 /* Check if we have socket listening on psm */
2542 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2544 result = L2CAP_CR_BAD_PSM;
2550 bh_lock_sock(parent);
2552 /* Check if the ACL is secure enough (if not SDP) */
/* 0x0001 is the SDP PSM, allowed on an unauthenticated link. */
2553 if (psm != cpu_to_le16(0x0001) &&
2554 !hci_conn_check_link_mode(conn->hcon)) {
2555 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2556 result = L2CAP_CR_SEC_BLOCK;
2560 result = L2CAP_CR_NO_MEM;
2562 /* Check for backlog size */
2563 if (sk_acceptq_is_full(parent)) {
2564 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2568 chan = pchan->ops->new_connection(pchan->data);
2574 write_lock_bh(&conn->chan_lock);
2576 /* Check if we already have channel with that dcid */
2577 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2578 write_unlock_bh(&conn->chan_lock);
2579 sock_set_flag(sk, SOCK_ZAPPED);
2580 chan->ops->close(chan->data);
2584 hci_conn_hold(conn->hcon);
2586 bacpy(&bt_sk(sk)->src, conn->src);
2587 bacpy(&bt_sk(sk)->dst, conn->dst);
2591 bt_accept_enqueue(parent, sk);
2593 __l2cap_chan_add(conn, chan);
2597 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident for a possible deferred response. */
2599 chan->ident = cmd->ident;
2601 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2602 if (l2cap_chan_check_security(chan)) {
2603 if (bt_sk(sk)->defer_setup) {
2604 l2cap_state_change(chan, BT_CONNECT2);
2605 result = L2CAP_CR_PEND;
2606 status = L2CAP_CS_AUTHOR_PEND;
2607 parent->sk_data_ready(parent, 0);
2609 l2cap_state_change(chan, BT_CONFIG);
2610 result = L2CAP_CR_SUCCESS;
2611 status = L2CAP_CS_NO_INFO;
2614 l2cap_state_change(chan, BT_CONNECT2);
2615 result = L2CAP_CR_PEND;
2616 status = L2CAP_CS_AUTHEN_PEND;
2619 l2cap_state_change(chan, BT_CONNECT2);
2620 result = L2CAP_CR_PEND;
2621 status = L2CAP_CS_NO_INFO;
2624 write_unlock_bh(&conn->chan_lock);
2627 bh_unlock_sock(parent);
2630 rsp.scid = cpu_to_le16(scid);
2631 rsp.dcid = cpu_to_le16(dcid);
2632 rsp.result = cpu_to_le16(result);
2633 rsp.status = cpu_to_le16(status);
2634 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info: the feature exchange has not run yet. */
2636 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2637 struct l2cap_info_req info;
2638 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2640 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2641 conn->info_ident = l2cap_get_ident(conn);
2643 mod_timer(&conn->info_timer, jiffies +
2644 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2646 l2cap_send_cmd(conn, conn->info_ident,
2647 L2CAP_INFO_REQ, sizeof(info), &info);
2650 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2651 result == L2CAP_CR_SUCCESS) {
2653 set_bit(CONF_REQ_SENT, &chan->conf_state);
2654 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2655 l2cap_build_conf_req(chan, buf), buf);
2656 chan->num_conf_req++;
/* Handle a Connect Response: locate our channel by scid (or by ident
 * when scid is not yet assigned) and act on the result — success moves
 * to BT_CONFIG and sends our Configure Request, pending just records
 * the state, and any failure tears the channel down (deferred to the
 * disconnect timer when the socket is user-locked). */
2662 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2664 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2665 u16 scid, dcid, result, status;
2666 struct l2cap_chan *chan;
2670 scid = __le16_to_cpu(rsp->scid);
2671 dcid = __le16_to_cpu(rsp->dcid);
2672 result = __le16_to_cpu(rsp->result);
2673 status = __le16_to_cpu(rsp->status);
2675 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2678 chan = l2cap_get_chan_by_scid(conn, scid);
/* Fallback lookup by command ident for a 0 scid. */
2682 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2690 case L2CAP_CR_SUCCESS:
2691 l2cap_state_change(chan, BT_CONFIG);
2694 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2696 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2699 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2700 l2cap_build_conf_req(chan, req), req);
2701 chan->num_conf_req++;
2705 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2709 /* don't delete l2cap channel if sk is owned by user */
2710 if (sock_owned_by_user(sk)) {
2711 l2cap_state_change(chan, BT_DISCONN);
2712 __clear_chan_timer(chan);
2713 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2717 l2cap_chan_del(chan, ECONNREFUSED);
2725 static inline void set_default_fcs(struct l2cap_chan *chan)
2727 /* FCS is enabled only in ERTM or streaming mode, if one or both
2730 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2731 chan->fcs = L2CAP_FCS_NONE;
2732 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2733 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request: validate channel state,
 * accumulate (possibly fragmented) option data into chan->conf_req,
 * and once the final fragment arrives parse it, answer with our
 * Configure Response, and — when both directions are configured —
 * bring the channel up (ERTM init + ready).  Also handles the
 * cross-PENDING case of lockstep configuration. */
2736 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2738 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2741 struct l2cap_chan *chan;
2745 dcid = __le16_to_cpu(req->dcid);
2746 flags = __le16_to_cpu(req->flags);
2748 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2750 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only makes sense while connecting/configuring. */
2756 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2757 struct l2cap_cmd_rej_cid rej;
2759 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2760 rej.scid = cpu_to_le16(chan->scid);
2761 rej.dcid = cpu_to_le16(chan->dcid);
2763 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2768 /* Reject if config buffer is too small. */
2769 len = cmd_len - sizeof(*req);
2770 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2771 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2772 l2cap_build_conf_rsp(chan, rsp,
2773 L2CAP_CONF_REJECT, flags), rsp);
2778 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2779 chan->conf_len += len;
/* Continuation flag: more option fragments follow. */
2781 if (flags & 0x0001) {
2782 /* Incomplete config. Send empty response. */
2783 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2784 l2cap_build_conf_rsp(chan, rsp,
2785 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2789 /* Complete config. */
2790 len = l2cap_parse_conf_req(chan, rsp);
2792 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2796 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2797 chan->num_conf_rsp++;
2799 /* Reset config buffer. */
2802 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured -> channel is operational. */
2805 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2806 set_default_fcs(chan);
2808 l2cap_state_change(chan, BT_CONNECTED);
2810 chan->next_tx_seq = 0;
2811 chan->expected_tx_seq = 0;
2812 skb_queue_head_init(&chan->tx_q);
2813 if (chan->mode == L2CAP_MODE_ERTM)
2814 l2cap_ertm_init(chan);
2816 l2cap_chan_ready(sk);
2820 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2822 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2823 l2cap_build_conf_req(chan, buf), buf);
2824 chan->num_conf_req++;
2827 /* Got Conf Rsp PENDING from remote side and assume we sent
2828 Conf Rsp PENDING in the code above */
2829 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2830 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2832 /* check compatibility */
2834 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2835 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2837 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2838 l2cap_build_conf_rsp(chan, rsp,
2839 L2CAP_CONF_SUCCESS, 0x0000), rsp);
/* Handle a Configure Response: on SUCCESS apply the peer's RFC values;
 * on PENDING (lockstep) re-parse and answer; on UNACCEPT retry with an
 * adjusted request up to the retry limit; anything else disconnects.
 * When both directions finish, bring the channel up. */
2847 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2849 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2850 u16 scid, flags, result;
2851 struct l2cap_chan *chan;
/* NOTE(review): cmd->len is stored with cpu_to_le16() in
 * l2cap_build_cmd() (wire order) but read here without a
 * le16_to_cpu() conversion — looks like a missing byte-order
 * conversion on big-endian hosts; TODO confirm. */
2853 int len = cmd->len - sizeof(*rsp);
2855 scid = __le16_to_cpu(rsp->scid);
2856 flags = __le16_to_cpu(rsp->flags);
2857 result = __le16_to_cpu(rsp->result);
2859 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2860 scid, flags, result);
2862 chan = l2cap_get_chan_by_scid(conn, scid);
2869 case L2CAP_CONF_SUCCESS:
2870 l2cap_conf_rfc_get(chan, rsp->data, len);
2871 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2874 case L2CAP_CONF_PENDING:
2875 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2877 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2880 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2883 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2887 /* check compatibility */
2889 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2890 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2892 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2893 l2cap_build_conf_rsp(chan, buf,
2894 L2CAP_CONF_SUCCESS, 0x0000), buf);
2898 case L2CAP_CONF_UNACCEPT:
2899 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Peer's counter-proposal must fit our request buffer. */
2902 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2903 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2907 /* throw out any old stored conf requests */
2908 result = L2CAP_CONF_SUCCESS;
2909 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2912 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2916 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2917 L2CAP_CONF_REQ, len, req);
2918 chan->num_conf_req++;
2919 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: reject the channel and disconnect. */
2925 sk->sk_err = ECONNRESET;
2926 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2927 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2934 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2936 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2937 set_default_fcs(chan);
2939 l2cap_state_change(chan, BT_CONNECTED);
2940 chan->next_tx_seq = 0;
2941 chan->expected_tx_seq = 0;
2942 skb_queue_head_init(&chan->tx_q);
2943 if (chan->mode == L2CAP_MODE_ERTM)
2944 l2cap_ertm_init(chan);
2946 l2cap_chan_ready(sk);
/* Handle a Disconnect Request: acknowledge with a Disconnect Response,
 * shut the socket down, and delete the channel — deferred via the
 * disconnect timer when the socket is currently user-locked. */
2954 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2956 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2957 struct l2cap_disconn_rsp rsp;
2959 struct l2cap_chan *chan;
2962 scid = __le16_to_cpu(req->scid);
2963 dcid = __le16_to_cpu(req->dcid);
2965 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid. */
2967 chan = l2cap_get_chan_by_scid(conn, dcid);
2973 rsp.dcid = cpu_to_le16(chan->scid);
2974 rsp.scid = cpu_to_le16(chan->dcid);
2975 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2977 sk->sk_shutdown = SHUTDOWN_MASK;
2979 /* don't delete l2cap channel if sk is owned by user */
2980 if (sock_owned_by_user(sk)) {
2981 l2cap_state_change(chan, BT_DISCONN);
2982 __clear_chan_timer(chan);
2983 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2988 l2cap_chan_del(chan, ECONNRESET);
2991 chan->ops->close(chan->data);
/* Handle a Disconnect Response to our own request: tear the channel
 * down cleanly (err = 0), deferring via the disconnect timer if the
 * socket is user-locked. */
2995 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2997 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2999 struct l2cap_chan *chan;
3002 scid = __le16_to_cpu(rsp->scid);
3003 dcid = __le16_to_cpu(rsp->dcid);
3005 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3007 chan = l2cap_get_chan_by_scid(conn, scid);
3013 /* don't delete l2cap channel if sk is owned by user */
3014 if (sock_owned_by_user(sk)) {
3015 l2cap_state_change(chan, BT_DISCONN);
3016 __clear_chan_timer(chan);
3017 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
3022 l2cap_chan_del(chan, 0);
3025 chan->ops->close(chan->data);
/* Answer an L2CAP Information Request. Three cases: feature mask (advertise
 * ERTM/streaming and, conditionally, extended flow/window), fixed channels
 * (advertise the fixed-channel bitmap, toggling the A2MP bit), or NOTSUPP
 * for any other type. NOTE(review): the conditions guarding the feat_mask
 * ORs and the A2MP toggle are dropped by this extract — do not assume they
 * are unconditional. */
3029 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3031 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3034 type = __le16_to_cpu(req->type);
3036 BT_DBG("type 0x%4.4x", type);
3038 if (type == L2CAP_IT_FEAT_MASK) {
3040 u32 feat_mask = l2cap_feat_mask;
3041 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3042 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3043 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3045 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3048 feat_mask |= L2CAP_FEAT_EXT_FLOW
3049 | L2CAP_FEAT_EXT_WINDOW;
/* Feature mask goes on the wire little-endian, unaligned-safe. */
3051 put_unaligned_le32(feat_mask, rsp->data);
3052 l2cap_send_cmd(conn, cmd->ident,
3053 L2CAP_INFO_RSP, sizeof(buf), buf);
3054 } else if (type == L2CAP_IT_FIXED_CHAN) {
3056 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3059 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3061 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3063 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3064 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3065 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3066 l2cap_send_cmd(conn, cmd->ident,
3067 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: reply NOTSUPP with the echoed type. */
3069 struct l2cap_info_rsp rsp;
3070 rsp.type = cpu_to_le16(type);
3071 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3072 l2cap_send_cmd(conn, cmd->ident,
3073 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Process the peer's Information Response. Validates the ident against the
 * outstanding info request, stops the info timer, then either chains a
 * fixed-channel query (when the peer advertises fixed channels) or marks
 * info discovery done and kicks pending channel setup via
 * l2cap_conn_start(). */
3081 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3081 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3084 type = __le16_to_cpu(rsp->type);
3085 result = __le16_to_cpu(rsp->result);
3087 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3089 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3090 if (cmd->ident != conn->info_ident ||
3091 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3094 del_timer(&conn->info_timer);
3096 if (result != L2CAP_IR_SUCCESS) {
3097 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3098 conn->info_ident = 0;
3100 l2cap_conn_start(conn);
3105 if (type == L2CAP_IT_FEAT_MASK) {
3106 conn->feat_mask = get_unaligned_le32(rsp->data);
3108 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Peer supports fixed channels: issue a follow-up query. */
3109 struct l2cap_info_req req;
3110 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3112 conn->info_ident = l2cap_get_ident(conn);
3114 l2cap_send_cmd(conn, conn->info_ident,
3115 L2CAP_INFO_REQ, sizeof(req), &req);
3117 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3118 conn->info_ident = 0;
3120 l2cap_conn_start(conn);
3122 } else if (type == L2CAP_IT_FIXED_CHAN) {
3123 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3124 conn->info_ident = 0;
3126 l2cap_conn_start(conn);
/* AMP Create Channel Request handler. Validates the command length, then —
 * as the comment below states — always rejects with "no memory" since AMP
 * channel creation is not yet implemented. */
3132 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3133 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3136 struct l2cap_create_chan_req *req = data;
3137 struct l2cap_create_chan_rsp rsp;
/* Malformed length: bail out (error path trimmed in this extract). */
3140 if (cmd_len != sizeof(*req))
3146 psm = le16_to_cpu(req->psm);
3147 scid = le16_to_cpu(req->scid);
3149 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3151 /* Placeholder: Always reject */
3153 rsp.scid = cpu_to_le16(scid);
3154 rsp.result = L2CAP_CR_NO_MEM;
3155 rsp.status = L2CAP_CS_NO_INFO;
3157 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response: identical wire layout to a Connect Response,
 * so delegate straight to l2cap_connect_rsp(). */
3163 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3164 struct l2cap_cmd_hdr *cmd, void *data)
3166 BT_DBG("conn %p", conn);
3168 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for @icid with @result, reusing the
 * requester's @ident so the peer can match it to its request. */
3171 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3172 u16 icid, u16 result)
3174 struct l2cap_move_chan_rsp rsp;
3176 BT_DBG("icid %d, result %d", icid, result);
3178 rsp.icid = cpu_to_le16(icid);
3179 rsp.result = cpu_to_le16(result);
3181 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm for @icid with @result under a freshly
 * allocated ident, which is also recorded in chan->ident so the matching
 * Confirm Response can be correlated later.
 * NOTE(review): callers may pass chan == NULL (see l2cap_move_channel_rsp),
 * yet chan->ident is assigned unconditionally here — looks like a NULL
 * dereference in that path; confirm against the full file. */
3184 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3185 struct l2cap_chan *chan, u16 icid, u16 result)
3187 struct l2cap_move_chan_cfm cfm;
3190 BT_DBG("icid %d, result %d", icid, result);
3192 ident = l2cap_get_ident(conn);
3194 chan->ident = ident;
3196 cfm.icid = cpu_to_le16(icid);
3197 cfm.result = cpu_to_le16(result);
3199 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirm: echo @icid back under the confirm's
 * @ident as a Confirm Response. */
3202 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3205 struct l2cap_move_chan_cfm_rsp rsp;
3207 BT_DBG("icid %d", icid);
3209 rsp.icid = cpu_to_le16(icid);
3210 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* AMP Move Channel Request handler. Length-checks the command, then — per
 * the placeholder comment — always answers NOT_ALLOWED since channel moves
 * are unimplemented. */
3213 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3214 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3216 struct l2cap_move_chan_req *req = data;
3218 u16 result = L2CAP_MR_NOT_ALLOWED;
3220 if (cmd_len != sizeof(*req))
3223 icid = le16_to_cpu(req->icid);
3225 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3230 /* Placeholder: Always refuse */
3231 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* AMP Move Channel Response handler. Placeholder: always confirms the move
 * as UNCONFIRMED. NOTE(review): passes NULL as the chan argument to
 * l2cap_send_move_chan_cfm(), which writes chan->ident unconditionally —
 * flag for verification against the full source. */
3236 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3237 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3239 struct l2cap_move_chan_rsp *rsp = data;
3242 if (cmd_len != sizeof(*rsp))
3245 icid = le16_to_cpu(rsp->icid);
3246 result = le16_to_cpu(rsp->result);
3248 BT_DBG("icid %d, result %d", icid, result);
3250 /* Placeholder: Always unconfirmed */
3251 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* AMP Move Channel Confirm handler: validate length and acknowledge with a
 * Confirm Response; no channel state is updated (move unimplemented). */
3256 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3257 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3259 struct l2cap_move_chan_cfm *cfm = data;
3262 if (cmd_len != sizeof(*cfm))
3265 icid = le16_to_cpu(cfm->icid);
3266 result = le16_to_cpu(cfm->result);
3268 BT_DBG("icid %d, result %d", icid, result);
3270 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* AMP Move Channel Confirm Response handler: length-check and log only;
 * nothing further to do while moves are unimplemented. */
3275 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3276 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3278 struct l2cap_move_chan_cfm_rsp *rsp = data;
3281 if (cmd_len != sizeof(*rsp))
3284 icid = le16_to_cpu(rsp->icid);
3286 BT_DBG("icid %d", icid);
/* Validate LE connection parameters against the spec-mandated ranges:
 * interval min/max in [6, 3200] with min <= max, supervision timeout
 * multiplier in [10, 3200] and strictly greater than max * 8, and slave
 * latency <= 499 and below the timeout-derived ceiling. Returns nonzero
 * on any violation (return statements trimmed from this extract). */
3291 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3296 if (min > max || min < 6 || max > 3200)
3299 if (to_multiplier < 10 || to_multiplier > 3200)
3302 if (max >= to_multiplier * 8)
3305 max_latency = (to_multiplier * 8 / max) - 1;
3306 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (slave-initiated; only
 * valid when we are master). Validates length and parameter ranges, replies
 * with ACCEPTED or REJECTED, and on success forwards the new parameters to
 * the controller via hci_le_conn_update(). */
3312 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3313 struct l2cap_cmd_hdr *cmd, u8 *data)
3315 struct hci_conn *hcon = conn->hcon;
3316 struct l2cap_conn_param_update_req *req;
3317 struct l2cap_conn_param_update_rsp rsp;
3318 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may apply a parameter update. */
3321 if (!(hcon->link_mode & HCI_LM_MASTER))
3324 cmd_len = __le16_to_cpu(cmd->len);
3325 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3328 req = (struct l2cap_conn_param_update_req *) data;
3329 min = __le16_to_cpu(req->min);
3330 max = __le16_to_cpu(req->max);
3331 latency = __le16_to_cpu(req->latency);
3332 to_multiplier = __le16_to_cpu(req->to_multiplier);
3334 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3335 min, max, latency, to_multiplier);
3337 memset(&rsp, 0, sizeof(rsp));
3339 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3341 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3343 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3345 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters valid: push the update down to the HCI layer. */
3349 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler by opcode. Echo
 * requests are answered inline by reflecting the payload; Echo responses
 * are ignored; unknown opcodes are logged (and, per the trimmed tail,
 * presumably rejected — confirm against the full file). */
3354 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3355 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3359 switch (cmd->code) {
3360 case L2CAP_COMMAND_REJ:
3361 l2cap_command_rej(conn, cmd, data);
3364 case L2CAP_CONN_REQ:
3365 err = l2cap_connect_req(conn, cmd, data);
3368 case L2CAP_CONN_RSP:
3369 err = l2cap_connect_rsp(conn, cmd, data);
3372 case L2CAP_CONF_REQ:
3373 err = l2cap_config_req(conn, cmd, cmd_len, data);
3376 case L2CAP_CONF_RSP:
3377 err = l2cap_config_rsp(conn, cmd, data);
3380 case L2CAP_DISCONN_REQ:
3381 err = l2cap_disconnect_req(conn, cmd, data);
3384 case L2CAP_DISCONN_RSP:
3385 err = l2cap_disconnect_rsp(conn, cmd, data);
3388 case L2CAP_ECHO_REQ:
/* Echo: reflect the request payload straight back. */
3389 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3392 case L2CAP_ECHO_RSP:
3395 case L2CAP_INFO_REQ:
3396 err = l2cap_information_req(conn, cmd, data);
3399 case L2CAP_INFO_RSP:
3400 err = l2cap_information_rsp(conn, cmd, data);
3403 case L2CAP_CREATE_CHAN_REQ:
3404 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3407 case L2CAP_CREATE_CHAN_RSP:
3408 err = l2cap_create_channel_rsp(conn, cmd, data);
3411 case L2CAP_MOVE_CHAN_REQ:
3412 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3415 case L2CAP_MOVE_CHAN_RSP:
3416 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3419 case L2CAP_MOVE_CHAN_CFM:
3420 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3423 case L2CAP_MOVE_CHAN_CFM_RSP:
3424 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3428 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command. Only the Connection Parameter Update
 * Request is actively handled; Command Reject and Update Response are
 * accepted silently; anything else is logged as unknown. */
3436 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3437 struct l2cap_cmd_hdr *cmd, u8 *data)
3439 switch (cmd->code) {
3440 case L2CAP_COMMAND_REJ:
3443 case L2CAP_CONN_PARAM_UPDATE_REQ:
3444 return l2cap_conn_param_update_req(conn, cmd, data);
3446 case L2CAP_CONN_PARAM_UPDATE_RSP:
3450 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signaling channel payload: iterate over the concatenated
 * command headers in @skb, dispatch each to the LE or BR/EDR handler based
 * on link type, and send a Command Reject when a handler fails. Raw-socket
 * listeners get a copy first via l2cap_raw_recv(). */
3455 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3456 struct sk_buff *skb)
3458 u8 *data = skb->data;
3460 struct l2cap_cmd_hdr cmd;
3463 l2cap_raw_recv(conn, skb);
/* Walk each embedded command: header first, then cmd_len payload bytes. */
3465 while (len >= L2CAP_CMD_HDR_SIZE) {
3467 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3468 data += L2CAP_CMD_HDR_SIZE;
3469 len -= L2CAP_CMD_HDR_SIZE;
3471 cmd_len = le16_to_cpu(cmd.len);
3473 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Guard against truncated payloads and the reserved ident 0. */
3475 if (cmd_len > len || !cmd.ident) {
3476 BT_DBG("corrupted command");
3480 if (conn->hcon->type == LE_LINK)
3481 err = l2cap_le_sig_cmd(conn, &cmd, data);
3483 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3486 struct l2cap_cmd_rej_unk rej;
3488 BT_ERR("Wrong link type (%d)", err);
3490 /* FIXME: Map err to a valid reason */
3491 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3492 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing CRC16 FCS of a received frame when the channel uses
 * L2CAP_FCS_CRC16. The received FCS is trimmed off the skb, then CRC16 is
 * recomputed over the (extended or enhanced) control header plus payload
 * and compared. Nonzero is returned on mismatch (return lines trimmed in
 * this extract). */
3502 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3504 u16 our_fcs, rcv_fcs;
3507 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3508 hdr_size = L2CAP_EXT_HDR_SIZE;
3510 hdr_size = L2CAP_ENH_HDR_SIZE;
3512 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Drop the 2 FCS bytes, then read them from just past the new tail. */
3513 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3514 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
/* CRC covers the header that was already pulled, hence data - hdr_size. */
3515 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3517 if (our_fcs != rcv_fcs)
/* ERTM poll answer: send whatever is appropriate for current state — an
 * RNR when locally busy, retransmissions when the remote was busy, pending
 * I-frames otherwise, and a bare RR if nothing at all got sent. */
3523 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3527 chan->frames_sent = 0;
3529 control |= __set_reqseq(chan, chan->buffer_seq);
3531 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3532 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3533 l2cap_send_sframe(chan, control);
3534 set_bit(CONN_RNR_SENT, &chan->conn_state);
3537 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3538 l2cap_retransmit_frames(chan);
3540 l2cap_ertm_send(chan);
/* Nothing was transmitted above: fall back to an explicit RR ack. */
3542 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3543 chan->frames_sent == 0) {
3544 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3545 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ queue, kept sorted by
 * tx_seq offset from buffer_seq. Duplicates (same tx_seq already queued)
 * are detected via the skb control block; frames larger than everything
 * queued go to the tail. Return values are trimmed from this extract —
 * presumably <0 signals a duplicate; confirm against the full file. */
3549 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3551 struct sk_buff *next_skb;
3552 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequencing metadata in the skb control block for later replay. */
3554 bt_cb(skb)->tx_seq = tx_seq;
3555 bt_cb(skb)->sar = sar;
3557 next_skb = skb_peek(&chan->srej_q);
3559 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3562 if (bt_cb(next_skb)->tx_seq == tx_seq)
3565 next_tx_seq_offset = __seq_offset(chan,
3566 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3568 if (next_tx_seq_offset > tx_seq_offset) {
3569 __skb_queue_before(&chan->srej_q, next_skb, skb);
3573 if (skb_queue_is_last(&chan->srej_q, next_skb))
3576 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3579 __skb_queue_tail(&chan->srej_q, skb);
/* Append @new_frag to @skb's frag_list, tracking the list tail through
 * *@last_frag so appends stay O(1), and update the parent skb's
 * len/data_len/truesize accounting to cover the new fragment. */
3584 static void append_skb_frag(struct sk_buff *skb,
3585 struct sk_buff *new_frag, struct sk_buff **last_frag)
3587 /* skb->len reflects data in skb as well as all fragments
3588 * skb->data_len reflects only data in fragments
3590 if (!skb_has_frag_list(skb))
3591 skb_shinfo(skb)->frag_list = new_frag;
3593 new_frag->next = NULL;
3595 (*last_frag)->next = new_frag;
3596 *last_frag = new_frag;
3598 skb->len += new_frag->len;
3599 skb->data_len += new_frag->len;
3600 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from segmented I-frames based on the SAR bits.
 * UNSEGMENTED frames are delivered directly; START records the announced
 * SDU length and begins a fragment chain; CONTINUE/END append fragments
 * (via append_skb_frag) and END delivers the completed SDU, with oversize
 * or short SDUs discarded. NOTE(review): several branch guards and the
 * error-cleanup labels are trimmed from this extract. */
3603 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3607 switch (__get_ctrl_sar(chan, control)) {
3608 case L2CAP_SAR_UNSEGMENTED:
3612 err = chan->ops->recv(chan->data, skb);
3615 case L2CAP_SAR_START:
/* First fragment carries the total SDU length prefix. */
3619 chan->sdu_len = get_unaligned_le16(skb->data);
3620 skb_pull(skb, L2CAP_SDULEN_SIZE);
3622 if (chan->sdu_len > chan->imtu) {
3627 if (skb->len >= chan->sdu_len)
3631 chan->sdu_last_frag = skb;
3637 case L2CAP_SAR_CONTINUE:
3641 append_skb_frag(chan->sdu, skb,
3642 &chan->sdu_last_frag);
3645 if (chan->sdu->len >= chan->sdu_len)
3655 append_skb_frag(chan->sdu, skb,
3656 &chan->sdu_last_frag);
/* Final fragment: total length must match the announced SDU length. */
3659 if (chan->sdu->len != chan->sdu_len)
3662 err = chan->ops->recv(chan->data, chan->sdu);
3665 /* Reassembly complete */
3667 chan->sdu_last_frag = NULL;
3675 kfree_skb(chan->sdu);
3677 chan->sdu_last_frag = NULL;
/* Enter ERTM local-busy: flag the state, tell the peer via an RNR S-frame
 * carrying the current buffer_seq, note that RNR was sent, and stop the
 * ack timer (no acks while busy). */
3684 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3688 BT_DBG("chan %p, Enter local busy", chan);
3690 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3692 control = __set_reqseq(chan, chan->buffer_seq);
3693 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3694 l2cap_send_sframe(chan, control);
3696 set_bit(CONN_RNR_SENT, &chan->conn_state);
3698 __clear_ack_timer(chan);
/* Leave ERTM local-busy. If we previously sent an RNR, send an RR with the
 * poll bit set and start the monitor timer while waiting for the F-bit
 * reply (CONN_WAIT_F); finally clear the busy/RNR flags.
 * NOTE(review): the branch structure around the RNR_SENT test is partially
 * trimmed here — confirm ordering against the full file. */
3701 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3705 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3708 control = __set_reqseq(chan, chan->buffer_seq);
3709 control |= __set_ctrl_poll(chan);
3710 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3711 l2cap_send_sframe(chan, control);
3712 chan->retry_count = 1;
3714 __clear_retrans_timer(chan);
3715 __set_monitor_timer(chan);
3717 set_bit(CONN_WAIT_F, &chan->conn_state);
3720 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3721 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3723 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook for the socket layer to toggle local-busy: only meaningful
 * in ERTM mode, where it enters or exits the local-busy state. */
3726 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3728 if (chan->mode == L2CAP_MODE_ERTM) {
3730 l2cap_ertm_enter_local_busy(chan);
3732 l2cap_ertm_exit_local_busy(chan);
/* Drain in-sequence frames from the SREJ queue starting at @tx_seq: while
 * the head matches the expected sequence (and we are not locally busy),
 * dequeue, reassemble, and advance buffer_seq_srej. A reassembly failure
 * triggers a disconnect with ECONNRESET. */
3736 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3738 struct sk_buff *skb;
3741 while ((skb = skb_peek(&chan->srej_q)) &&
3742 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Stop at the first gap: head is not the next expected frame. */
3745 if (bt_cb(skb)->tx_seq != tx_seq)
3748 skb = skb_dequeue(&chan->srej_q);
3749 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3750 err = l2cap_reassemble_sdu(chan, skb, control);
3753 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3757 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3758 tx_seq = __next_seq(chan, tx_seq);
/* Re-send SREJ S-frames for entries up to @tx_seq in the pending-SREJ
 * list, re-queuing each list node at the tail afterwards. The loop exits
 * once the matching entry is found (exit path trimmed in this extract). */
3762 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3764 struct srej_list *l, *tmp;
3767 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3768 if (l->tx_seq == tx_seq) {
3773 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3774 control |= __set_reqseq(chan, l->tx_seq);
3775 l2cap_send_sframe(chan, control);
3777 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing sequence number between expected_tx_seq and
 * @tx_seq, recording each in the srej_l list (GFP_ATOMIC — allocation
 * failure path trimmed from this extract), then step expected_tx_seq past
 * the received frame. Returns an error code on allocation failure. */
3781 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3783 struct srej_list *new;
3786 while (tx_seq != chan->expected_tx_seq) {
3787 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3788 control |= __set_reqseq(chan, chan->expected_tx_seq);
3789 l2cap_send_sframe(chan, control);
3791 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3795 new->tx_seq = chan->expected_tx_seq;
3797 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3799 list_add_tail(&new->list, &chan->srej_l);
/* Finally skip over the frame that prompted the SREJs. */
3802 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive path. Handles the F-bit while waiting for a
 * poll answer, acks the piggybacked req_seq, validates tx_seq against the
 * window, and then either: delivers the in-sequence frame, services an
 * in-progress SREJ recovery (queueing/draining the srej_q), or detects a
 * new gap and enters SREJ_SENT. Periodically acks every tx_win/6+1 frames.
 * NOTE(review): goto labels, 'drop' paths and some closing braces are
 * trimmed from this extract; comments follow only the visible flow. */
3809 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3810 u16 tx_seq = __get_txseq(chan, rx_control);
3810 u16 req_seq = __get_reqseq(chan, rx_control);
3811 u8 sar = __get_ctrl_sar(chan, rx_control);
3812 int tx_seq_offset, expected_tx_seq_offset;
3813 int num_to_ack = (chan->tx_win/6) + 1;
3816 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3817 tx_seq, rx_control);
/* F-bit answers our poll: stop the monitor, resume retransmit timer. */
3819 if (__is_ctrl_final(chan, rx_control) &&
3820 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3821 __clear_monitor_timer(chan);
3822 if (chan->unacked_frames > 0)
3823 __set_retrans_timer(chan);
3824 clear_bit(CONN_WAIT_F, &chan->conn_state);
3827 chan->expected_ack_seq = req_seq;
3828 l2cap_drop_acked_frames(chan);
3830 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3832 /* invalid tx_seq */
3833 if (tx_seq_offset >= chan->tx_win) {
3834 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3838 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3841 if (tx_seq == chan->expected_tx_seq)
/* SREJ recovery in progress: match against the outstanding SREJ list. */
3844 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3845 struct srej_list *first;
3847 first = list_first_entry(&chan->srej_l,
3848 struct srej_list, list);
3849 if (tx_seq == first->tx_seq) {
3850 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3851 l2cap_check_srej_gap(chan, tx_seq);
3853 list_del(&first->list);
/* Last outstanding SREJ satisfied: leave recovery, resync buffer_seq. */
3856 if (list_empty(&chan->srej_l)) {
3857 chan->buffer_seq = chan->buffer_seq_srej;
3858 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3859 l2cap_send_ack(chan);
3860 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3863 struct srej_list *l;
3865 /* duplicated tx_seq */
3866 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3869 list_for_each_entry(l, &chan->srej_l, list) {
3870 if (l->tx_seq == tx_seq) {
3871 l2cap_resend_srejframe(chan, tx_seq);
3876 err = l2cap_send_srejframe(chan, tx_seq);
3878 l2cap_send_disconn_req(chan->conn, chan, -err);
/* Not in SREJ recovery: a sequence gap starts a new recovery round. */
3883 expected_tx_seq_offset = __seq_offset(chan,
3884 chan->expected_tx_seq, chan->buffer_seq);
3886 /* duplicated tx_seq */
3887 if (tx_seq_offset < expected_tx_seq_offset)
3890 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3892 BT_DBG("chan %p, Enter SREJ", chan);
3894 INIT_LIST_HEAD(&chan->srej_l);
3895 chan->buffer_seq_srej = chan->buffer_seq;
3897 __skb_queue_head_init(&chan->srej_q);
3898 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3900 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3902 err = l2cap_send_srejframe(chan, tx_seq);
3904 l2cap_send_disconn_req(chan->conn, chan, -err);
3908 __clear_ack_timer(chan);
/* In-sequence delivery path (expected tx_seq). */
3913 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3915 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3916 bt_cb(skb)->tx_seq = tx_seq;
3917 bt_cb(skb)->sar = sar;
3918 __skb_queue_tail(&chan->srej_q, skb);
3922 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3923 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3926 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3930 if (__is_ctrl_final(chan, rx_control)) {
3931 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3932 l2cap_retransmit_frames(chan);
/* Batch acks: explicit ack every num_to_ack frames, else arm the timer. */
3936 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3937 if (chan->num_acked == num_to_ack - 1)
3938 l2cap_send_ack(chan);
3940 __set_ack_timer(chan);
3949 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3951 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3952 __get_reqseq(chan, rx_control), rx_control);
3954 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3955 l2cap_drop_acked_frames(chan);
3957 if (__is_ctrl_poll(chan, rx_control)) {
3958 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3959 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3960 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3961 (chan->unacked_frames > 0))
3962 __set_retrans_timer(chan);
3964 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3965 l2cap_send_srejtail(chan);
3967 l2cap_send_i_or_rr_or_rnr(chan);
3970 } else if (__is_ctrl_final(chan, rx_control)) {
3971 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3973 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3974 l2cap_retransmit_frames(chan);
3977 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3978 (chan->unacked_frames > 0))
3979 __set_retrans_timer(chan);
3981 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3982 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3983 l2cap_send_ack(chan);
3985 l2cap_ertm_send(chan);
/* Handle a REJ S-frame: the peer asks for retransmission from req_seq.
 * Ack everything before it, then retransmit — on an F-bit final only when
 * no REJ is already being acted on; otherwise retransmit and, if we are
 * waiting for a poll answer, note the REJ as acted upon. */
3989 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
3991 u16 tx_seq = __get_reqseq(chan, rx_control);
3993 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3995 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3997 chan->expected_ack_seq = tx_seq;
3998 l2cap_drop_acked_frames(chan);
4000 if (__is_ctrl_final(chan, rx_control)) {
4001 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4002 l2cap_retransmit_frames(chan);
4004 l2cap_retransmit_frames(chan);
4006 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4007 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle an SREJ S-frame: selectively retransmit the single frame the peer
 * asks for. P-bit: ack first, retransmit with F-bit pending, then resume
 * sending. F-bit: clear any matching saved SREJ-ACT state before
 * retransmitting. Plain SREJ: retransmit and remember the req_seq if we
 * are waiting for a poll answer. */
4010 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4012 u16 tx_seq = __get_reqseq(chan, rx_control);
4014 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4016 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4018 if (__is_ctrl_poll(chan, rx_control)) {
4019 chan->expected_ack_seq = tx_seq;
4020 l2cap_drop_acked_frames(chan);
4022 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4023 l2cap_retransmit_one_frame(chan, tx_seq);
4025 l2cap_ertm_send(chan);
4027 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4028 chan->srej_save_reqseq = tx_seq;
4029 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4031 } else if (__is_ctrl_final(chan, rx_control)) {
4032 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4033 chan->srej_save_reqseq == tx_seq)
4034 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4036 l2cap_retransmit_one_frame(chan, tx_seq);
4038 l2cap_retransmit_one_frame(chan, tx_seq);
4039 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4040 chan->srej_save_reqseq = tx_seq;
4041 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle an RNR S-frame: the peer signals it cannot receive. Mark remote
 * busy, ack up to req_seq, and on a P-bit commit to answering with F-bit.
 * Outside SREJ recovery the retransmit timer is stopped and a poll gets an
 * immediate RR/RNR-with-final; inside recovery a poll gets the SREJ tail,
 * otherwise a plain RR is sent. */
4046 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4048 u16 tx_seq = __get_reqseq(chan, rx_control);
4050 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4052 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4053 chan->expected_ack_seq = tx_seq;
4054 l2cap_drop_acked_frames(chan);
4056 if (__is_ctrl_poll(chan, rx_control))
4057 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4059 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4060 __clear_retrans_timer(chan);
4061 if (__is_ctrl_poll(chan, rx_control))
4062 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4066 if (__is_ctrl_poll(chan, rx_control)) {
4067 l2cap_send_srejtail(chan);
4069 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4070 l2cap_send_sframe(chan, rx_control);
/* Dispatch a received S-frame by its supervisory function (RR/REJ/SREJ/
 * RNR). An F-bit received while awaiting a poll answer first stops the
 * monitor timer and re-arms retransmission for unacked frames. */
4074 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4076 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4078 if (__is_ctrl_final(chan, rx_control) &&
4079 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4080 __clear_monitor_timer(chan);
4081 if (chan->unacked_frames > 0)
4082 __set_retrans_timer(chan);
4083 clear_bit(CONN_WAIT_F, &chan->conn_state);
4086 switch (__get_ctrl_super(chan, rx_control)) {
4087 case L2CAP_SUPER_RR:
4088 l2cap_data_channel_rrframe(chan, rx_control);
4091 case L2CAP_SUPER_REJ:
4092 l2cap_data_channel_rejframe(chan, rx_control);
4095 case L2CAP_SUPER_SREJ:
4096 l2cap_data_channel_srejframe(chan, rx_control);
4099 case L2CAP_SUPER_RNR:
4100 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for one received ERTM PDU on @sk: pull and parse the control
 * field, verify FCS (silently dropping corrupt frames — peer recovery will
 * re-request them), bound-check the payload against MPS, validate the
 * piggybacked req_seq window, and route to the I-frame or S-frame handler.
 * Invalid frames disconnect the channel with ECONNRESET.
 * NOTE(review): 'len' computation, drop labels and some guards are trimmed
 * from this extract. */
4108 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4110 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4113 int len, next_tx_seq_offset, req_seq_offset;
4115 control = __get_control(chan, skb->data);
4116 skb_pull(skb, __ctrl_size(chan));
4120 * We can just drop the corrupted I-frame here.
4121 * Receiver will miss it and start proper recovery
4122 * procedures and ask retransmission.
4124 if (l2cap_check_fcs(chan, skb))
/* Payload length excludes the SDU-length prefix and the FCS trailer. */
4127 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4128 len -= L2CAP_SDULEN_SIZE;
4130 if (chan->fcs == L2CAP_FCS_CRC16)
4131 len -= L2CAP_FCS_SIZE;
4133 if (len > chan->mps) {
4134 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4138 req_seq = __get_reqseq(chan, control);
4140 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4142 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4143 chan->expected_ack_seq);
4145 /* check for invalid req-seq */
4146 if (req_seq_offset > next_tx_seq_offset) {
4147 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4151 if (!__is_sframe(chan, control)) {
/* I-frame: trimmed guard here rejects zero-length I-frames. */
4153 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4157 l2cap_data_channel_iframe(chan, control, skb);
/* S-frame with payload is a protocol violation. */
4161 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4165 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver one data PDU to the connection-oriented channel identified by
 * @cid. Per channel mode: BASIC delivers directly (bounded by imtu, no
 * flow control so overflows drop); ERTM goes through l2cap_ertm_data_rcv
 * or the socket backlog when the socket is user-locked; STREAMING parses
 * control/FCS, discards partial SDUs on sequence gaps, and reassembles.
 * NOTE(review): drop/done labels and unlock paths are trimmed from this
 * extract. */
4175 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4177 struct l2cap_chan *chan;
4178 struct sock *sk = NULL;
4183 chan = l2cap_get_chan_by_scid(conn, cid);
4185 BT_DBG("unknown cid 0x%4.4x", cid);
4191 BT_DBG("chan %p, len %d", chan, skb->len);
4193 if (chan->state != BT_CONNECTED)
4196 switch (chan->mode) {
4197 case L2CAP_MODE_BASIC:
4198 /* If socket recv buffers overflows we drop data here
4199 * which is *bad* because L2CAP has to be reliable.
4200 * But we don't have any other choice. L2CAP doesn't
4201 * provide flow control mechanism. */
4203 if (chan->imtu < skb->len)
4206 if (!chan->ops->recv(chan->data, skb))
4210 case L2CAP_MODE_ERTM:
/* Socket user-locked: defer via backlog instead of processing inline. */
4211 if (!sock_owned_by_user(sk)) {
4212 l2cap_ertm_data_rcv(sk, skb);
4214 if (sk_add_backlog(sk, skb))
4220 case L2CAP_MODE_STREAMING:
4221 control = __get_control(chan, skb->data);
4222 skb_pull(skb, __ctrl_size(chan));
4225 if (l2cap_check_fcs(chan, skb))
4228 if (__is_sar_start(chan, control))
4229 len -= L2CAP_SDULEN_SIZE;
4231 if (chan->fcs == L2CAP_FCS_CRC16)
4232 len -= L2CAP_FCS_SIZE;
/* Streaming mode carries I-frames only; S-frames are invalid here. */
4234 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4237 tx_seq = __get_txseq(chan, control);
4239 if (chan->expected_tx_seq != tx_seq) {
4240 /* Frame(s) missing - must discard partial SDU */
4241 kfree_skb(chan->sdu);
4243 chan->sdu_last_frag = NULL;
4246 /* TODO: Notify userland of missing data */
4249 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4251 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4252 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4257 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (G-frame) PDU: find a global channel listening
 * on @psm for our source address, check state and imtu, and hand the skb
 * to the channel's recv op. Error/drop paths trimmed in this extract. */
4271 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4273 struct sock *sk = NULL;
4274 struct l2cap_chan *chan;
4276 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4284 BT_DBG("sk %p, len %d", sk, skb->len);
4286 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4289 if (chan->imtu < skb->len)
4292 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE fixed-channel) PDU: mirror of l2cap_conless_channel
 * but the lookup is by global SCID rather than PSM. Error/drop paths
 * trimmed in this extract. */
4304 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4306 struct sock *sk = NULL;
4307 struct l2cap_chan *chan;
4309 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4317 BT_DBG("sk %p, len %d", sk, skb->len);
4319 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4322 if (chan->imtu < skb->len)
4325 if (!chan->ops->recv(chan->data, skb))
/* Top-level demux for one complete L2CAP frame: strip the basic header,
 * validate the announced length against the skb, and route by CID —
 * signaling, connectionless (PSM-prefixed), LE ATT, SMP (dropping the
 * connection on SMP failure), or a connection-oriented data channel. */
4337 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4339 struct l2cap_hdr *lh = (void *) skb->data;
4343 skb_pull(skb, L2CAP_HDR_SIZE);
4344 cid = __le16_to_cpu(lh->cid);
4345 len = __le16_to_cpu(lh->len);
/* Header length must match what actually arrived (drop path trimmed). */
4347 if (len != skb->len) {
4352 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4355 case L2CAP_CID_LE_SIGNALING:
4356 case L2CAP_CID_SIGNALING:
4357 l2cap_sig_channel(conn, skb);
4360 case L2CAP_CID_CONN_LESS:
4361 psm = get_unaligned_le16(skb->data);
4363 l2cap_conless_channel(conn, psm, skb);
4366 case L2CAP_CID_LE_DATA:
4367 l2cap_att_channel(conn, cid, skb);
/* SMP fixed channel: a failure tears down the whole connection. */
4371 if (smp_sig_channel(conn, skb))
4372 l2cap_conn_del(conn->hcon, EACCES);
4376 l2cap_data_channel(conn, cid, skb);
4381 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from @bdaddr?
 * Scan listening channels: an exact local-address match (lm1) takes
 * precedence over wildcard BDADDR_ANY listeners (lm2); each contributes
 * ACCEPT and, if the channel allows role switch, MASTER bits. */
4383 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4385 int exact = 0, lm1 = 0, lm2 = 0;
4386 struct l2cap_chan *c;
4388 if (type != ACL_LINK)
4391 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4393 /* Find listening sockets and check their link_mode */
4394 read_lock(&chan_list_lock);
4395 list_for_each_entry(c, &chan_list, global_l) {
4396 struct sock *sk = c->sk;
4398 if (c->state != BT_LISTEN)
4401 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4402 lm1 |= HCI_LM_ACCEPT;
4403 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4404 lm1 |= HCI_LM_MASTER;
4406 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4407 lm2 |= HCI_LM_ACCEPT;
4408 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4409 lm2 |= HCI_LM_MASTER;
4412 read_unlock(&chan_list_lock);
4414 return exact ? lm1 : lm2;
/* HCI callback on link establishment: for ACL/LE links, create the L2CAP
 * connection object and mark it ready on success, or tear it down with the
 * status translated to an errno on failure. */
4417 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4419 struct l2cap_conn *conn;
4421 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4423 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4427 conn = l2cap_conn_add(hcon, status);
4429 l2cap_conn_ready(conn);
4431 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback asking for the disconnect reason to report for @hcon;
 * defaults to "remote user terminated" when no L2CAP state exists. */
4436 static int l2cap_disconn_ind(struct hci_conn *hcon)
4438 struct l2cap_conn *conn = hcon->l2cap_data;
4440 BT_DBG("hcon %p", hcon);
4442 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4443 return HCI_ERROR_REMOTE_USER_TERM;
4445 return conn->disc_reason;
/* HCI callback on link teardown: drop the whole L2CAP connection with the
 * HCI reason mapped to an errno. */
4448 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4450 BT_DBG("hcon %p reason %d", hcon, reason);
4452 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4455 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 * Encryption lost: MEDIUM security channels get a grace timer, HIGH
 * security channels are closed immediately. Encryption (re)gained:
 * MEDIUM channels cancel their pending timer. */
4460 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4462 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4465 if (encrypt == 0x00) {
4466 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4467 __clear_chan_timer(chan);
4468 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4469 } else if (chan->sec_level == BT_SECURITY_HIGH)
4470 l2cap_chan_close(chan, ECONNREFUSED);
4472 if (chan->sec_level == BT_SECURITY_MEDIUM)
4473 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) result callback. For LE links,
 * distribute SMP keys and stop the security timer. Then walk every channel
 * on the connection: LE data channels become ready on success; established
 * BR/EDR channels get l2cap_check_encryption(); channels blocked on
 * security either proceed with a Connect Request (BT_CONNECT) or answer
 * the pending incoming connection (BT_CONNECT2) with success, pending
 * authorization, or a security block.
 * NOTE(review): several guards (status checks, CONNECT_PEND handling) are
 * trimmed from this extract — branch conditions are incomplete here. */
4477 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4479 struct l2cap_conn *conn = hcon->l2cap_data;
4480 struct l2cap_chan *chan;
4485 BT_DBG("conn %p", conn);
4487 if (hcon->type == LE_LINK) {
4488 smp_distribute_keys(conn, 0);
4489 del_timer(&conn->security_timer);
4492 read_lock(&conn->chan_lock);
4494 list_for_each_entry(chan, &conn->chan_l, list) {
4495 struct sock *sk = chan->sk;
4499 BT_DBG("chan->scid %d", chan->scid);
4501 if (chan->scid == L2CAP_CID_LE_DATA) {
4502 if (!status && encrypt) {
4503 chan->sec_level = hcon->sec_level;
4504 l2cap_chan_ready(sk);
4511 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4516 if (!status && (chan->state == BT_CONNECTED ||
4517 chan->state == BT_CONFIG)) {
4518 l2cap_check_encryption(chan, encrypt);
4523 if (chan->state == BT_CONNECT) {
/* Security now satisfied: fire the deferred Connect Request. */
4525 struct l2cap_conn_req req;
4526 req.scid = cpu_to_le16(chan->scid);
4527 req.psm = chan->psm;
4529 chan->ident = l2cap_get_ident(conn);
4530 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4532 l2cap_send_cmd(conn, chan->ident,
4533 L2CAP_CONN_REQ, sizeof(req), &req);
4535 __clear_chan_timer(chan);
4536 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4538 } else if (chan->state == BT_CONNECT2) {
/* Incoming connection was held for security: answer it now. */
4539 struct l2cap_conn_rsp rsp;
4543 if (bt_sk(sk)->defer_setup) {
4544 struct sock *parent = bt_sk(sk)->parent;
4545 res = L2CAP_CR_PEND;
4546 stat = L2CAP_CS_AUTHOR_PEND;
4548 parent->sk_data_ready(parent, 0);
4550 l2cap_state_change(chan, BT_CONFIG);
4551 res = L2CAP_CR_SUCCESS;
4552 stat = L2CAP_CS_NO_INFO;
/* Security failed path: disconnect timer + SEC_BLOCK result. */
4555 l2cap_state_change(chan, BT_DISCONN);
4556 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4557 res = L2CAP_CR_SEC_BLOCK;
4558 stat = L2CAP_CS_NO_INFO;
4561 rsp.scid = cpu_to_le16(chan->dcid);
4562 rsp.dcid = cpu_to_le16(chan->scid);
4563 rsp.result = cpu_to_le16(res);
4564 rsp.status = cpu_to_le16(stat);
4565 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4572 read_unlock(&conn->chan_lock);
4577 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
/* HCI entry point for inbound ACL data.  Reassembles fragmented L2CAP
 * frames: a start fragment (ACL_CONT not set) carries the Basic L2CAP
 * header with the total length; continuation fragments are appended to
 * conn->rx_skb until conn->rx_len reaches zero, at which point the
 * complete frame is handed to l2cap_recv_frame(). */
4579 struct l2cap_conn *conn = hcon->l2cap_data;
/* First data on this link: create the L2CAP connection state lazily. */
4582 conn = l2cap_conn_add(hcon, 0);
4587 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4589 if (!(flags & ACL_CONT)) {
4590 struct l2cap_hdr *hdr;
4591 struct l2cap_chan *chan;
/* A start fragment while reassembly is still in progress means the
 * previous frame was truncated: drop the partial skb, flag the link. */
4596 BT_ERR("Unexpected start frame (len %d)", skb->len);
4597 kfree_skb(conn->rx_skb);
4598 conn->rx_skb = NULL;
4600 l2cap_conn_unreliable(conn, ECOMM);
4603 /* Start fragment always begin with Basic L2CAP header */
4604 if (skb->len < L2CAP_HDR_SIZE) {
4605 BT_ERR("Frame is too short (len %d)", skb->len);
4606 l2cap_conn_unreliable(conn, ECOMM);
/* Total frame length = header's payload length + the header itself. */
4610 hdr = (struct l2cap_hdr *) skb->data;
4611 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4612 cid = __le16_to_cpu(hdr->cid);
4614 if (len == skb->len) {
4615 /* Complete frame received */
4616 l2cap_recv_frame(conn, skb);
4620 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
/* A start fragment longer than the advertised total is malformed. */
4622 if (skb->len > len) {
4623 BT_ERR("Frame is too long (len %d, expected len %d)",
4625 l2cap_conn_unreliable(conn, ECOMM);
/* Enforce the receiving channel's MTU before buffering the frame. */
4629 chan = l2cap_get_chan_by_scid(conn, cid);
4631 if (chan && chan->sk) {
4632 struct sock *sk = chan->sk;
4634 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4635 BT_ERR("Frame exceeding recv MTU (len %d, "
4639 l2cap_conn_unreliable(conn, ECOMM);
4645 /* Allocate skb for the complete frame (with header) */
4646 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4650 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes are still outstanding. */
4652 conn->rx_len = len - skb->len;
4654 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation without a pending reassembly is a protocol error. */
4656 if (!conn->rx_len) {
4657 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4658 l2cap_conn_unreliable(conn, ECOMM);
/* Fragment overruns what is left of the frame: abort reassembly. */
4662 if (skb->len > conn->rx_len) {
4663 BT_ERR("Fragment is too long (len %d, expected %d)",
4664 skb->len, conn->rx_len);
4665 kfree_skb(conn->rx_skb);
4666 conn->rx_skb = NULL;
4668 l2cap_conn_unreliable(conn, ECOMM);
/* Append this fragment to the reassembly buffer. */
4672 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4674 conn->rx_len -= skb->len;
4676 if (!conn->rx_len) {
4677 /* Complete frame received */
4678 l2cap_recv_frame(conn, conn->rx_skb);
4679 conn->rx_skb = NULL;
4688 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4690 struct l2cap_chan *c;
4692 read_lock_bh(&chan_list_lock);
4694 list_for_each_entry(c, &chan_list, global_l) {
4695 struct sock *sk = c->sk;
4697 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4698 batostr(&bt_sk(sk)->src),
4699 batostr(&bt_sk(sk)->dst),
4700 c->state, __le16_to_cpu(c->psm),
4701 c->scid, c->dcid, c->imtu, c->omtu,
4702 c->sec_level, c->mode);
4705 read_unlock_bh(&chan_list_lock);
4710 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4712 return single_open(file, l2cap_debugfs_show, inode->i_private);
4715 static const struct file_operations l2cap_debugfs_fops = {
4716 .open = l2cap_debugfs_open,
4718 .llseek = seq_lseek,
4719 .release = single_release,
/* Handle for the "l2cap" debugfs file; created in l2cap_init() and
 * removed in l2cap_exit(). */
4722 static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI core: HCI invokes
 * these callbacks for connection setup/teardown, security results and
 * inbound ACL data.  NOTE(review): upstream versions also initialize a
 * .name field here — confirm against the struct hci_proto definition. */
4724 static struct hci_proto l2cap_hci_proto = {
4726 .id = HCI_PROTO_L2CAP,
4727 .connect_ind = l2cap_connect_ind,
4728 .connect_cfm = l2cap_connect_cfm,
4729 .disconn_ind = l2cap_disconn_ind,
4730 .disconn_cfm = l2cap_disconn_cfm,
4731 .security_cfm = l2cap_security_cfm,
4732 .recv_acldata = l2cap_recv_acldata
4735 int __init l2cap_init(void)
/* Module init: set up the L2CAP socket layer, register with the HCI
 * core, then create the debugfs entry (debugfs failure is non-fatal). */
4739 err = l2cap_init_sockets();
/* Hook l2cap_hci_proto's callbacks into the HCI core. */
4743 err = hci_register_proto(&l2cap_hci_proto);
4745 BT_ERR("L2CAP protocol registration failed");
/* Undo the BTPROTO_L2CAP socket registration before bailing out. */
4746 bt_sock_unregister(BTPROTO_L2CAP);
4751 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4752 bt_debugfs, NULL, &l2cap_debugfs_fops);
4754 BT_ERR("Failed to create L2CAP debug file");
/* Error path: release the socket state created above. */
4760 l2cap_cleanup_sockets();
4764 void l2cap_exit(void)
4766 debugfs_remove(l2cap_debugfs);
4768 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4769 BT_ERR("L2CAP protocol unregistration failed");
4771 l2cap_cleanup_sockets();
/* "disable_ertm" module parameter (mode 0644, so also writable via
 * sysfs): turns off L2CAP enhanced retransmission mode. */
4774 module_param(disable_ertm, bool, 0644);
4775 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");