2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* File-scope L2CAP state: feature mask advertised to peers, the fixed
 * channel bitmap, a workqueue for deferred busy handling, and the global
 * channel list with its guarding rwlock. */
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static struct workqueue_struct *_busy_wq;
66 static LIST_HEAD(chan_list);
67 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for routines referenced before their definitions. */
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
73 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
75 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
76 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
77 struct l2cap_chan *chan, int err);
79 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
81 /* ---- L2CAP channels ---- */
/* Take a reference on a channel. */
83 static inline void chan_hold(struct l2cap_chan *c)
85 atomic_inc(&c->refcnt);
/* Drop a reference; the free on the final put is on a line elided from
 * this dump. */
88 static inline void chan_put(struct l2cap_chan *c)
90 if (atomic_dec_and_test(&c->refcnt))
/* Find a channel on this connection by destination CID.
 * Caller must hold conn->chan_lock. */
94 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
98 list_for_each_entry(c, &conn->chan_l, list) {
/* Find a channel on this connection by source CID.
 * Caller must hold conn->chan_lock. */
106 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
108 struct l2cap_chan *c;
110 list_for_each_entry(c, &conn->chan_l, list) {
117 /* Find channel with given SCID.
118 * Returns locked socket */
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
121 struct l2cap_chan *c;
123 read_lock(&conn->chan_lock);
124 c = __l2cap_get_chan_by_scid(conn, cid);
127 read_unlock(&conn->chan_lock);
/* Find a channel by pending signalling-command identifier.
 * Caller must hold conn->chan_lock. */
131 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
133 struct l2cap_chan *c;
135 list_for_each_entry(c, &conn->chan_l, list) {
136 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
142 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
144 struct l2cap_chan *c;
146 read_lock(&conn->chan_lock);
147 c = __l2cap_get_chan_by_ident(conn, ident);
150 read_unlock(&conn->chan_lock);
/* Global lookup: channel bound to (source port, source bdaddr).
 * Caller must hold chan_list_lock. */
154 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
156 struct l2cap_chan *c;
158 list_for_each_entry(c, &chan_list, global_l) {
159 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM. If psm is non-zero the binding fails when the
 * (psm, src) pair is already taken; otherwise a free odd PSM in
 * 0x1001..0x10ff is allocated for both psm and sport. */
168 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
172 write_lock_bh(&chan_list_lock);
174 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
187 for (p = 0x1001; p < 0x1100; p += 2)
188 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
189 chan->psm = cpu_to_le16(p);
190 chan->sport = cpu_to_le16(p);
197 write_unlock_bh(&chan_list_lock);
/* Record a fixed SCID on the channel under the global lock (assignment
 * itself is on a line elided from this dump). */
201 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
203 write_lock_bh(&chan_list_lock);
207 write_unlock_bh(&chan_list_lock);
/* Return the first dynamic CID not yet used as an SCID on this
 * connection. */
212 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
214 u16 cid = L2CAP_CID_DYN_START;
216 for (; cid < L2CAP_CID_DYN_END; cid++) {
217 if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm a channel timer relative to now.
 * NOTE(review): the BT_DBG format says "chan %p" but passes chan->sk;
 * it should pass chan to match the message — confirm against upstream. */
224 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
226 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
228 if (!mod_timer(timer, jiffies + timeout))
/* Cancel a pending channel timer; the reference drop on successful
 * deletion is on a line elided from this dump. */
232 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
234 BT_DBG("chan %p state %d", chan, chan->state);
236 if (timer_pending(timer) && del_timer(timer))
/* Set the channel state and notify the owner through the state_change
 * callback. */
240 static void l2cap_state_change(struct l2cap_chan *chan, int state)
243 chan->ops->state_change(chan->data, state);
/* Channel timer expiry handler. Derives an error reason from the current
 * state (default reason is on an elided line — presumably ETIMEDOUT;
 * confirm upstream), closes the channel, and notifies the owner. If the
 * socket is locked by userspace the work is retried in HZ/5. */
246 static void l2cap_chan_timeout(unsigned long arg)
248 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
249 struct sock *sk = chan->sk;
252 BT_DBG("chan %p state %d", chan, chan->state);
256 if (sock_owned_by_user(sk)) {
257 /* sk is owned by user. Try again later */
258 __set_chan_timer(chan, HZ / 5);
264 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
265 reason = ECONNREFUSED;
266 else if (chan->state == BT_CONNECT &&
267 chan->sec_level != BT_SECURITY_SDP)
268 reason = ECONNREFUSED;
272 l2cap_chan_close(chan, reason);
276 chan->ops->close(chan->data);
/* Allocate a channel bound to sk, link it onto the global channel list,
 * set up its timer, and start it in BT_OPEN with one reference held. */
280 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
282 struct l2cap_chan *chan;
284 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
290 write_lock_bh(&chan_list_lock);
291 list_add(&chan->global_l, &chan_list);
292 write_unlock_bh(&chan_list_lock);
294 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
296 chan->state = BT_OPEN;
298 atomic_set(&chan->refcnt, 1);
/* Unlink a channel from the global list; the final reference drop is on
 * a line elided from this dump. */
303 void l2cap_chan_destroy(struct l2cap_chan *chan)
305 write_lock_bh(&chan_list_lock);
306 list_del(&chan->global_l);
307 write_unlock_bh(&chan_list_lock);
/* Attach a channel to a connection: assign SCID/DCID and omtu according
 * to the channel type (LE data CID for LE links, dynamic CID for ACL
 * connection-oriented, fixed CIDs for connectionless and raw), then link
 * it into conn->chan_l. Caller must hold conn->chan_lock. */
312 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
314 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
315 chan->psm, chan->dcid);
/* 0x13: disconnection reason reported if the peer closes first —
 * presumably "remote user terminated"; confirm against HCI error codes. */
317 conn->disc_reason = 0x13;
321 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
322 if (conn->hcon->type == LE_LINK) {
324 chan->omtu = L2CAP_LE_DEFAULT_MTU;
325 chan->scid = L2CAP_CID_LE_DATA;
326 chan->dcid = L2CAP_CID_LE_DATA;
328 /* Alloc CID for connection-oriented socket */
329 chan->scid = l2cap_alloc_cid(conn);
330 chan->omtu = L2CAP_DEFAULT_MTU;
332 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
333 /* Connectionless socket */
334 chan->scid = L2CAP_CID_CONN_LESS;
335 chan->dcid = L2CAP_CID_CONN_LESS;
336 chan->omtu = L2CAP_DEFAULT_MTU;
338 /* Raw socket can send/recv signalling messages only */
339 chan->scid = L2CAP_CID_SIGNALING;
340 chan->dcid = L2CAP_CID_SIGNALING;
341 chan->omtu = L2CAP_DEFAULT_MTU;
346 list_add(&chan->list, &conn->chan_l);
/* Detach a channel from its connection and tear down ERTM state:
 * unlink from conn->chan_l, drop the hci_conn reference, mark the socket
 * zapped/closed, wake any accepting parent, and purge tx/srej/busy
 * queues plus the SREJ list. */
350 * Must be called on the locked socket. */
351 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
353 struct sock *sk = chan->sk;
354 struct l2cap_conn *conn = chan->conn;
355 struct sock *parent = bt_sk(sk)->parent;
357 __clear_chan_timer(chan);
359 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
362 /* Delete from channel list */
363 write_lock_bh(&conn->chan_lock);
364 list_del(&chan->list);
365 write_unlock_bh(&conn->chan_lock);
369 hci_conn_put(conn->hcon);
372 l2cap_state_change(chan, BT_CLOSED);
373 sock_set_flag(sk, SOCK_ZAPPED);
379 bt_accept_unlink(sk);
380 parent->sk_data_ready(parent, 0);
382 sk->sk_state_change(sk);
/* Skip queue teardown if both config directions completed — the early
 * return is on an elided line. */
384 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
385 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
388 skb_queue_purge(&chan->tx_q);
390 if (chan->mode == L2CAP_MODE_ERTM) {
391 struct srej_list *l, *tmp;
393 __clear_retrans_timer(chan);
394 __clear_monitor_timer(chan);
395 __clear_ack_timer(chan);
397 skb_queue_purge(&chan->srej_q);
398 skb_queue_purge(&chan->busy_q);
400 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel of a listening socket. */
407 static void l2cap_chan_cleanup_listen(struct sock *parent)
411 BT_DBG("parent %p", parent);
413 /* Close not yet accepted channels */
414 while ((sk = bt_accept_dequeue(parent, NULL))) {
415 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
416 __clear_chan_timer(chan);
418 l2cap_chan_close(chan, ECONNRESET);
420 chan->ops->close(chan->data);
/* State-driven channel shutdown. Listening sockets close their pending
 * children; connected/configuring ACL channels send a Disconnect request
 * and wait, CONNECT2 channels answer the pending Connect request with a
 * rejection, other states just delete the channel or zap the socket. */
424 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
426 struct l2cap_conn *conn = chan->conn;
427 struct sock *sk = chan->sk;
429 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
431 switch (chan->state) {
433 l2cap_chan_cleanup_listen(sk);
435 l2cap_state_change(chan, BT_CLOSED);
436 sock_set_flag(sk, SOCK_ZAPPED);
441 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
442 conn->hcon->type == ACL_LINK) {
443 __clear_chan_timer(chan);
444 __set_chan_timer(chan, sk->sk_sndtimeo);
445 l2cap_send_disconn_req(conn, chan, reason);
447 l2cap_chan_del(chan, reason);
451 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
452 conn->hcon->type == ACL_LINK) {
453 struct l2cap_conn_rsp rsp;
/* Security block if setup was deferred, otherwise bad PSM. */
456 if (bt_sk(sk)->defer_setup)
457 result = L2CAP_CR_SEC_BLOCK;
459 result = L2CAP_CR_BAD_PSM;
460 l2cap_state_change(chan, BT_DISCONN);
462 rsp.scid = cpu_to_le16(chan->dcid);
463 rsp.dcid = cpu_to_le16(chan->scid);
464 rsp.result = cpu_to_le16(result);
465 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
466 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
470 l2cap_chan_del(chan, reason);
475 l2cap_chan_del(chan, reason);
479 sock_set_flag(sk, SOCK_ZAPPED);
/* Map channel type + security level to an HCI authentication requirement:
 * raw channels use dedicated bonding, PSM 0x0001 (SDP) never bonds, and
 * everything else uses general bonding. */
484 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
486 if (chan->chan_type == L2CAP_CHAN_RAW) {
487 switch (chan->sec_level) {
488 case BT_SECURITY_HIGH:
489 return HCI_AT_DEDICATED_BONDING_MITM;
490 case BT_SECURITY_MEDIUM:
491 return HCI_AT_DEDICATED_BONDING;
493 return HCI_AT_NO_BONDING;
495 } else if (chan->psm == cpu_to_le16(0x0001)) {
/* SDP traffic is downgraded from LOW to the dedicated SDP level. */
496 if (chan->sec_level == BT_SECURITY_LOW)
497 chan->sec_level = BT_SECURITY_SDP;
499 if (chan->sec_level == BT_SECURITY_HIGH)
500 return HCI_AT_NO_BONDING_MITM;
502 return HCI_AT_NO_BONDING;
504 switch (chan->sec_level) {
505 case BT_SECURITY_HIGH:
506 return HCI_AT_GENERAL_BONDING_MITM;
507 case BT_SECURITY_MEDIUM:
508 return HCI_AT_GENERAL_BONDING;
510 return HCI_AT_NO_BONDING;
515 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the
 * underlying link. */
516 static inline int l2cap_check_security(struct l2cap_chan *chan)
518 struct l2cap_conn *conn = chan->conn;
521 auth_type = l2cap_get_auth_type(chan);
523 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved 1..128 range. */
526 static u8 l2cap_get_ident(struct l2cap_conn *conn)
530 /* Get next available identificator.
531 * 1 - 128 are used by kernel.
532 * 129 - 199 are reserved.
533 * 200 - 254 are used by utilities like l2ping, etc.
536 spin_lock_bh(&conn->lock);
538 if (++conn->tx_ident > 128)
543 spin_unlock_bh(&conn->lock);
/* Build a signalling command and transmit it on the connection's ACL
 * link, using non-flushable packets when the controller supports them. */
548 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
550 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
553 BT_DBG("code 0x%2.2x", code);
558 if (lmp_no_flush_capable(conn->hcon->hdev))
559 flags = ACL_START_NO_FLUSH;
563 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
565 hci_send_acl(conn->hcon, skb, flags);
/* Build and send an ERTM S-frame carrying the given control field,
 * folding in pending F- and P-bits and appending a CRC16 FCS when the
 * channel uses it. No-op unless the channel is BT_CONNECTED. */
568 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
571 struct l2cap_hdr *lh;
572 struct l2cap_conn *conn = chan->conn;
573 int count, hlen = L2CAP_HDR_SIZE + 2;
576 if (chan->state != BT_CONNECTED)
579 if (chan->fcs == L2CAP_FCS_CRC16)
582 BT_DBG("chan %p, control 0x%2.2x", chan, control);
584 count = min_t(unsigned int, conn->mtu, hlen);
585 control |= L2CAP_CTRL_FRAME_TYPE;
587 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
588 control |= L2CAP_CTRL_FINAL;
589 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
592 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
593 control |= L2CAP_CTRL_POLL;
594 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
597 skb = bt_skb_alloc(count, GFP_ATOMIC);
601 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
602 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
603 lh->cid = cpu_to_le16(chan->dcid);
604 put_unaligned_le16(control, skb_put(skb, 2));
606 if (chan->fcs == L2CAP_FCS_CRC16) {
607 u16 fcs = crc16(0, (u8 *)lh, count - 2);
608 put_unaligned_le16(fcs, skb_put(skb, 2));
611 if (lmp_no_flush_capable(conn->hcon->hdev))
612 flags = ACL_START_NO_FLUSH;
616 bt_cb(skb)->force_active = chan->force_active;
618 hci_send_acl(chan->conn->hcon, skb, flags);
/* Send RR, or RNR when locally busy, acknowledging buffer_seq. */
621 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
623 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
624 control |= L2CAP_SUPER_RCV_NOT_READY;
625 chan->conn_state |= L2CAP_CONN_RNR_SENT;
627 control |= L2CAP_SUPER_RCV_READY;
629 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
631 l2cap_send_sframe(chan, control);
/* True when no Connect request is outstanding for this channel. */
634 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
636 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment: once the feature-mask exchange has
 * completed send a Connect request (subject to security), otherwise
 * start the information request/timer exchange first. */
639 static void l2cap_do_start(struct l2cap_chan *chan)
641 struct l2cap_conn *conn = chan->conn;
643 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
644 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
647 if (l2cap_check_security(chan) &&
648 __l2cap_no_conn_pending(chan)) {
649 struct l2cap_conn_req req;
650 req.scid = cpu_to_le16(chan->scid);
653 chan->ident = l2cap_get_ident(conn);
654 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
656 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
660 struct l2cap_info_req req;
661 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
663 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
664 conn->info_ident = l2cap_get_ident(conn);
666 mod_timer(&conn->info_timer, jiffies +
667 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
669 l2cap_send_cmd(conn, conn->info_ident,
670 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check an L2CAP mode against both the remote feature mask and our local
 * one (ERTM/streaming are added to the local mask on the line above the
 * switch — the guarding condition, if any, is elided in this dump). */
674 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
676 u32 local_feat_mask = l2cap_feat_mask;
678 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
681 case L2CAP_MODE_ERTM:
682 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
683 case L2CAP_MODE_STREAMING:
684 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Stop ERTM timers, send a Disconnect request for the channel, and move
 * it to BT_DISCONN. */
690 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
693 struct l2cap_disconn_req req;
700 if (chan->mode == L2CAP_MODE_ERTM) {
701 __clear_retrans_timer(chan);
702 __clear_monitor_timer(chan);
703 __clear_ack_timer(chan);
706 req.dcid = cpu_to_le16(chan->dcid);
707 req.scid = cpu_to_le16(chan->scid);
708 l2cap_send_cmd(conn, l2cap_get_ident(conn),
709 L2CAP_DISCONN_REQ, sizeof(req), &req);
711 l2cap_state_change(chan, BT_DISCONN);
715 /* ---- L2CAP connections ---- */
/* Walk all channels on a connection after the feature exchange and drive
 * them forward: BT_CONNECT channels get a Connect request (or are closed
 * if their mode is unsupported), BT_CONNECT2 channels get their pending
 * Connect response and, on success, the first Configure request.
 * NOTE(review): the list is taken with read_lock() but the close path
 * drops/retakes it with the _bh variants (L748/L750) — verify the
 * lock/unlock variants are intentional. */
716 static void l2cap_conn_start(struct l2cap_conn *conn)
718 struct l2cap_chan *chan, *tmp;
720 BT_DBG("conn %p", conn);
722 read_lock(&conn->chan_lock);
724 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
725 struct sock *sk = chan->sk;
729 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
734 if (chan->state == BT_CONNECT) {
735 struct l2cap_conn_req req;
737 if (!l2cap_check_security(chan) ||
738 !__l2cap_no_conn_pending(chan)) {
743 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
744 && test_bit(CONF_STATE2_DEVICE,
745 &chan->conf_state)) {
746 /* l2cap_chan_close() calls list_del(chan)
747 * so release the lock */
748 read_unlock_bh(&conn->chan_lock);
749 l2cap_chan_close(chan, ECONNRESET);
750 read_lock_bh(&conn->chan_lock);
755 req.scid = cpu_to_le16(chan->scid);
758 chan->ident = l2cap_get_ident(conn);
759 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
761 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
764 } else if (chan->state == BT_CONNECT2) {
765 struct l2cap_conn_rsp rsp;
767 rsp.scid = cpu_to_le16(chan->dcid);
768 rsp.dcid = cpu_to_le16(chan->scid);
770 if (l2cap_check_security(chan)) {
771 if (bt_sk(sk)->defer_setup) {
772 struct sock *parent = bt_sk(sk)->parent;
773 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
774 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
775 parent->sk_data_ready(parent, 0);
778 l2cap_state_change(chan, BT_CONFIG);
779 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
780 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
783 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
784 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
787 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
790 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
791 rsp.result != L2CAP_CR_SUCCESS) {
796 set_bit(CONF_REQ_SENT, &chan->conf_state);
797 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
798 l2cap_build_conf_req(chan, buf), buf);
799 chan->num_conf_req++;
805 read_unlock(&conn->chan_lock);
808 /* Find socket with cid and source bdaddr.
809 * Returns closest match, locked.
/* Exact source-address matches return immediately; a BDADDR_ANY binding
 * is remembered as the fallback (returned via the elided tail). */
811 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
813 struct l2cap_chan *c, *c1 = NULL;
815 read_lock(&chan_list_lock);
817 list_for_each_entry(c, &chan_list, global_l) {
818 struct sock *sk = c->sk;
820 if (state && c->state != state)
823 if (c->scid == cid) {
825 if (!bacmp(&bt_sk(sk)->src, src)) {
826 read_unlock(&chan_list_lock);
831 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
836 read_unlock(&chan_list_lock);
/* Incoming LE connection: find a listener on the LE data CID, spawn a
 * child channel, attach it to the connection, and wake the parent. */
841 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
843 struct sock *parent, *sk;
844 struct l2cap_chan *chan, *pchan;
848 /* Check if we have socket listening on cid */
849 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
856 bh_lock_sock(parent);
858 /* Check for backlog size */
859 if (sk_acceptq_is_full(parent)) {
860 BT_DBG("backlog full %d", parent->sk_ack_backlog);
864 chan = pchan->ops->new_connection(pchan->data);
870 write_lock_bh(&conn->chan_lock);
872 hci_conn_hold(conn->hcon);
874 bacpy(&bt_sk(sk)->src, conn->src);
875 bacpy(&bt_sk(sk)->dst, conn->dst);
877 bt_accept_enqueue(parent, sk);
879 __l2cap_chan_add(conn, chan);
881 __set_chan_timer(chan, sk->sk_sndtimeo);
883 l2cap_state_change(chan, BT_CONNECTED);
884 parent->sk_data_ready(parent, 0);
886 write_unlock_bh(&conn->chan_lock);
889 bh_unlock_sock(parent);
/* Mark a channel fully connected: clear config state and timer, move the
 * socket to BT_CONNECTED, and wake the accepting parent if any. */
892 static void l2cap_chan_ready(struct sock *sk)
894 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
895 struct sock *parent = bt_sk(sk)->parent;
897 BT_DBG("sk %p, parent %p", sk, parent);
899 chan->conf_state = 0;
900 __clear_chan_timer(chan);
902 sk->sk_state = BT_CONNECTED;
903 sk->sk_state_change(sk);
906 parent->sk_data_ready(parent, 0);
/* Link-level connection established: handle incoming LE links, then for
 * every channel either complete SMP security (LE), mark non-connection-
 * oriented channels connected, or continue outgoing setup. */
909 static void l2cap_conn_ready(struct l2cap_conn *conn)
911 struct l2cap_chan *chan;
913 BT_DBG("conn %p", conn);
915 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
916 l2cap_le_conn_ready(conn);
918 read_lock(&conn->chan_lock);
920 list_for_each_entry(chan, &conn->chan_l, list) {
921 struct sock *sk = chan->sk;
925 if (conn->hcon->type == LE_LINK)
926 if (smp_conn_security(conn, chan->sec_level))
927 l2cap_chan_ready(sk);
929 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
930 __clear_chan_timer(chan);
931 l2cap_state_change(chan, BT_CONNECTED);
932 sk->sk_state_change(sk);
934 } else if (chan->state == BT_CONNECT)
935 l2cap_do_start(chan);
940 read_unlock(&conn->chan_lock);
943 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate err to every channel that requested reliability (delivery of
 * the error to the socket is on an elided line). */
944 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
946 struct l2cap_chan *chan;
948 BT_DBG("conn %p", conn);
950 read_lock(&conn->chan_lock);
952 list_for_each_entry(chan, &conn->chan_l, list) {
953 struct sock *sk = chan->sk;
955 if (chan->force_reliable)
959 read_unlock(&conn->chan_lock);
/* Information-request timer expired: give up on the feature exchange and
 * start the queued channels anyway. */
962 static void l2cap_info_timeout(unsigned long arg)
964 struct l2cap_conn *conn = (void *) arg;
966 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
967 conn->info_ident = 0;
969 l2cap_conn_start(conn);
/* Tear down an L2CAP connection: free the partial rx skb, delete every
 * channel with err, stop the info/security timers, and detach from the
 * hci_conn. */
972 static void l2cap_conn_del(struct hci_conn *hcon, int err)
974 struct l2cap_conn *conn = hcon->l2cap_data;
975 struct l2cap_chan *chan, *l;
981 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
983 kfree_skb(conn->rx_skb);
986 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
989 l2cap_chan_del(chan, err);
991 chan->ops->close(chan->data);
994 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
995 del_timer_sync(&conn->info_timer);
997 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
998 del_timer(&conn->security_timer);
1000 hcon->l2cap_data = NULL;
/* SMP security procedure timed out: drop the whole connection. */
1004 static void security_timeout(unsigned long arg)
1006 struct l2cap_conn *conn = (void *) arg;
1008 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Get-or-create the l2cap_conn for an hci_conn: allocate, pick the MTU
 * from the link type, init locks/lists, and arm the security timer (LE)
 * or info timer (ACL). Returns the existing conn when already present. */
1011 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1013 struct l2cap_conn *conn = hcon->l2cap_data;
1018 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1022 hcon->l2cap_data = conn;
1025 BT_DBG("hcon %p conn %p", hcon, conn);
1027 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1028 conn->mtu = hcon->hdev->le_mtu;
1030 conn->mtu = hcon->hdev->acl_mtu;
1032 conn->src = &hcon->hdev->bdaddr;
1033 conn->dst = &hcon->dst;
1035 conn->feat_mask = 0;
1037 spin_lock_init(&conn->lock);
1038 rwlock_init(&conn->chan_lock);
1040 INIT_LIST_HEAD(&conn->chan_l);
1042 if (hcon->type == LE_LINK)
1043 setup_timer(&conn->security_timer, security_timeout,
1044 (unsigned long) conn);
1046 setup_timer(&conn->info_timer, l2cap_info_timeout,
1047 (unsigned long) conn);
/* 0x13: default disconnect reason — presumably "remote user
 * terminated"; confirm against HCI error codes. */
1049 conn->disc_reason = 0x13;
/* Locked wrapper around __l2cap_chan_add(). */
1054 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1056 write_lock_bh(&conn->chan_lock);
1057 __l2cap_chan_add(conn, chan);
1058 write_unlock_bh(&conn->chan_lock);
1061 /* ---- Socket interface ---- */
1063 /* Find socket with psm and source bdaddr.
1064 * Returns closest match.
/* Exact source-address matches return immediately; a BDADDR_ANY binding
 * is remembered as the fallback (returned via the elided tail). */
1066 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1068 struct l2cap_chan *c, *c1 = NULL;
1070 read_lock(&chan_list_lock);
1072 list_for_each_entry(c, &chan_list, global_l) {
1073 struct sock *sk = c->sk;
1075 if (state && c->state != state)
1078 if (c->psm == psm) {
1080 if (!bacmp(&bt_sk(sk)->src, src)) {
1081 read_unlock(&chan_list_lock);
1086 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1091 read_unlock(&chan_list_lock);
/* Outgoing connect: route to an hci_dev, create the LE or ACL link with
 * the channel's security requirements, attach the channel, and start
 * channel setup if the link is already up. Returns -EHOSTUNREACH when no
 * route exists, a PTR_ERR from hci_connect on link failure. */
1096 int l2cap_chan_connect(struct l2cap_chan *chan)
1098 struct sock *sk = chan->sk;
1099 bdaddr_t *src = &bt_sk(sk)->src;
1100 bdaddr_t *dst = &bt_sk(sk)->dst;
1101 struct l2cap_conn *conn;
1102 struct hci_conn *hcon;
1103 struct hci_dev *hdev;
1107 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1110 hdev = hci_get_route(dst, src);
1112 return -EHOSTUNREACH;
1114 hci_dev_lock_bh(hdev);
1116 auth_type = l2cap_get_auth_type(chan);
1118 if (chan->dcid == L2CAP_CID_LE_DATA)
1119 hcon = hci_connect(hdev, LE_LINK, dst,
1120 chan->sec_level, auth_type);
1122 hcon = hci_connect(hdev, ACL_LINK, dst,
1123 chan->sec_level, auth_type);
1126 err = PTR_ERR(hcon);
1130 conn = l2cap_conn_add(hcon, 0);
1137 /* Update source addr of the socket */
1138 bacpy(src, conn->src);
1140 l2cap_chan_add(conn, chan);
1142 l2cap_state_change(chan, BT_CONNECT);
1143 __set_chan_timer(chan, sk->sk_sndtimeo);
1145 if (hcon->state == BT_CONNECTED) {
1146 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1147 __clear_chan_timer(chan);
1148 if (l2cap_check_security(chan))
1149 l2cap_state_change(chan, BT_CONNECTED);
1151 l2cap_do_start(chan);
1157 hci_dev_unlock_bh(hdev);
/* Sleep until all outstanding ERTM frames are acknowledged (or the
 * connection goes away), honouring signals and socket errors.
 * NOTE(review): timeo's initialization is on an elided line — confirm it
 * is set before the first schedule_timeout(). */
1162 int __l2cap_wait_ack(struct sock *sk)
1164 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1165 DECLARE_WAITQUEUE(wait, current);
1169 add_wait_queue(sk_sleep(sk), &wait);
1170 while ((chan->unacked_frames > 0 && chan->conn)) {
1171 set_current_state(TASK_INTERRUPTIBLE);
1176 if (signal_pending(current)) {
1177 err = sock_intr_errno(timeo);
1182 timeo = schedule_timeout(timeo);
1185 err = sock_error(sk);
1189 set_current_state(TASK_RUNNING);
1190 remove_wait_queue(sk_sleep(sk), &wait);
/* Monitor timer: poll the peer again up to remote_max_tx times, then
 * give up and disconnect the channel. */
1194 static void l2cap_monitor_timeout(unsigned long arg)
1196 struct l2cap_chan *chan = (void *) arg;
1197 struct sock *sk = chan->sk;
1199 BT_DBG("chan %p", chan);
1202 if (chan->retry_count >= chan->remote_max_tx) {
1203 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1208 chan->retry_count++;
1209 __set_monitor_timer(chan);
1211 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Retransmission timer: enter the WAIT_F poll cycle by starting the
 * monitor timer and sending an RR/RNR with the P bit set. */
1215 static void l2cap_retrans_timeout(unsigned long arg)
1217 struct l2cap_chan *chan = (void *) arg;
1218 struct sock *sk = chan->sk;
1220 BT_DBG("chan %p", chan);
1223 chan->retry_count = 1;
1224 __set_monitor_timer(chan);
1226 chan->conn_state |= L2CAP_CONN_WAIT_F;
1228 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Drop acknowledged frames from the head of tx_q, up to (but not
 * including) expected_ack_seq; stop the retrans timer when none remain
 * unacked. */
1232 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1234 struct sk_buff *skb;
1236 while ((skb = skb_peek(&chan->tx_q)) &&
1237 chan->unacked_frames) {
1238 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1241 skb = skb_dequeue(&chan->tx_q);
1244 chan->unacked_frames--;
1247 if (!chan->unacked_frames)
1248 __clear_retrans_timer(chan);
/* Hand a finished frame to the HCI layer, marking it non-flushable when
 * the channel is not flushable and the controller supports it. */
1251 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1253 struct hci_conn *hcon = chan->conn->hcon;
1256 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1258 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1259 flags = ACL_START_NO_FLUSH;
1263 bt_cb(skb)->force_active = chan->force_active;
1264 hci_send_acl(hcon, skb, flags);
/* Streaming mode: drain tx_q, stamping each frame with the next TX
 * sequence number (mod 64) and an in-place CRC16 FCS when enabled. */
1267 void l2cap_streaming_send(struct l2cap_chan *chan)
1269 struct sk_buff *skb;
1272 while ((skb = skb_dequeue(&chan->tx_q))) {
1273 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1274 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1275 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1277 if (chan->fcs == L2CAP_FCS_CRC16) {
1278 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1279 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1282 l2cap_do_send(chan, skb);
1284 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the queued I-frame with the given tx_seq: locate it in
 * tx_q, clone it, rebuild the control field (keeping SAR bits, folding
 * in a pending F bit and the current req_seq), refresh the FCS, and
 * send. Disconnects if the frame already hit remote_max_tx retries. */
1288 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1290 struct sk_buff *skb, *tx_skb;
1293 skb = skb_peek(&chan->tx_q);
1298 if (bt_cb(skb)->tx_seq == tx_seq)
1301 if (skb_queue_is_last(&chan->tx_q, skb))
1304 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1306 if (chan->remote_max_tx &&
1307 bt_cb(skb)->retries == chan->remote_max_tx) {
1308 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1312 tx_skb = skb_clone(skb, GFP_ATOMIC);
1313 bt_cb(skb)->retries++;
1314 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1315 control &= L2CAP_CTRL_SAR;
1317 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1318 control |= L2CAP_CTRL_FINAL;
1319 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1322 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1323 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1325 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1327 if (chan->fcs == L2CAP_FCS_CRC16) {
1328 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1329 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1332 l2cap_do_send(chan, tx_skb);
1335 int l2cap_ertm_send(struct l2cap_chan *chan)
1337 struct sk_buff *skb, *tx_skb;
1341 if (chan->state != BT_CONNECTED)
1344 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1346 if (chan->remote_max_tx &&
1347 bt_cb(skb)->retries == chan->remote_max_tx) {
1348 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1352 tx_skb = skb_clone(skb, GFP_ATOMIC);
1354 bt_cb(skb)->retries++;
1356 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1357 control &= L2CAP_CTRL_SAR;
1359 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1360 control |= L2CAP_CTRL_FINAL;
1361 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1363 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1364 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1365 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1368 if (chan->fcs == L2CAP_FCS_CRC16) {
1369 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1370 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1373 l2cap_do_send(chan, tx_skb);
1375 __set_retrans_timer(chan);
1377 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1378 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1380 if (bt_cb(skb)->retries == 1)
1381 chan->unacked_frames++;
1383 chan->frames_sent++;
1385 if (skb_queue_is_last(&chan->tx_q, skb))
1386 chan->tx_send_head = NULL;
1388 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send window to the oldest unacked frame and resend from
 * there via l2cap_ertm_send(). */
1396 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1400 if (!skb_queue_empty(&chan->tx_q))
1401 chan->tx_send_head = chan->tx_q.next;
1403 chan->next_tx_seq = chan->expected_ack_seq;
1404 ret = l2cap_ertm_send(chan);
/* Acknowledge received I-frames: piggyback on pending data if possible,
 * otherwise send an explicit RR (or RNR when locally busy). */
1408 static void l2cap_send_ack(struct l2cap_chan *chan)
1412 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1414 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1415 control |= L2CAP_SUPER_RCV_NOT_READY;
1416 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1417 l2cap_send_sframe(chan, control);
1421 if (l2cap_ertm_send(chan) > 0)
1424 control |= L2CAP_SUPER_RCV_READY;
1425 l2cap_send_sframe(chan, control);
/* Send a final SREJ for the last entry on the SREJ list. */
1428 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1430 struct srej_list *tail;
1433 control = L2CAP_SUPER_SELECT_REJECT;
1434 control |= L2CAP_CTRL_FINAL;
1436 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1437 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1439 l2cap_send_sframe(chan, control);
/* Copy len bytes of user iovec data into skb: the first count bytes go
 * into skb itself, the remainder into MTU-sized fragments chained on
 * skb's frag_list. Returns 0 or a negative errno (-EFAULT on copy
 * failure; error paths partly elided in this dump). */
1442 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1444 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1445 struct sk_buff **frag;
1448 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1454 /* Continuation fragments (no L2CAP header) */
1455 frag = &skb_shinfo(skb)->frag_list;
1457 count = min_t(unsigned int, conn->mtu, len);
1459 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1462 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1468 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * followed by the user payload copied from msg. Returns the skb or an
 * ERR_PTR. */
1474 struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1476 struct sock *sk = chan->sk;
1477 struct l2cap_conn *conn = chan->conn;
1478 struct sk_buff *skb;
1479 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1480 struct l2cap_hdr *lh;
1482 BT_DBG("sk %p len %d", sk, (int)len);
1484 count = min_t(unsigned int, (conn->mtu - hlen), len);
1485 skb = bt_skb_send_alloc(sk, count + hlen,
1486 msg->msg_flags & MSG_DONTWAIT, &err);
1488 return ERR_PTR(err);
1490 /* Create L2CAP header */
1491 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1492 lh->cid = cpu_to_le16(chan->dcid);
1493 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1494 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1496 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1497 if (unlikely(err < 0)) {
1499 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: bare L2CAP header followed by the
 * user payload. Returns the skb or an ERR_PTR. */
1504 struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1506 struct sock *sk = chan->sk;
1507 struct l2cap_conn *conn = chan->conn;
1508 struct sk_buff *skb;
1509 int err, count, hlen = L2CAP_HDR_SIZE;
1510 struct l2cap_hdr *lh;
1512 BT_DBG("sk %p len %d", sk, (int)len);
1514 count = min_t(unsigned int, (conn->mtu - hlen), len);
1515 skb = bt_skb_send_alloc(sk, count + hlen,
1516 msg->msg_flags & MSG_DONTWAIT, &err);
1518 return ERR_PTR(err);
1520 /* Create L2CAP header */
1521 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1522 lh->cid = cpu_to_le16(chan->dcid);
1523 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1525 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1526 if (unlikely(err < 0)) {
1528 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 2-byte control
 * field, optional 2-byte SDU length (for SAR start frames), payload, and
 * a zero FCS placeholder (filled in at transmit time) when CRC16 is in
 * use. retries is reset to 0. Returns the skb or an ERR_PTR. */
1533 struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1535 struct sock *sk = chan->sk;
1536 struct l2cap_conn *conn = chan->conn;
1537 struct sk_buff *skb;
1538 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1539 struct l2cap_hdr *lh;
1541 BT_DBG("sk %p len %d", sk, (int)len);
1544 return ERR_PTR(-ENOTCONN);
1549 if (chan->fcs == L2CAP_FCS_CRC16)
1552 count = min_t(unsigned int, (conn->mtu - hlen), len);
1553 skb = bt_skb_send_alloc(sk, count + hlen,
1554 msg->msg_flags & MSG_DONTWAIT, &err);
1556 return ERR_PTR(err);
1558 /* Create L2CAP header */
1559 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1560 lh->cid = cpu_to_le16(chan->dcid);
1561 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1562 put_unaligned_le16(control, skb_put(skb, 2));
1564 put_unaligned_le16(sdulen, skb_put(skb, 2));
1566 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1567 if (unlikely(err < 0)) {
1569 return ERR_PTR(err);
1572 if (chan->fcs == L2CAP_FCS_CRC16)
1573 put_unaligned_le16(0, skb_put(skb, 2));
1575 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than remote_mps into a START frame, zero or more
 * CONTINUE frames, and an END frame.  Frames are staged on a local queue
 * so a mid-stream allocation failure can purge everything, then spliced
 * onto chan->tx_q atomically.  Returns 0 or a negative errno.
 * NOTE(review): excerpt elides the loop construct and several error
 * checks around the visible lines.
 */
1579 int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1581 	struct sk_buff *skb;
1582 	struct sk_buff_head sar_queue;
1586 	skb_queue_head_init(&sar_queue);
1587 	control = L2CAP_SDU_START;
/* START frame carries the total SDU length in its sdulen field */
1588 	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1590 		return PTR_ERR(skb);
1592 	__skb_queue_tail(&sar_queue, skb);
1593 	len -= chan->remote_mps;
1594 	size += chan->remote_mps;
1599 		if (len > chan->remote_mps) {
1600 			control = L2CAP_SDU_CONTINUE;
1601 			buflen = chan->remote_mps;
1603 			control = L2CAP_SDU_END;
/* CONTINUE/END frames pass sdulen == 0: no SDU-length field is emitted */
1607 		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure drop every staged fragment — partial SDUs are never queued */
1609 			skb_queue_purge(&sar_queue);
1610 			return PTR_ERR(skb);
1613 		__skb_queue_tail(&sar_queue, skb);
1617 	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
/* If nothing was pending, transmission starts from the first new frame */
1618 	if (chan->tx_send_head == NULL)
1619 		chan->tx_send_head = sar_queue.next;
/*
 * Top-level channel transmit entry point: dispatch @msg according to the
 * channel type (connectionless vs. connection-oriented) and, for the
 * latter, the negotiated mode (basic / ERTM / streaming).
 * Returns bytes queued/sent or a negative errno.
 * NOTE(review): excerpt elides returns, EMSGSIZE paths and brace lines.
 */
1624 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1626 	struct sk_buff *skb;
1630 	/* Connectionless channel */
1631 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1632 		skb = l2cap_create_connless_pdu(chan, msg, len);
1634 			return PTR_ERR(skb);
1636 		l2cap_do_send(chan, skb);
1640 	switch (chan->mode) {
1641 	case L2CAP_MODE_BASIC:
1642 		/* Check outgoing MTU */
1643 		if (len > chan->omtu)
1646 		/* Create a basic PDU */
1647 		skb = l2cap_create_basic_pdu(chan, msg, len);
1649 			return PTR_ERR(skb);
1651 		l2cap_do_send(chan, skb);
1655 	case L2CAP_MODE_ERTM:
1656 	case L2CAP_MODE_STREAMING:
1657 		/* Entire SDU fits into one PDU */
1658 		if (len <= chan->remote_mps) {
1659 			control = L2CAP_SDU_UNSEGMENTED;
1660 			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1663 				return PTR_ERR(skb);
1665 			__skb_queue_tail(&chan->tx_q, skb);
1667 			if (chan->tx_send_head == NULL)
1668 				chan->tx_send_head = skb;
1671 			/* Segment SDU into multiples PDUs */
1672 			err = l2cap_sar_segment_sdu(chan, msg, len);
/* Streaming mode sends immediately; ERTM defers while the peer is busy
 * or while waiting for an F-bit response (elided branch bodies) */
1677 		if (chan->mode == L2CAP_MODE_STREAMING) {
1678 			l2cap_streaming_send(chan);
1683 		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
1684 				(chan->conn_state & L2CAP_CONN_WAIT_F)) {
1689 		err = l2cap_ertm_send(chan);
1696 		BT_DBG("bad state %1.1x", chan->mode);
1703 /* Copy frame to all raw sockets on that connection */
/*
 * Clones @skb once per RAW channel on @conn and hands the clone to the
 * channel's recv callback, under the connection's channel-list read lock.
 * Clone failures are silently skipped (best-effort delivery).
 * NOTE(review): excerpt elides continue/kfree_skb lines.
 */
1704 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1706 	struct sk_buff *nskb;
1707 	struct l2cap_chan *chan;
1709 	BT_DBG("conn %p", conn);
1711 	read_lock(&conn->chan_lock);
1712 	list_for_each_entry(chan, &conn->chan_l, list) {
1713 		struct sock *sk = chan->sk;
1714 		if (chan->chan_type != L2CAP_CHAN_RAW)
1717 		/* Don't send frame to the socket it came from */
/* GFP_ATOMIC: called in a context where sleeping is not allowed */
1720 		nskb = skb_clone(skb, GFP_ATOMIC);
1724 		if (chan->ops->recv(chan->data, nskb))
1727 	read_unlock(&conn->chan_lock);
1730 /* ---- L2CAP signalling commands ---- */
/*
 * Allocate and fill a signalling-channel skb: L2CAP header (CID 0x0001
 * for BR/EDR or the LE signalling CID), command header (@code, @ident,
 * @dlen) and @data.  Payload beyond the HCI MTU is chained as header-less
 * fragments on frag_list.  Returns the skb or NULL on allocation failure.
 * NOTE(review): excerpt elides the fragment loop construct, NULL checks
 * and the failure-path kfree_skb.
 */
1731 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1732 						u8 code, u8 ident, u16 dlen, void *data)
1734 	struct sk_buff *skb, **frag;
1735 	struct l2cap_cmd_hdr *cmd;
1736 	struct l2cap_hdr *lh;
1739 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1740 			conn, code, ident, dlen);
1742 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1743 	count = min_t(unsigned int, conn->mtu, len);
1745 	skb = bt_skb_alloc(count, GFP_ATOMIC);
1749 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1750 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use a dedicated signalling CID distinct from BR/EDR */
1752 	if (conn->hcon->type == LE_LINK)
1753 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1755 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1757 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1760 	cmd->len = cpu_to_le16(dlen);
/* Remaining room in the first fragment after both headers */
1763 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1764 		memcpy(skb_put(skb, count), data, count);
1770 	/* Continuation fragments (no L2CAP header) */
1771 	frag = &skb_shinfo(skb)->frag_list;
1773 		count = min_t(unsigned int, conn->mtu, len);
1775 		*frag = bt_skb_alloc(count, GFP_ATOMIC);
1779 		memcpy(skb_put(*frag, count), data, count);
1784 		frag = &(*frag)->next;
/*
 * Decode one configuration option at *@ptr: returns its total size (so
 * the caller can decrement its remaining-length counter), and fills
 * @type, @olen and @val.  1/2/4-byte values are copied into *val;
 * anything else is returned as a pointer to the in-skb data.
 * NOTE(review): excerpt elides the switch statement and *ptr advance.
 */
1794 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1796 	struct l2cap_conf_opt *opt = *ptr;
1799 	len = L2CAP_CONF_OPT_SIZE + opt->len;
1807 		*val = *((u8 *) opt->val);
1811 		*val = get_unaligned_le16(opt->val);
1815 		*val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer instead of a copied value */
1819 		*val = (unsigned long) opt->val;
1823 	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option (type/len/value) at *@ptr and advance
 * *@ptr past it.  Mirror of l2cap_get_conf_opt(): 1/2/4-byte values are
 * stored inline, larger ones are memcpy'd from the pointer in @val.
 * The caller guarantees sufficient buffer space.
 */
1827 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1829 	struct l2cap_conf_opt *opt = *ptr;
1831 	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1838 		*((u8 *) opt->val) = val;
1842 		put_unaligned_le16(val, opt->val);
1846 		put_unaligned_le32(val, opt->val);
1850 		memcpy(opt->val, (void *) val, len);
1854 	*ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * ERTM ack timer callback: when the timer fires, send any pending
 * acknowledgement for received I-frames.  @arg is the l2cap_chan cast to
 * unsigned long (classic timer-callback convention); the socket lock is
 * taken in bh context around the send.
 */
1857 static void l2cap_ack_timeout(unsigned long arg)
1859 	struct l2cap_chan *chan = (void *) arg;
1861 	bh_lock_sock(chan->sk);
1862 	l2cap_send_ack(chan);
1863 	bh_unlock_sock(chan->sk);
/*
 * Initialise all per-channel ERTM state: sequence counters, the three
 * ERTM timers (retransmission, monitor, ack), the SREJ/busy receive
 * queues and list, the busy workqueue item, and the backlog handler used
 * to reinject frames deferred while the socket was owned by user space.
 */
1866 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1868 	struct sock *sk = chan->sk;
1870 	chan->expected_ack_seq = 0;
1871 	chan->unacked_frames = 0;
1872 	chan->buffer_seq = 0;
1873 	chan->num_acked = 0;
1874 	chan->frames_sent = 0;
1876 	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1877 							(unsigned long) chan);
1878 	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1879 							(unsigned long) chan);
1880 	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1882 	skb_queue_head_init(&chan->srej_q);
1883 	skb_queue_head_init(&chan->busy_q);
1885 	INIT_LIST_HEAD(&chan->srej_l);
1887 	INIT_WORK(&chan->busy_work, l2cap_busy_work);
/* Frames arriving while the sock is user-locked go through this handler */
1889 	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/*
 * Pick the actual channel mode: keep the requested ERTM/streaming mode
 * only if the remote's feature mask advertises support for it, otherwise
 * fall back to basic mode.
 * NOTE(review): excerpt elides the switch header and the "return mode"
 * taken when l2cap_mode_supported() succeeds.
 */
1892 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1895 	case L2CAP_MODE_STREAMING:
1896 	case L2CAP_MODE_ERTM:
1897 		if (l2cap_mode_supported(mode, remote_feat_mask))
1901 		return L2CAP_MODE_BASIC;
/*
 * Build an outgoing Configuration Request into @data for @chan:
 * optionally an MTU option (when imtu differs from the default), then a
 * mode-specific RFC option, and an FCS option when we want to disable
 * FCS and the peer supports the FCS feature.  Returns the number of
 * bytes written (ptr - data, in an elided return line).
 * NOTE(review): excerpt elides several gotos/breaks; the first visible
 * conditional skips mode (re)selection on retransmitted requests.
 */
1905 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1907 	struct l2cap_conf_req *req = data;
1908 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1909 	void *ptr = req->data;
1911 	BT_DBG("chan %p", chan);
1913 	if (chan->num_conf_req || chan->num_conf_rsp)
1916 	switch (chan->mode) {
1917 	case L2CAP_MODE_STREAMING:
1918 	case L2CAP_MODE_ERTM:
/* A "state 2" device insists on its configured mode; no fallback */
1919 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1924 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1929 	if (chan->imtu != L2CAP_DEFAULT_MTU)
1930 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1932 	switch (chan->mode) {
1933 	case L2CAP_MODE_BASIC:
/* Peers without ERTM/streaming don't need an explicit RFC option */
1934 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1935 				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1938 		rfc.mode            = L2CAP_MODE_BASIC;
1940 		rfc.max_transmit    = 0;
1941 		rfc.retrans_timeout = 0;
1942 		rfc.monitor_timeout = 0;
1943 		rfc.max_pdu_size    = 0;
1945 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1946 							(unsigned long) &rfc);
1949 	case L2CAP_MODE_ERTM:
1950 		rfc.mode            = L2CAP_MODE_ERTM;
1951 		rfc.txwin_size      = chan->tx_win;
1952 		rfc.max_transmit    = chan->max_tx;
/* Timeouts are proposed as 0; the responder picks the actual values */
1953 		rfc.retrans_timeout = 0;
1954 		rfc.monitor_timeout = 0;
1955 		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp MPS so one I-frame (+10 bytes of framing) fits the HCI MTU */
1956 		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1957 			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1959 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1960 							(unsigned long) &rfc);
1962 		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1965 		if (chan->fcs == L2CAP_FCS_NONE ||
1966 				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1967 			chan->fcs = L2CAP_FCS_NONE;
1968 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1972 	case L2CAP_MODE_STREAMING:
1973 		rfc.mode            = L2CAP_MODE_STREAMING;
1975 		rfc.max_transmit    = 0;
1976 		rfc.retrans_timeout = 0;
1977 		rfc.monitor_timeout = 0;
1978 		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1979 		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1980 			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1982 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1983 							(unsigned long) &rfc);
1985 		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1988 		if (chan->fcs == L2CAP_FCS_NONE ||
1989 				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1990 			chan->fcs = L2CAP_FCS_NONE;
1991 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1996 	req->dcid  = cpu_to_le16(chan->dcid);
1997 	req->flags = cpu_to_le16(0);
/*
 * Parse the peer's accumulated Configuration Request (chan->conf_req /
 * conf_len) and build our Configuration Response into @data.  Walks the
 * option list, records MTU/flush-timeout/RFC/FCS values, rejects unknown
 * non-hint options, then validates the requested mode and emits the
 * response options.  Returns the response length (elided return line) or
 * -ECONNREFUSED when the mode cannot be agreed.
 * NOTE(review): excerpt elides loop/brace lines, the "done:" label and
 * hint handling around the visible statements.
 */
2002 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2004 	struct l2cap_conf_rsp *rsp = data;
2005 	void *ptr = rsp->data;
2006 	void *req = chan->conf_req;
2007 	int len = chan->conf_len;
2008 	int type, hint, olen;
2010 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2011 	u16 mtu = L2CAP_DEFAULT_MTU;
2012 	u16 result = L2CAP_CONF_SUCCESS;
2014 	BT_DBG("chan %p", chan);
2016 	while (len >= L2CAP_CONF_OPT_SIZE) {
2017 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options (top bit set) may be ignored without rejecting */
2019 		hint  = type & L2CAP_CONF_HINT;
2020 		type &= L2CAP_CONF_MASK;
2023 		case L2CAP_CONF_MTU:
2027 		case L2CAP_CONF_FLUSH_TO:
2028 			chan->flush_to = val;
2031 		case L2CAP_CONF_QOS:
2034 		case L2CAP_CONF_RFC:
2035 			if (olen == sizeof(rfc))
2036 				memcpy(&rfc, (void *) val, olen);
2039 		case L2CAP_CONF_FCS:
2040 			if (val == L2CAP_FCS_NONE)
2041 				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN */
2049 			result = L2CAP_CONF_UNKNOWN;
2050 			*((u8 *) ptr++) = type;
2055 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
2058 	switch (chan->mode) {
2059 	case L2CAP_MODE_STREAMING:
2060 	case L2CAP_MODE_ERTM:
2061 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2062 			chan->mode = l2cap_select_mode(rfc.mode,
2063 					chan->conn->feat_mask);
2067 		if (chan->mode != rfc.mode)
2068 			return -ECONNREFUSED;
2074 	if (chan->mode != rfc.mode) {
2075 		result = L2CAP_CONF_UNACCEPT;
2076 		rfc.mode = chan->mode;
/* Second round and still disagreeing on mode: give up */
2078 		if (chan->num_conf_rsp == 1)
2079 			return -ECONNREFUSED;
2081 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2082 					sizeof(rfc), (unsigned long) &rfc);
2086 	if (result == L2CAP_CONF_SUCCESS) {
2087 		/* Configure output options and let the other side know
2088 		 * which ones we don't like. */
2090 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
2091 			result = L2CAP_CONF_UNACCEPT;
2094 		set_bit(CONF_MTU_DONE, &chan->conf_state);
2096 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2099 		case L2CAP_MODE_BASIC:
2100 			chan->fcs = L2CAP_FCS_NONE;
2101 			set_bit(CONF_MODE_DONE, &chan->conf_state);
2104 		case L2CAP_MODE_ERTM:
2105 			chan->remote_tx_win = rfc.txwin_size;
2106 			chan->remote_max_tx = rfc.max_transmit;
2108 			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2109 				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2111 			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): le16_to_cpu on a host-order constant looks inverted;
 * upstream later changed this to cpu_to_le16 — same value on LE hosts,
 * wrong byte order on BE.  Confirm against the kernel this tracks. */
2113 			rfc.retrans_timeout =
2114 				le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2115 			rfc.monitor_timeout =
2116 				le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2118 			set_bit(CONF_MODE_DONE, &chan->conf_state);
2120 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2121 					sizeof(rfc), (unsigned long) &rfc);
2125 		case L2CAP_MODE_STREAMING:
2126 			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2127 				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2129 			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2131 			set_bit(CONF_MODE_DONE, &chan->conf_state);
2133 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2134 					sizeof(rfc), (unsigned long) &rfc);
2139 			result = L2CAP_CONF_UNACCEPT;
2141 			memset(&rfc, 0, sizeof(rfc));
2142 			rfc.mode = chan->mode;
2145 	if (result == L2CAP_CONF_SUCCESS)
2146 		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2148 	rsp->scid   = cpu_to_le16(chan->dcid);
2149 	rsp->result = cpu_to_le16(result);
2150 	rsp->flags  = cpu_to_le16(0x0000);
/*
 * Parse the peer's Configuration Response @rsp (len bytes) and build the
 * follow-up Configuration Request into @data, adjusting our local view
 * (imtu, flush_to, mode, ERTM timeouts, mps) from the returned options.
 * *@result carries the response's result code in/out.  Returns the new
 * request length (elided return) or -ECONNREFUSED on a mode conflict.
 * NOTE(review): excerpt elides the switch headers and "done:" label.
 */
2155 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2157 	struct l2cap_conf_req *req = data;
2158 	void *ptr = req->data;
2161 	struct l2cap_conf_rfc rfc;
2163 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2165 	while (len >= L2CAP_CONF_OPT_SIZE) {
2166 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2169 		case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: counter with the minimum */
2170 			if (val < L2CAP_DEFAULT_MIN_MTU) {
2171 				*result = L2CAP_CONF_UNACCEPT;
2172 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2175 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2178 		case L2CAP_CONF_FLUSH_TO:
2179 			chan->flush_to = val;
2180 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2184 		case L2CAP_CONF_RFC:
2185 			if (olen == sizeof(rfc))
2186 				memcpy(&rfc, (void *)val, olen);
/* State-2 devices never accept a mode other than the one configured */
2188 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2189 							rfc.mode != chan->mode)
2190 				return -ECONNREFUSED;
2194 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2195 					sizeof(rfc), (unsigned long) &rfc);
2200 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2201 		return -ECONNREFUSED;
2203 	chan->mode = rfc.mode;
2205 	if (*result == L2CAP_CONF_SUCCESS) {
2207 		case L2CAP_MODE_ERTM:
2208 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2209 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2210 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
2212 		case L2CAP_MODE_STREAMING:
2213 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
2217 	req->dcid   = cpu_to_le16(chan->dcid);
2218 	req->flags  = cpu_to_le16(0x0000);
/*
 * Fill a minimal Configuration Response header (scid/result/flags) into
 * @data with no options.  Used for empty/continuation and reject
 * responses.  Returns the number of bytes written (elided return line).
 */
2223 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2225 	struct l2cap_conf_rsp *rsp = data;
2226 	void *ptr = rsp->data;
2228 	BT_DBG("chan %p", chan);
2230 	rsp->scid   = cpu_to_le16(chan->dcid);
2231 	rsp->result = cpu_to_le16(result);
2232 	rsp->flags  = cpu_to_le16(flags);
/*
 * Complete a connection that was deferred (defer_setup): send the
 * success Connect Response using the stored request ident, then kick off
 * configuration with our Config Request unless one was already sent
 * (CONF_REQ_SENT is test-and-set to guarantee exactly one).
 */
2237 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2239 	struct l2cap_conn_rsp rsp;
2240 	struct l2cap_conn *conn = chan->conn;
2243 	rsp.scid   = cpu_to_le16(chan->dcid);
2244 	rsp.dcid   = cpu_to_le16(chan->scid);
2245 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2246 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2247 	l2cap_send_cmd(conn, chan->ident,
2248 				L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2250 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2253 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2254 			l2cap_build_conf_req(chan, buf), buf);
2255 	chan->num_conf_req++;
/*
 * Extract ERTM/streaming parameters (timeouts, mps) from the RFC option
 * of a successful Configuration Response.  No-op for basic mode
 * channels.  Defaults for @rfc when no RFC option is present are set in
 * lines elided from this excerpt.
 */
2258 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2262 	struct l2cap_conf_rfc rfc;
2264 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2266 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2269 	while (len >= L2CAP_CONF_OPT_SIZE) {
2270 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2273 		case L2CAP_CONF_RFC:
2274 			if (olen == sizeof(rfc))
2275 				memcpy(&rfc, (void *)val, olen);
2282 	case L2CAP_MODE_ERTM:
2283 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2284 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2285 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
2287 	case L2CAP_MODE_STREAMING:
2288 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming Command Reject.  Only "command not understood"
 * (reason 0x0000) matching our outstanding Information Request is acted
 * on: the info timer is stopped, discovery is marked done, and pending
 * channels are started anyway.
 */
2292 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2294 	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2296 	if (rej->reason != 0x0000)
2299 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2300 					cmd->ident == conn->info_ident) {
2301 		del_timer(&conn->info_timer);
2303 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2304 		conn->info_ident = 0;
2306 		l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request: find the listening channel for
 * the PSM, enforce link security (SDP exempt), check backlog and
 * duplicate DCIDs, create and register the new channel, then reply with
 * success/pending/refused.  When our feature discovery hasn't completed
 * yet, the reply is PEND and an Information Request is fired.
 * NOTE(review): excerpt elides gotos, labels ("response:", "sendresp:")
 * and the dcid assignment; the control flow shown is partial.
 */
2312 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2314 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2315 	struct l2cap_conn_rsp rsp;
2316 	struct l2cap_chan *chan = NULL, *pchan;
2317 	struct sock *parent, *sk = NULL;
2318 	int result, status = L2CAP_CS_NO_INFO;
2320 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2321 	__le16 psm = req->psm;
2323 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2325 	/* Check if we have socket listening on psm */
2326 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2328 		result = L2CAP_CR_BAD_PSM;
2334 	bh_lock_sock(parent);
2336 	/* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check */
2337 	if (psm != cpu_to_le16(0x0001) &&
2338 				!hci_conn_check_link_mode(conn->hcon)) {
2339 		conn->disc_reason = 0x05;
2340 		result = L2CAP_CR_SEC_BLOCK;
2344 	result = L2CAP_CR_NO_MEM;
2346 	/* Check for backlog size */
2347 	if (sk_acceptq_is_full(parent)) {
2348 		BT_DBG("backlog full %d", parent->sk_ack_backlog);
2352 	chan = pchan->ops->new_connection(pchan->data);
2358 	write_lock_bh(&conn->chan_lock);
2360 	/* Check if we already have channel with that dcid */
2361 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
2362 		write_unlock_bh(&conn->chan_lock);
2363 		sock_set_flag(sk, SOCK_ZAPPED);
2364 		chan->ops->close(chan->data);
2368 	hci_conn_hold(conn->hcon);
2370 	bacpy(&bt_sk(sk)->src, conn->src);
2371 	bacpy(&bt_sk(sk)->dst, conn->dst);
2375 	bt_accept_enqueue(parent, sk);
2377 	__l2cap_chan_add(conn, chan);
2381 	__set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident so a deferred response can reuse it */
2383 	chan->ident = cmd->ident;
2385 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2386 		if (l2cap_check_security(chan)) {
2387 			if (bt_sk(sk)->defer_setup) {
2388 				l2cap_state_change(chan, BT_CONNECT2);
2389 				result = L2CAP_CR_PEND;
2390 				status = L2CAP_CS_AUTHOR_PEND;
2391 				parent->sk_data_ready(parent, 0);
2393 				l2cap_state_change(chan, BT_CONFIG);
2394 				result = L2CAP_CR_SUCCESS;
2395 				status = L2CAP_CS_NO_INFO;
2398 			l2cap_state_change(chan, BT_CONNECT2);
2399 			result = L2CAP_CR_PEND;
2400 			status = L2CAP_CS_AUTHEN_PEND;
2403 		l2cap_state_change(chan, BT_CONNECT2);
2404 		result = L2CAP_CR_PEND;
2405 		status = L2CAP_CS_NO_INFO;
2408 	write_unlock_bh(&conn->chan_lock);
2411 	bh_unlock_sock(parent);
2414 	rsp.scid   = cpu_to_le16(scid);
2415 	rsp.dcid   = cpu_to_le16(dcid);
2416 	rsp.result = cpu_to_le16(result);
2417 	rsp.status = cpu_to_le16(status);
2418 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2420 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2421 		struct l2cap_info_req info;
2422 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2424 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2425 		conn->info_ident = l2cap_get_ident(conn);
2427 		mod_timer(&conn->info_timer, jiffies +
2428 					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2430 		l2cap_send_cmd(conn, conn->info_ident,
2431 					L2CAP_INFO_REQ, sizeof(info), &info);
2434 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2435 				result == L2CAP_CR_SUCCESS) {
2437 		set_bit(CONF_REQ_SENT, &chan->conf_state);
2438 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2439 					l2cap_build_conf_req(chan, buf), buf);
2440 		chan->num_conf_req++;
/*
 * Handle an incoming Connection Response: look up the channel by scid
 * (falling back to the pending ident when scid is 0), then act on the
 * result — SUCCESS moves to BT_CONFIG and sends the first Config
 * Request, PEND (elided case label) keeps the connect pending, anything
 * else tears the channel down with ECONNREFUSED.
 * NOTE(review): excerpt elides locking and the switch/default labels.
 */
2446 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2448 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2449 	u16 scid, dcid, result, status;
2450 	struct l2cap_chan *chan;
2454 	scid   = __le16_to_cpu(rsp->scid);
2455 	dcid   = __le16_to_cpu(rsp->dcid);
2456 	result = __le16_to_cpu(rsp->result);
2457 	status = __le16_to_cpu(rsp->status);
2459 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2462 		chan = l2cap_get_chan_by_scid(conn, scid);
2466 		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2474 	case L2CAP_CR_SUCCESS:
2475 		l2cap_state_change(chan, BT_CONFIG);
2478 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2480 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2483 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2484 					l2cap_build_conf_req(chan, req), req);
2485 		chan->num_conf_req++;
2489 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2493 		/* don't delete l2cap channel if sk is owned by user */
/* User holds the sock lock: defer deletion via a short channel timer */
2494 		if (sock_owned_by_user(sk)) {
2495 			l2cap_state_change(chan, BT_DISCONN);
2496 			__clear_chan_timer(chan);
2497 			__set_chan_timer(chan, HZ / 5);
2501 		l2cap_chan_del(chan, ECONNREFUSED);
2509 static inline void set_default_fcs(struct l2cap_chan *chan)
2511 	/* FCS is enabled only in ERTM or streaming mode, if one or both
/* (excerpt elides the rest of this original comment) */
2514 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2515 		chan->fcs = L2CAP_FCS_NONE;
/* Default to CRC16 unless the peer explicitly requested no FCS */
2516 	else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2517 		chan->fcs = L2CAP_FCS_CRC16;
/*
 * Handle an incoming Configuration Request.  Accumulates (possibly
 * multi-PDU, C-flag continued) option data into chan->conf_req, rejects
 * oversized configs, and on the final fragment parses the request,
 * responds, and — once both directions are configured — transitions the
 * channel to BT_CONNECTED (initialising ERTM state when needed).
 * NOTE(review): excerpt elides unlock paths, "unlock:" label and returns.
 */
2520 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2522 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2525 	struct l2cap_chan *chan;
2529 	dcid  = __le16_to_cpu(req->dcid);
2530 	flags = __le16_to_cpu(req->flags);
2532 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2534 	chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only makes sense while in BT_CONFIG; otherwise reject */
2540 	if (chan->state != BT_CONFIG) {
2541 		struct l2cap_cmd_rej rej;
2543 		rej.reason = cpu_to_le16(0x0002);
2544 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2549 	/* Reject if config buffer is too small. */
2550 	len = cmd_len - sizeof(*req);
2551 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
2552 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2553 				l2cap_build_conf_rsp(chan, rsp,
2554 					L2CAP_CONF_REJECT, flags), rsp);
2559 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
2560 	chan->conf_len += len;
/* C-flag (bit 0) set: more option fragments follow; ack and wait */
2562 	if (flags & 0x0001) {
2563 		/* Incomplete config. Send empty response. */
2564 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2565 				l2cap_build_conf_rsp(chan, rsp,
2566 					L2CAP_CONF_SUCCESS, 0x0001), rsp);
2570 	/* Complete config. */
2571 	len = l2cap_parse_conf_req(chan, rsp);
2573 		l2cap_send_disconn_req(conn, chan, ECONNRESET);
2577 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2578 	chan->num_conf_rsp++;
2580 	/* Reset config buffer. */
2583 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both CONF_INPUT_DONE and CONF_OUTPUT_DONE set: channel is usable */
2586 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2587 		set_default_fcs(chan);
2589 		l2cap_state_change(chan, BT_CONNECTED);
2591 		chan->next_tx_seq = 0;
2592 		chan->expected_tx_seq = 0;
2593 		skb_queue_head_init(&chan->tx_q);
2594 		if (chan->mode == L2CAP_MODE_ERTM)
2595 			l2cap_ertm_init(chan);
2597 		l2cap_chan_ready(sk);
2601 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2603 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2604 					l2cap_build_conf_req(chan, buf), buf);
2605 		chan->num_conf_req++;
/*
 * Handle an incoming Configuration Response.  SUCCESS pulls negotiated
 * RFC values into the channel; UNACCEPT (within the retry limit) parses
 * the rejected options and re-sends an adjusted Config Request; anything
 * else disconnects.  When our side already finished its outgoing config
 * (CONF_OUTPUT_DONE), the channel goes BT_CONNECTED.
 * NOTE(review): excerpt elides the switch/default labels, the C-flag
 * check and the "done:" label around the visible statements.
 */
2613 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2615 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2616 	u16 scid, flags, result;
2617 	struct l2cap_chan *chan;
2619 	int len = cmd->len - sizeof(*rsp);
2621 	scid   = __le16_to_cpu(rsp->scid);
2622 	flags  = __le16_to_cpu(rsp->flags);
2623 	result = __le16_to_cpu(rsp->result);
2625 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2626 			scid, flags, result);
2628 	chan = l2cap_get_chan_by_scid(conn, scid);
2635 	case L2CAP_CONF_SUCCESS:
2636 		l2cap_conf_rfc_get(chan, rsp->data, len);
2639 	case L2CAP_CONF_UNACCEPT:
2640 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Re-request would overflow our request buffer: give up, disconnect */
2643 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2644 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
2648 			/* throw out any old stored conf requests */
2649 			result = L2CAP_CONF_SUCCESS;
2650 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2653 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
2657 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
2658 						L2CAP_CONF_REQ, len, req);
2659 			chan->num_conf_req++;
2660 			if (result != L2CAP_CONF_SUCCESS)
2666 		sk->sk_err = ECONNRESET;
2667 		__set_chan_timer(chan, HZ * 5);
2668 		l2cap_send_disconn_req(conn, chan, ECONNRESET);
2675 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
2677 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2678 		set_default_fcs(chan);
2680 		l2cap_state_change(chan, BT_CONNECTED);
2681 		chan->next_tx_seq = 0;
2682 		chan->expected_tx_seq = 0;
2683 		skb_queue_head_init(&chan->tx_q);
2684 		if (chan->mode == L2CAP_MODE_ERTM)
2685 			l2cap_ertm_init(chan);
2687 		l2cap_chan_ready(sk);
/*
 * Handle an incoming Disconnection Request: ack with a Disconnection
 * Response, shut the socket down, and delete the channel — unless user
 * space holds the sock lock, in which case deletion is deferred through
 * a short channel timer.
 * NOTE(review): excerpt elides locking and return statements.
 */
2695 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2697 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2698 	struct l2cap_disconn_rsp rsp;
2700 	struct l2cap_chan *chan;
2703 	scid = __le16_to_cpu(req->scid);
2704 	dcid = __le16_to_cpu(req->dcid);
2706 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid: look up by our local channel id */
2708 	chan = l2cap_get_chan_by_scid(conn, dcid);
2714 	rsp.dcid = cpu_to_le16(chan->scid);
2715 	rsp.scid = cpu_to_le16(chan->dcid);
2716 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2718 	sk->sk_shutdown = SHUTDOWN_MASK;
2720 	/* don't delete l2cap channel if sk is owned by user */
2721 	if (sock_owned_by_user(sk)) {
2722 		l2cap_state_change(chan, BT_DISCONN);
2723 		__clear_chan_timer(chan);
2724 		__set_chan_timer(chan, HZ / 5);
2729 	l2cap_chan_del(chan, ECONNRESET);
2732 	chan->ops->close(chan->data);
/*
 * Handle an incoming Disconnection Response (our earlier request was
 * accepted): delete the channel with no error, deferring via the short
 * channel timer when user space holds the sock lock.
 * NOTE(review): excerpt elides locking and return statements.
 */
2736 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2738 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2740 	struct l2cap_chan *chan;
2743 	scid = __le16_to_cpu(rsp->scid);
2744 	dcid = __le16_to_cpu(rsp->dcid);
2746 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2748 	chan = l2cap_get_chan_by_scid(conn, scid);
2754 	/* don't delete l2cap channel if sk is owned by user */
2755 	if (sock_owned_by_user(sk)) {
2756 		l2cap_state_change(chan,BT_DISCONN);
2757 		__clear_chan_timer(chan);
2758 		__set_chan_timer(chan, HZ / 5);
2763 	l2cap_chan_del(chan, 0);
2766 	chan->ops->close(chan->data);
/*
 * Handle an incoming Information Request.  Three cases: feature mask
 * (reply with our static mask plus ERTM/streaming bits), fixed channels
 * (reply with the l2cap_fixed_chan bitmap), anything else (NOTSUPP).
 * NOTE(review): excerpt elides the reply-buffer declarations and the
 * feat_mask |= continuation line.
 */
2770 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2772 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2775 	type = __le16_to_cpu(req->type);
2777 	BT_DBG("type 0x%4.4x", type);
2779 	if (type == L2CAP_IT_FEAT_MASK) {
2781 		u32 feat_mask = l2cap_feat_mask;
2782 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2783 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2784 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2786 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Mask is serialised little-endian per the L2CAP wire format */
2788 		put_unaligned_le32(feat_mask, rsp->data);
2789 		l2cap_send_cmd(conn, cmd->ident,
2790 					L2CAP_INFO_RSP, sizeof(buf), buf);
2791 	} else if (type == L2CAP_IT_FIXED_CHAN) {
2793 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2794 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2795 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap follows the 4-byte rsp header */
2796 		memcpy(buf + 4, l2cap_fixed_chan, 8);
2797 		l2cap_send_cmd(conn, cmd->ident,
2798 					L2CAP_INFO_RSP, sizeof(buf), buf);
2800 		struct l2cap_info_rsp rsp;
2801 		rsp.type   = cpu_to_le16(type);
2802 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2803 		l2cap_send_cmd(conn, cmd->ident,
2804 					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an incoming Information Response to our discovery sequence.
 * Validates the ident against the outstanding request, stops the info
 * timer, records the peer's feature mask, and chains a fixed-channel
 * query when the peer advertises fixed-channel support; otherwise marks
 * discovery done and starts pending channels.
 * NOTE(review): excerpt elides early-return and closing-brace lines.
 */
2810 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2812 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2815 	type   = __le16_to_cpu(rsp->type);
2816 	result = __le16_to_cpu(rsp->result);
2818 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2820 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
2821 	if (cmd->ident != conn->info_ident ||
2822 			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2825 	del_timer(&conn->info_timer);
/* Request failed: treat discovery as finished and start channels anyway */
2827 	if (result != L2CAP_IR_SUCCESS) {
2828 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2829 		conn->info_ident = 0;
2831 		l2cap_conn_start(conn);
2836 	if (type == L2CAP_IT_FEAT_MASK) {
2837 		conn->feat_mask = get_unaligned_le32(rsp->data);
2839 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2840 			struct l2cap_info_req req;
2841 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2843 			conn->info_ident = l2cap_get_ident(conn);
2845 			l2cap_send_cmd(conn, conn->info_ident,
2846 					L2CAP_INFO_REQ, sizeof(req), &req);
2848 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2849 			conn->info_ident = 0;
2851 			l2cap_conn_start(conn);
2853 	} else if (type == L2CAP_IT_FIXED_CHAN) {
2854 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2855 		conn->info_ident = 0;
2857 		l2cap_conn_start(conn);
/*
 * Validate LE connection parameters per the spec ranges: interval
 * 6..3200 with min<=max, supervision-timeout multiplier 10..3200, and a
 * latency that keeps the timeout larger than the effective interval.
 * Returns 0 when acceptable, nonzero (elided -EINVAL returns) otherwise.
 */
2863 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2868 	if (min > max || min < 6 || max > 3200)
2871 	if (to_multiplier < 10 || to_multiplier > 3200)
2874 	if (max >= to_multiplier * 8)
/* Highest latency such that the link still fits the supervision timeout */
2877 	max_latency = (to_multiplier * 8 / max) - 1;
2878 	if (latency > 499 || latency > max_latency)
/*
 * Handle an LE Connection Parameter Update Request.  Only valid when we
 * are master; validates the command length and the proposed parameters,
 * replies accepted/rejected, and on acceptance pushes the new parameters
 * down to the controller via hci_le_conn_update().
 * NOTE(review): excerpt elides some returns and the "if (!err)" guard
 * implied before the hci_le_conn_update() call.
 */
2884 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2885 					struct l2cap_cmd_hdr *cmd, u8 *data)
2887 	struct hci_conn *hcon = conn->hcon;
2888 	struct l2cap_conn_param_update_req *req;
2889 	struct l2cap_conn_param_update_rsp rsp;
2890 	u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may apply parameter updates requested by the slave */
2893 	if (!(hcon->link_mode & HCI_LM_MASTER))
2896 	cmd_len = __le16_to_cpu(cmd->len);
2897 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2900 	req = (struct l2cap_conn_param_update_req *) data;
2901 	min		= __le16_to_cpu(req->min);
2902 	max		= __le16_to_cpu(req->max);
2903 	latency		= __le16_to_cpu(req->latency);
2904 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
2906 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2907 						min, max, latency, to_multiplier);
2909 	memset(&rsp, 0, sizeof(rsp));
2911 	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2913 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2915 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2917 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2921 		hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/*
 * Dispatch one BR/EDR signalling command to its handler by opcode.
 * Echo requests are answered inline; unknown opcodes are logged and
 * (in an elided line) reported as -EINVAL so the caller sends a
 * Command Reject.
 */
2926 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2927 			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2931 	switch (cmd->code) {
2932 	case L2CAP_COMMAND_REJ:
2933 		l2cap_command_rej(conn, cmd, data);
2936 	case L2CAP_CONN_REQ:
2937 		err = l2cap_connect_req(conn, cmd, data);
2940 	case L2CAP_CONN_RSP:
2941 		err = l2cap_connect_rsp(conn, cmd, data);
2944 	case L2CAP_CONF_REQ:
2945 		err = l2cap_config_req(conn, cmd, cmd_len, data);
2948 	case L2CAP_CONF_RSP:
2949 		err = l2cap_config_rsp(conn, cmd, data);
2952 	case L2CAP_DISCONN_REQ:
2953 		err = l2cap_disconnect_req(conn, cmd, data);
2956 	case L2CAP_DISCONN_RSP:
2957 		err = l2cap_disconnect_rsp(conn, cmd, data);
/* Echo: reflect the payload straight back with the same ident */
2960 	case L2CAP_ECHO_REQ:
2961 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2964 	case L2CAP_ECHO_RSP:
2967 	case L2CAP_INFO_REQ:
2968 		err = l2cap_information_req(conn, cmd, data);
2971 	case L2CAP_INFO_RSP:
2972 		err = l2cap_information_rsp(conn, cmd, data);
2976 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/*
 * Dispatch one LE signalling command.  Only the parameter-update request
 * is actively handled; Command Reject and parameter-update responses are
 * accepted silently, and anything else is logged and (elided) returned
 * as an error.
 */
2984 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2985 				struct l2cap_cmd_hdr *cmd, u8 *data)
2987 	switch (cmd->code) {
2988 	case L2CAP_COMMAND_REJ:
2991 	case L2CAP_CONN_PARAM_UPDATE_REQ:
2992 		return l2cap_conn_param_update_req(conn, cmd, data);
2994 	case L2CAP_CONN_PARAM_UPDATE_RSP:
2998 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/*
 * Process a signalling-channel skb: deliver a copy to raw sockets, then
 * iterate over the packed command PDUs it contains, validating each
 * header (length fits, nonzero ident) before dispatching to the BR/EDR
 * or LE handler.  Handler errors produce a Command Reject back to the
 * peer.  The skb is consumed (elided kfree_skb at the end).
 */
3003 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3004 							struct sk_buff *skb)
3006 	u8 *data = skb->data;
3008 	struct l2cap_cmd_hdr cmd;
3011 	l2cap_raw_recv(conn, skb);
3013 	while (len >= L2CAP_CMD_HDR_SIZE) {
3015 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3016 		data += L2CAP_CMD_HDR_SIZE;
3017 		len  -= L2CAP_CMD_HDR_SIZE;
3019 		cmd_len = le16_to_cpu(cmd.len);
3021 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Malformed command: claimed length overruns the skb or ident is 0 */
3023 		if (cmd_len > len || !cmd.ident) {
3024 			BT_DBG("corrupted command");
3028 		if (conn->hcon->type == LE_LINK)
3029 			err = l2cap_le_sig_cmd(conn, &cmd, data);
3031 			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3034 			struct l2cap_cmd_rej rej;
3036 			BT_ERR("Wrong link type (%d)", err);
3038 			/* FIXME: Map err to a valid reason */
3039 			rej.reason = cpu_to_le16(0);
3040 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 Frame Check Sequence of a received ERTM/streaming
 * frame.  The skb is first trimmed by the 2 FCS octets; the received
 * FCS is then read from just past the new tail (the bytes are still
 * present in the buffer) and compared against a CRC computed over the
 * basic L2CAP header plus payload.  No-op when FCS is disabled.
 */
3050 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3052 u16 our_fcs, rcv_fcs;
/* CRC covers the 4-byte basic header plus the 2-byte control field. */
3053 int hdr_size = L2CAP_HDR_SIZE + 2;
3055 if (chan->fcs == L2CAP_FCS_CRC16) {
3056 skb_trim(skb, skb->len - 2);
3057 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3058 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3060 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) from the remote side: send RNR when we are
 * locally busy, otherwise flush pending I-frames (retransmitting first
 * if the remote was busy), and fall back to an RR S-frame when nothing
 * at all was transmitted, so the poll is always acknowledged.
 */
3066 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3070 chan->frames_sent = 0;
3072 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3074 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3075 control |= L2CAP_SUPER_RCV_NOT_READY;
3076 l2cap_send_sframe(chan, control);
3077 chan->conn_state |= L2CAP_CONN_RNR_SENT;
3080 if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
3081 l2cap_retransmit_frames(chan);
3083 l2cap_ertm_send(chan);
/* Nothing was sent and we are not busy: an explicit RR is needed
 * so the remote's poll does not time out. */
3085 if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3086 chan->frames_sent == 0) {
3087 control |= L2CAP_SUPER_RCV_READY;
3088 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ reorder queue,
 * keeping the queue sorted by tx_seq distance from buffer_seq
 * (modulo-64 sequence space).  Duplicated tx_seq values are rejected
 * so the same frame is never queued twice.
 */
3092 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
3094 struct sk_buff *next_skb;
3095 int tx_seq_offset, next_tx_seq_offset;
3097 bt_cb(skb)->tx_seq = tx_seq;
3098 bt_cb(skb)->sar = sar;
3100 next_skb = skb_peek(&chan->srej_q);
/* Empty queue: just append. */
3102 __skb_queue_tail(&chan->srej_q, skb);
/* Offsets are distances from buffer_seq in modulo-64 space; the
 * "+ 64" normalizes the C remainder for negative values. */
3106 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3107 if (tx_seq_offset < 0)
3108 tx_seq_offset += 64;
/* Same tx_seq already queued: duplicate, reject. */
3111 if (bt_cb(next_skb)->tx_seq == tx_seq)
3114 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3115 chan->buffer_seq) % 64;
3116 if (next_tx_seq_offset < 0)
3117 next_tx_seq_offset += 64;
3119 if (next_tx_seq_offset > tx_seq_offset) {
3120 __skb_queue_before(&chan->srej_q, next_skb, skb);
3124 if (skb_queue_is_last(&chan->srej_q, next_skb))
3127 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
/* Larger than everything queued: append at the tail. */
3129 __skb_queue_tail(&chan->srej_q, skb);
/* Reassemble an SDU from ERTM I-frames according to the SAR bits of
 * the control field.  Unsegmented frames go straight up to the socket
 * layer; START allocates chan->sdu and records the announced SDU
 * length, CONTINUE appends, and END appends, clones the complete SDU
 * and delivers it.  L2CAP_CONN_SAR_RETRY allows a delivery attempt
 * that failed (e.g. receive buffer full under Local Busy) to be
 * retried without copying the END payload a second time.  Protocol
 * violations (START inside START, oversize SDU, …) tear the channel
 * down with a Disconnect Request.
 */
3134 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3136 struct sk_buff *_skb;
3139 switch (control & L2CAP_CTRL_SAR) {
3140 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while a segmented SDU is in progress is
 * a protocol violation. */
3141 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
3144 return chan->ops->recv(chan->data, skb);
3146 case L2CAP_SDU_START:
3147 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
/* The first two payload octets of a START frame carry the
 * total SDU length. */
3150 chan->sdu_len = get_unaligned_le16(skb->data);
3152 if (chan->sdu_len > chan->imtu)
3155 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3159 /* pull sdu_len bytes only after alloc, because of Local Busy
3160 * condition we have to be sure that this will be executed
3161 * only once, i.e., when alloc does not fail */
3164 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3166 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3167 chan->partial_sdu_len = skb->len;
3170 case L2CAP_SDU_CONTINUE:
3171 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3177 chan->partial_sdu_len += skb->len;
3178 if (chan->partial_sdu_len > chan->sdu_len)
3181 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
/* END segment (remaining SAR case). */
3186 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
/* Skip the copy on a retry: the END payload was already
 * appended on the first (failed) delivery attempt. */
3192 if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
3193 chan->partial_sdu_len += skb->len;
3195 if (chan->partial_sdu_len > chan->imtu)
3198 if (chan->partial_sdu_len != chan->sdu_len)
3201 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3204 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
/* Clone failed: keep the assembled SDU and retry later. */
3206 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
3210 err = chan->ops->recv(chan->data, _skb);
/* Socket layer refused the SDU: mark for retry. */
3213 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
3217 chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3218 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3220 kfree_skb(chan->sdu);
3228 kfree_skb(chan->sdu);
/* Unrecoverable reassembly error: drop the link. */
3232 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Try to drain the Local-Busy backlog (busy_q) into the reassembly
 * path.  If a frame still cannot be delivered it is put back at the
 * head and the busy condition persists.  Once the queue empties the
 * local busy state is exited: if an RNR had been sent, an RR with the
 * P-bit is transmitted and the monitor timer started to resynchronize
 * with the remote side.
 */
3237 static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
3239 struct sk_buff *skb;
3243 while ((skb = skb_dequeue(&chan->busy_q))) {
3244 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3245 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
/* Still busy: requeue at the head so ordering is preserved. */
3247 skb_queue_head(&chan->busy_q, skb);
3251 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
/* Only poll the remote if we actually advertised RNR before. */
3254 if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
3257 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3258 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3259 l2cap_send_sframe(chan, control);
3260 chan->retry_count = 1;
3262 __clear_retrans_timer(chan);
3263 __set_monitor_timer(chan);
3265 chan->conn_state |= L2CAP_CONN_WAIT_F;
3268 chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3269 chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
3271 BT_DBG("chan %p, Exit local busy", chan);
/* Workqueue handler for the Local Busy condition: repeatedly retry
 * pushing the queued frames upward, sleeping between attempts, until
 * either the backlog drains, a signal/socket error occurs, or
 * L2CAP_LOCAL_BUSY_TRIES is exceeded (in which case the channel is
 * disconnected with EBUSY).
 */
3276 static void l2cap_busy_work(struct work_struct *work)
3278 DECLARE_WAITQUEUE(wait, current);
3279 struct l2cap_chan *chan =
3280 container_of(work, struct l2cap_chan, busy_work);
3281 struct sock *sk = chan->sk;
3282 int n_tries = 0, timeo = HZ/5, err;
3283 struct sk_buff *skb;
3287 add_wait_queue(sk_sleep(sk), &wait);
3288 while ((skb = skb_peek(&chan->busy_q))) {
3289 set_current_state(TASK_INTERRUPTIBLE);
/* Give up after too many attempts: the peer is stalled. */
3291 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3293 l2cap_send_disconn_req(chan->conn, chan, EBUSY);
3300 if (signal_pending(current)) {
3301 err = sock_intr_errno(timeo);
3306 timeo = schedule_timeout(timeo);
3309 err = sock_error(sk);
/* Success: backlog fully drained, busy state exited. */
3313 if (l2cap_try_push_rx_skb(chan) == 0)
3317 set_current_state(TASK_RUNNING);
3318 remove_wait_queue(sk_sleep(sk), &wait);
/* Deliver one in-sequence I-frame.  While already in Local Busy the
 * frame is appended to busy_q and a drain is attempted.  Otherwise it
 * is reassembled directly; if the upper layer cannot take it, the
 * channel enters Local Busy: the frame is queued, an RNR S-frame is
 * sent, and the busy workqueue item is scheduled to retry delivery.
 */
3323 static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3327 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3328 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3329 __skb_queue_tail(&chan->busy_q, skb);
3330 return l2cap_try_push_rx_skb(chan);
3335 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3337 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3341 /* Busy Condition */
3342 BT_DBG("chan %p, Enter local busy", chan);
3344 chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3345 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3346 __skb_queue_tail(&chan->busy_q, skb);
3348 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3349 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3350 l2cap_send_sframe(chan, sctrl);
3352 chan->conn_state |= L2CAP_CONN_RNR_SENT;
/* No point acking while busy; the RNR already carries ReqSeq. */
3354 __clear_ack_timer(chan);
3356 queue_work(_busy_wq, &chan->busy_work);
/* Reassemble an SDU in Streaming mode.  Unlike ERTM there is no
 * retransmission: a SAR violation or oversize SDU simply discards the
 * partial SDU and continues, since lost frames are expected in this
 * mode.
 */
3361 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3363 struct sk_buff *_skb;
3367 * TODO: We have to notify the userland if some data is lost with the
3371 switch (control & L2CAP_CTRL_SAR) {
3372 case L2CAP_SDU_UNSEGMENTED:
/* Abandon any half-built SDU: its END frame was lost. */
3373 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3374 kfree_skb(chan->sdu);
3378 err = chan->ops->recv(chan->data, skb);
3384 case L2CAP_SDU_START:
3385 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3386 kfree_skb(chan->sdu);
/* First two payload octets of START = announced SDU length. */
3390 chan->sdu_len = get_unaligned_le16(skb->data);
3393 if (chan->sdu_len > chan->imtu) {
3398 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3404 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3406 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3407 chan->partial_sdu_len = skb->len;
3411 case L2CAP_SDU_CONTINUE:
3412 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3415 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3417 chan->partial_sdu_len += skb->len;
3418 if (chan->partial_sdu_len > chan->sdu_len)
3419 kfree_skb(chan->sdu);
/* END segment (remaining SAR case). */
3426 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3429 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3431 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3432 chan->partial_sdu_len += skb->len;
3434 if (chan->partial_sdu_len > chan->imtu)
/* Deliver only if the assembled size matches the announcement;
 * otherwise a middle fragment was lost and the SDU is dropped. */
3437 if (chan->partial_sdu_len == chan->sdu_len) {
3438 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3439 err = chan->ops->recv(chan->data, _skb);
3446 kfree_skb(chan->sdu);
/* After a retransmitted frame fills a gap, push every now-contiguous
 * frame from the SREJ reorder queue up through reassembly, advancing
 * buffer_seq_srej and the expected sequence number (modulo 64) until
 * the next hole is reached.
 */
3454 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3456 struct sk_buff *skb;
3459 while ((skb = skb_peek(&chan->srej_q))) {
/* Stop at the first remaining gap. */
3460 if (bt_cb(skb)->tx_seq != tx_seq)
3463 skb = skb_dequeue(&chan->srej_q);
3464 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3465 l2cap_ertm_reassembly_sdu(chan, skb, control);
3466 chan->buffer_seq_srej =
3467 (chan->buffer_seq_srej + 1) % 64;
3468 tx_seq = (tx_seq + 1) % 64;
/* Re-send SREJ S-frames for every sequence number still outstanding on
 * chan->srej_l up to (and including) tx_seq; matching entries are
 * consumed, the rest are re-queued at the tail so the list keeps its
 * order.
 */
3472 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3474 struct srej_list *l, *tmp;
3477 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Reached the requested seq: done (entry handled by elided code). */
3478 if (l->tx_seq == tx_seq) {
3483 control = L2CAP_SUPER_SELECT_REJECT;
3484 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3485 l2cap_send_sframe(chan, control);
/* Keep the entry pending by moving it back to the tail. */
3487 list_add_tail(&l->list, &chan->srej_l);
3491 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3493 struct srej_list *new;
3496 while (tx_seq != chan->expected_tx_seq) {
3497 control = L2CAP_SUPER_SELECT_REJECT;
3498 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3499 l2cap_send_sframe(chan, control);
3501 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3502 new->tx_seq = chan->expected_tx_seq;
3503 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3504 list_add_tail(&new->list, &chan->srej_l);
3506 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* ERTM receive path for one I-frame.  Handles, in order: the F-bit
 * terminating a WAIT_F poll exchange; ack bookkeeping via ReqSeq;
 * in-sequence delivery; out-of-window frames (disconnect); frames
 * arriving while in SREJ recovery (fill gaps, track duplicates, issue
 * further SREJs); and first-time sequence gaps (enter SREJ_SENT state).
 * Finally schedules/sends acknowledgements, batching them so roughly
 * one RR is sent per tx_win/6 frames.
 */
3509 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3511 u8 tx_seq = __get_txseq(rx_control);
3512 u8 req_seq = __get_reqseq(rx_control);
3513 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
/* Ack roughly every (tx_win/6)+1 frames to bound ack traffic. */
3515 int num_to_ack = (chan->tx_win/6) + 1;
3518 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3519 tx_seq, rx_control);
/* F-bit answers our poll: stop the monitor timer and resume
 * retransmission timing if frames are still unacked. */
3521 if (L2CAP_CTRL_FINAL & rx_control &&
3522 chan->conn_state & L2CAP_CONN_WAIT_F) {
3523 __clear_monitor_timer(chan);
3524 if (chan->unacked_frames > 0)
3525 __set_retrans_timer(chan);
3526 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
/* Piggybacked acknowledgement. */
3529 chan->expected_ack_seq = req_seq;
3530 l2cap_drop_acked_frames(chan);
3532 if (tx_seq == chan->expected_tx_seq)
3535 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3536 if (tx_seq_offset < 0)
3537 tx_seq_offset += 64;
3539 /* invalid tx_seq */
3540 if (tx_seq_offset >= chan->tx_win) {
3541 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3545 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY)
3548 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3549 struct srej_list *first;
3551 first = list_first_entry(&chan->srej_l,
3552 struct srej_list, list);
/* The frame we SREJ'd first has arrived: queue it and see how
 * far the gap can now be closed. */
3553 if (tx_seq == first->tx_seq) {
3554 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3555 l2cap_check_srej_gap(chan, tx_seq);
3557 list_del(&first->list);
3560 if (list_empty(&chan->srej_l)) {
3561 chan->buffer_seq = chan->buffer_seq_srej;
3562 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3563 l2cap_send_ack(chan);
3564 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3567 struct srej_list *l;
3569 /* duplicated tx_seq */
3570 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3573 list_for_each_entry(l, &chan->srej_l, list) {
/* Already SREJ'd: the original request was likely lost, resend. */
3574 if (l->tx_seq == tx_seq) {
3575 l2cap_resend_srejframe(chan, tx_seq);
3579 l2cap_send_srejframe(chan, tx_seq);
3582 expected_tx_seq_offset =
3583 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3584 if (expected_tx_seq_offset < 0)
3585 expected_tx_seq_offset += 64;
3587 /* duplicated tx_seq */
3588 if (tx_seq_offset < expected_tx_seq_offset)
/* First gap detected: enter SREJ recovery. */
3591 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3593 BT_DBG("chan %p, Enter SREJ", chan);
3595 INIT_LIST_HEAD(&chan->srej_l);
3596 chan->buffer_seq_srej = chan->buffer_seq;
3598 __skb_queue_head_init(&chan->srej_q);
3599 __skb_queue_head_init(&chan->busy_q);
3600 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3602 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3604 l2cap_send_srejframe(chan, tx_seq);
3606 __clear_ack_timer(chan);
/* In-sequence delivery path (expected label, elided). */
3611 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3613 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3614 bt_cb(skb)->tx_seq = tx_seq;
3615 bt_cb(skb)->sar = sar;
3616 __skb_queue_tail(&chan->srej_q, skb);
3620 err = l2cap_push_rx_skb(chan, skb, rx_control);
3624 if (rx_control & L2CAP_CTRL_FINAL) {
3625 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3626 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3628 l2cap_retransmit_frames(chan);
3631 __set_ack_timer(chan);
3633 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3634 if (chan->num_acked == num_to_ack - 1)
3635 l2cap_send_ack(chan);
/* Handle a Receiver Ready (RR) S-frame: ack frames up to ReqSeq, then
 * respond to a P-bit poll (with F-bit set), process an F-bit final
 * response (possibly triggering REJ retransmission), or treat a plain
 * RR as permission to resume sending.
 */
3644 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3646 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3649 chan->expected_ack_seq = __get_reqseq(rx_control);
3650 l2cap_drop_acked_frames(chan);
3652 if (rx_control & L2CAP_CTRL_POLL) {
3653 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
/* While in SREJ recovery answer the poll with a tail SREJ so the
 * remote learns which frames are still missing. */
3654 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3655 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3656 (chan->unacked_frames > 0))
3657 __set_retrans_timer(chan);
3659 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3660 l2cap_send_srejtail(chan);
3662 l2cap_send_i_or_rr_or_rnr(chan);
3665 } else if (rx_control & L2CAP_CTRL_FINAL) {
3666 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* A final RR that does not answer an active REJ triggers the
 * pending retransmission. */
3668 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3669 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3671 l2cap_retransmit_frames(chan);
3674 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3675 (chan->unacked_frames > 0))
3676 __set_retrans_timer(chan);
3678 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3679 if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
3680 l2cap_send_ack(chan);
3682 l2cap_ertm_send(chan);
/* Handle a Reject (REJ) S-frame: ack everything before ReqSeq and
 * retransmit from there.  While waiting for a Final response (WAIT_F)
 * the REJ is remembered via REJ_ACT so the eventual F-bit frame does
 * not trigger a second retransmission.
 */
3686 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3688 u8 tx_seq = __get_reqseq(rx_control);
3690 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3692 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3694 chan->expected_ack_seq = tx_seq;
3695 l2cap_drop_acked_frames(chan);
3697 if (rx_control & L2CAP_CTRL_FINAL) {
3698 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3699 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3701 l2cap_retransmit_frames(chan);
3703 l2cap_retransmit_frames(chan);
3705 if (chan->conn_state & L2CAP_CONN_WAIT_F)
3706 chan->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Select Reject (SREJ) S-frame: retransmit exactly the frame
 * the remote asks for.  A P-bit SREJ also acks prior frames and is
 * answered with the F-bit; an F-bit SREJ may complete an earlier
 * SREJ_ACT exchange (srej_save_reqseq) instead of retransmitting.
 */
3709 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3711 u8 tx_seq = __get_reqseq(rx_control);
3713 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3715 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3717 if (rx_control & L2CAP_CTRL_POLL) {
3718 chan->expected_ack_seq = tx_seq;
3719 l2cap_drop_acked_frames(chan);
3721 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3722 l2cap_retransmit_one_frame(chan, tx_seq);
3724 l2cap_ertm_send(chan);
/* Remember which seq this poll asked for, so a later F-bit SREJ
 * with the same ReqSeq is not retransmitted again. */
3726 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3727 chan->srej_save_reqseq = tx_seq;
3728 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
3730 } else if (rx_control & L2CAP_CTRL_FINAL) {
3731 if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
3732 chan->srej_save_reqseq == tx_seq)
3733 chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3735 l2cap_retransmit_one_frame(chan, tx_seq);
3737 l2cap_retransmit_one_frame(chan, tx_seq);
3738 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3739 chan->srej_save_reqseq = tx_seq;
3740 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready (RNR) S-frame: mark the remote busy, ack
 * frames up to ReqSeq and stop the retransmission timer.  A P-bit RNR
 * is answered with an F-bit RR/RNR, or with a tail SREJ when SREJ
 * recovery is in progress.
 */
3745 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3747 u8 tx_seq = __get_reqseq(rx_control);
3749 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3751 chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3752 chan->expected_ack_seq = tx_seq;
3753 l2cap_drop_acked_frames(chan);
3755 if (rx_control & L2CAP_CTRL_POLL)
3756 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3758 if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
3759 __clear_retrans_timer(chan);
3760 if (rx_control & L2CAP_CTRL_POLL)
3761 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
/* In SREJ recovery: report outstanding gaps instead of a plain RR. */
3765 if (rx_control & L2CAP_CTRL_POLL)
3766 l2cap_send_srejtail(chan);
3768 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame to the RR/REJ/SREJ/RNR handler after
 * first processing an F-bit that terminates a WAIT_F poll exchange.
 */
3771 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3773 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3775 if (L2CAP_CTRL_FINAL & rx_control &&
3776 chan->conn_state & L2CAP_CONN_WAIT_F) {
3777 __clear_monitor_timer(chan);
3778 if (chan->unacked_frames > 0)
3779 __set_retrans_timer(chan);
3780 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3783 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3784 case L2CAP_SUPER_RCV_READY:
3785 l2cap_data_channel_rrframe(chan, rx_control);
3788 case L2CAP_SUPER_REJECT:
3789 l2cap_data_channel_rejframe(chan, rx_control);
3792 case L2CAP_SUPER_SELECT_REJECT:
3793 l2cap_data_channel_srejframe(chan, rx_control);
3796 case L2CAP_SUPER_RCV_NOT_READY:
3797 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for an ERTM frame on a connected channel: validate the
 * FCS, payload size against MPS, and the ReqSeq window, then route to
 * the I-frame or S-frame handler.  Corrupted FCS frames are silently
 * dropped (the receiver-side recovery will request retransmission);
 * any other violation disconnects the channel.
 */
3805 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3807 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3810 int len, next_tx_seq_offset, req_seq_offset;
3812 control = get_unaligned_le16(skb->data);
3817 * We can just drop the corrupted I-frame here.
3818 * Receiver will miss it and start proper recovery
3819 * procedures and ask retransmission.
3821 if (l2cap_check_fcs(chan, skb))
/* A START segment carries the 2-byte SDU length in its payload;
 * account for it when checking against MPS. */
3824 if (__is_sar_start(control) && __is_iframe(control))
3827 if (chan->fcs == L2CAP_FCS_CRC16)
3830 if (len > chan->mps) {
3831 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3835 req_seq = __get_reqseq(control);
3836 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3837 if (req_seq_offset < 0)
3838 req_seq_offset += 64;
3840 next_tx_seq_offset =
3841 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3842 if (next_tx_seq_offset < 0)
3843 next_tx_seq_offset += 64;
3845 /* check for invalid req-seq */
3846 if (req_seq_offset > next_tx_seq_offset) {
3847 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3851 if (__is_iframe(control)) {
/* Elided check failed (I-frame with zero-length payload area). */
3853 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3857 l2cap_data_channel_iframe(chan, control, skb);
/* Elided check failed (S-frame carrying payload). */
3861 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3865 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data frame to the channel identified by its source CID,
 * dispatching per channel mode: Basic mode forwards directly (after an
 * MTU check), ERTM goes through l2cap_ertm_data_rcv (or the socket
 * backlog when the socket is owned by a user context), and Streaming
 * validates FCS/SAR/size then reassembles, tolerating lost frames by
 * resynchronizing expected_tx_seq.
 */
3875 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3877 struct l2cap_chan *chan;
3878 struct sock *sk = NULL;
3883 chan = l2cap_get_chan_by_scid(conn, cid);
3885 BT_DBG("unknown cid 0x%4.4x", cid);
3891 BT_DBG("chan %p, len %d", chan, skb->len);
3893 if (chan->state != BT_CONNECTED)
3896 switch (chan->mode) {
3897 case L2CAP_MODE_BASIC:
3898 /* If socket recv buffers overflows we drop data here
3899 * which is *bad* because L2CAP has to be reliable.
3900 * But we don't have any other choice. L2CAP doesn't
3901 * provide flow control mechanism. */
3903 if (chan->imtu < skb->len)
3906 if (!chan->ops->recv(chan->data, skb))
3910 case L2CAP_MODE_ERTM:
/* Process directly unless a user context holds the socket lock,
 * in which case the frame is queued on the backlog. */
3911 if (!sock_owned_by_user(sk)) {
3912 l2cap_ertm_data_rcv(sk, skb);
3914 if (sk_add_backlog(sk, skb))
3920 case L2CAP_MODE_STREAMING:
3921 control = get_unaligned_le16(skb->data);
3925 if (l2cap_check_fcs(chan, skb))
3928 if (__is_sar_start(control))
3931 if (chan->fcs == L2CAP_FCS_CRC16)
3934 if (len > chan->mps || len < 0 || __is_sframe(control))
3937 tx_seq = __get_txseq(control);
/* Streaming tolerates loss: simply resync to the received seq. */
3939 if (chan->expected_tx_seq == tx_seq)
3940 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3942 chan->expected_tx_seq = (tx_seq + 1) % 64;
3944 l2cap_streaming_reassembly_sdu(chan, skb, control);
3949 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (G-frame) payload to the channel listening
 * on the given PSM for the local address, subject to state and MTU
 * checks.
 */
3963 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3965 struct sock *sk = NULL;
3966 struct l2cap_chan *chan;
3968 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3976 BT_DBG("sk %p, len %d", sk, skb->len);
3978 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3981 if (l2cap_pi(sk)->chan->imtu < skb->len)
3984 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE fixed channel) payload to the channel bound to
 * the given CID for the local address, subject to state and MTU
 * checks.  Mirrors l2cap_conless_channel but looks up by CID.
 */
3996 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3998 struct sock *sk = NULL;
3999 struct l2cap_chan *chan;
4001 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4009 BT_DBG("sk %p, len %d", sk, skb->len);
4011 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4014 if (l2cap_pi(sk)->chan->imtu < skb->len)
4017 if (!chan->ops->recv(chan->data, skb))
/* Top-level demultiplexer for one complete L2CAP frame: strip the
 * basic header, validate the advertised length, and route by CID to
 * the signaling, connectionless, ATT, SMP, or dynamic data channel
 * handlers.
 */
4029 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4031 struct l2cap_hdr *lh = (void *) skb->data;
4035 skb_pull(skb, L2CAP_HDR_SIZE);
4036 cid = __le16_to_cpu(lh->cid);
4037 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload exactly. */
4039 if (len != skb->len) {
4044 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4047 case L2CAP_CID_LE_SIGNALING:
4048 case L2CAP_CID_SIGNALING:
4049 l2cap_sig_channel(conn, skb);
4052 case L2CAP_CID_CONN_LESS:
/* Connectionless payload starts with the target PSM. */
4053 psm = get_unaligned_le16(skb->data);
4055 l2cap_conless_channel(conn, psm, skb);
4058 case L2CAP_CID_LE_DATA:
4059 l2cap_att_channel(conn, cid, skb);
/* SMP (security manager) fixed channel; an SMP failure drops
 * the whole connection with EACCES. */
4063 if (smp_sig_channel(conn, skb))
4064 l2cap_conn_del(conn->hcon, EACCES);
/* Everything else is a dynamically allocated data channel. */
4068 l2cap_data_channel(conn, cid, skb);
4073 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether an incoming ACL connection from bdaddr
 * should be accepted.  Scans listening channels; an exact local
 * address match (lm1) takes precedence over wildcard listeners (lm2).
 * Returns the accumulated HCI link-mode flags, 0 to refuse.
 */
4075 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4077 int exact = 0, lm1 = 0, lm2 = 0;
4078 struct l2cap_chan *c;
/* Only BR/EDR ACL links are negotiated this way. */
4080 if (type != ACL_LINK)
4083 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4085 /* Find listening sockets and check their link_mode */
4086 read_lock(&chan_list_lock);
4087 list_for_each_entry(c, &chan_list, global_l) {
4088 struct sock *sk = c->sk;
4090 if (c->state != BT_LISTEN)
4093 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4094 lm1 |= HCI_LM_ACCEPT;
4096 lm1 |= HCI_LM_MASTER;
4098 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4099 lm2 |= HCI_LM_ACCEPT;
4101 lm2 |= HCI_LM_MASTER;
4104 read_unlock(&chan_list_lock);
4106 return exact ? lm1 : lm2;
/* HCI callback: a baseband connection attempt completed.  On success
 * create/ready the L2CAP connection object; on failure tear it down
 * with the mapped Bluetooth error.
 */
4109 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4111 struct l2cap_conn *conn;
4113 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4115 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4119 conn = l2cap_conn_add(hcon, status);
4121 l2cap_conn_ready(conn);
4123 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report the HCI reason code to use when the controller
 * asks why the link should be disconnected.
 */
4128 static int l2cap_disconn_ind(struct hci_conn *hcon)
4130 struct l2cap_conn *conn = hcon->l2cap_data;
4132 BT_DBG("hcon %p", hcon);
4134 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4137 return conn->disc_reason;
/* HCI callback: the baseband link went down; destroy the L2CAP
 * connection with the mapped error code.
 */
4140 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4142 BT_DBG("hcon %p reason %d", hcon, reason);
4144 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4147 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption gives a MEDIUM-security channel a 5s grace timer
 * and closes a HIGH-security channel outright; regaining encryption
 * cancels the grace timer.
 */
4152 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4154 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4157 if (encrypt == 0x00) {
4158 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4159 __clear_chan_timer(chan);
4160 __set_chan_timer(chan, HZ * 5);
4161 } else if (chan->sec_level == BT_SECURITY_HIGH)
4162 l2cap_chan_close(chan, ECONNREFUSED);
4164 if (chan->sec_level == BT_SECURITY_MEDIUM)
4165 __clear_chan_timer(chan);
/* HCI callback: an authentication/encryption procedure finished on
 * hcon.  Walk every channel on the connection and advance its state
 * machine: mark LE channels ready, re-check encryption on established
 * channels, send the deferred Connection Request for channels in
 * BT_CONNECT, and answer pending incoming connections (BT_CONNECT2)
 * with success or a security-block response.
 */
4169 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4171 struct l2cap_conn *conn = hcon->l2cap_data;
4172 struct l2cap_chan *chan;
4177 BT_DBG("conn %p", conn);
4179 read_lock(&conn->chan_lock);
4181 list_for_each_entry(chan, &conn->chan_l, list) {
4182 struct sock *sk = chan->sk;
4186 BT_DBG("chan->scid %d", chan->scid);
/* LE fixed channel: security completion makes it ready. */
4188 if (chan->scid == L2CAP_CID_LE_DATA) {
4189 if (!status && encrypt) {
4190 chan->sec_level = hcon->sec_level;
4191 del_timer(&conn->security_timer);
4192 l2cap_chan_ready(sk);
/* A channel with a pending connect is handled below, not by the
 * encryption re-check. */
4199 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4204 if (!status && (chan->state == BT_CONNECTED ||
4205 chan->state == BT_CONFIG)) {
4206 l2cap_check_encryption(chan, encrypt);
/* Security was the gate for our outgoing connect: send it now. */
4211 if (chan->state == BT_CONNECT) {
4213 struct l2cap_conn_req req;
4214 req.scid = cpu_to_le16(chan->scid);
4215 req.psm = chan->psm;
4217 chan->ident = l2cap_get_ident(conn);
4218 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4220 l2cap_send_cmd(conn, chan->ident,
4221 L2CAP_CONN_REQ, sizeof(req), &req);
/* Security failed: short timer to tear the channel down. */
4223 __clear_chan_timer(chan);
4224 __set_chan_timer(chan, HZ / 10);
4226 } else if (chan->state == BT_CONNECT2) {
4227 struct l2cap_conn_rsp rsp;
4231 l2cap_state_change(chan, BT_CONFIG);
4232 result = L2CAP_CR_SUCCESS;
4234 l2cap_state_change(chan, BT_DISCONN);
4235 __set_chan_timer(chan, HZ / 10);
4236 result = L2CAP_CR_SEC_BLOCK;
4239 rsp.scid = cpu_to_le16(chan->dcid);
4240 rsp.dcid = cpu_to_le16(chan->scid);
4241 rsp.result = cpu_to_le16(result);
4242 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4243 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4250 read_unlock(&conn->chan_lock);
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 * A start fragment (no ACL_CONT) is parsed for the basic header; if it
 * already holds the whole frame it is processed directly, otherwise a
 * buffer of the announced length is allocated in conn->rx_skb and
 * continuation fragments are appended until rx_len reaches zero.
 * Any framing violation marks the connection unreliable (ECOMM).
 */
4255 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4257 struct l2cap_conn *conn = hcon->l2cap_data;
4260 conn = l2cap_conn_add(hcon, 0);
4265 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4267 if (!(flags & ACL_CONT)) {
4268 struct l2cap_hdr *hdr;
4269 struct l2cap_chan *chan;
/* A start fragment while a previous reassembly is still open
 * means the earlier frame was truncated: drop it. */
4274 BT_ERR("Unexpected start frame (len %d)", skb->len);
4275 kfree_skb(conn->rx_skb);
4276 conn->rx_skb = NULL;
4278 l2cap_conn_unreliable(conn, ECOMM);
4281 /* Start fragment always begin with Basic L2CAP header */
4282 if (skb->len < L2CAP_HDR_SIZE) {
4283 BT_ERR("Frame is too short (len %d)", skb->len);
4284 l2cap_conn_unreliable(conn, ECOMM);
4288 hdr = (struct l2cap_hdr *) skb->data;
4289 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4290 cid = __le16_to_cpu(hdr->cid);
4292 if (len == skb->len) {
4293 /* Complete frame received */
4294 l2cap_recv_frame(conn, skb);
4298 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4300 if (skb->len > len) {
4301 BT_ERR("Frame is too long (len %d, expected len %d)",
4303 l2cap_conn_unreliable(conn, ECOMM);
/* Early MTU check so an oversized frame is rejected before
 * buffering all of its fragments. */
4307 chan = l2cap_get_chan_by_scid(conn, cid);
4309 if (chan && chan->sk) {
4310 struct sock *sk = chan->sk;
4312 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4313 BT_ERR("Frame exceeding recv MTU (len %d, "
4317 l2cap_conn_unreliable(conn, ECOMM);
4323 /* Allocate skb for the complete frame (with header) */
4324 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4328 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4330 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4332 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4334 if (!conn->rx_len) {
4335 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4336 l2cap_conn_unreliable(conn, ECOMM);
4340 if (skb->len > conn->rx_len) {
4341 BT_ERR("Fragment is too long (len %d, expected %d)",
4342 skb->len, conn->rx_len);
4343 kfree_skb(conn->rx_skb);
4344 conn->rx_skb = NULL;
4346 l2cap_conn_unreliable(conn, ECOMM);
4350 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4352 conn->rx_len -= skb->len;
4354 if (!conn->rx_len) {
4355 /* Complete frame received */
4356 l2cap_recv_frame(conn, conn->rx_skb);
4357 conn->rx_skb = NULL;
/* seq_file show handler for /sys/kernel/debug/bluetooth/l2cap: one
 * line per registered channel with addresses, state, PSM, CIDs, MTUs,
 * security level and mode.
 */
4366 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4368 struct l2cap_chan *c;
4370 read_lock_bh(&chan_list_lock);
4372 list_for_each_entry(c, &chan_list, global_l) {
4373 struct sock *sk = c->sk;
4375 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4376 batostr(&bt_sk(sk)->src),
4377 batostr(&bt_sk(sk)->dst),
4378 c->state, __le16_to_cpu(c->psm),
4379 c->scid, c->dcid, c->imtu, c->omtu,
4380 c->sec_level, c->mode);
4383 read_unlock_bh(&chan_list_lock);
/* debugfs open: bind the seq_file single-show handler. */
4388 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4390 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (single-record seq_file). */
4393 static const struct file_operations l2cap_debugfs_fops = {
4394 .open = l2cap_debugfs_open,
4396 .llseek = seq_lseek,
4397 .release = single_release,
/* Dentry of the debugfs file, kept for removal in l2cap_exit(). */
4400 static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI core's protocol
 * callback table.
 */
4402 static struct hci_proto l2cap_hci_proto = {
4404 .id = HCI_PROTO_L2CAP,
4405 .connect_ind = l2cap_connect_ind,
4406 .connect_cfm = l2cap_connect_cfm,
4407 .disconn_ind = l2cap_disconn_ind,
4408 .disconn_cfm = l2cap_disconn_cfm,
4409 .security_cfm = l2cap_security_cfm,
4410 .recv_acldata = l2cap_recv_acldata
/* Module init: register the L2CAP socket family, create the
 * single-threaded workqueue used for Local-Busy processing, register
 * with the HCI core, and (best effort) create the debugfs entry.
 * Failures unwind in reverse order.
 */
4413 int __init l2cap_init(void)
4417 err = l2cap_init_sockets();
4421 _busy_wq = create_singlethread_workqueue("l2cap");
4427 err = hci_register_proto(&l2cap_hci_proto);
4429 BT_ERR("L2CAP protocol registration failed");
4430 bt_sock_unregister(BTPROTO_L2CAP);
4435 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4436 bt_debugfs, NULL, &l2cap_debugfs_fops);
/* debugfs failure is non-fatal: only log it. */
4438 BT_ERR("Failed to create L2CAP debug file");
4444 destroy_workqueue(_busy_wq);
4445 l2cap_cleanup_sockets();
/* Module exit: tear down in reverse of l2cap_init() — debugfs entry,
 * busy workqueue (flushed first so no work item outlives it), HCI
 * protocol registration, and the socket family.
 */
4449 void l2cap_exit(void)
4451 debugfs_remove(l2cap_debugfs);
4453 flush_workqueue(_busy_wq);
4454 destroy_workqueue(_busy_wq);
4456 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4457 BT_ERR("L2CAP protocol unregistration failed");
4459 l2cap_cleanup_sockets();
/* Module parameter: lets ERTM be disabled at load/run time for
 * interoperability testing. */
4462 module_param(disable_ertm, bool, 0644);
4463 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");