2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
/* NOTE(review): this file is a partial extraction of the kernel L2CAP core;
 * the leading integer on each line is the original file's line number, fused
 * in by the extraction, and gaps in that numbering mean lines are missing. */

/* File-scope state: the advertised L2CAP feature mask, the fixed-channel
 * bitmap, and the global list of channels guarded by an rwlock. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for the signalling helpers defined later in the file.
 * NOTE(review): the continuation line of the l2cap_send_cmd prototype
 * (original line 71, the "void *data);" tail) is missing from this view. */
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
78 /* ---- L2CAP channels ---- */
/* Look up a channel on this connection by destination CID.
 * Walks conn->chan_l under RCU; the match test and return path
 * (original lines 83-96) are missing from this extraction. */
80 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 struct l2cap_chan *c, *r = NULL;
86 list_for_each_entry_rcu(c, &conn->chan_l, list) {
/* Look up a channel on this connection by source CID (RCU list walk). */
97 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
99 struct l2cap_chan *c, *r = NULL;
103 list_for_each_entry_rcu(c, &conn->chan_l, list) {
104 if (c->scid == cid) {
114 /* Find channel with given SCID.
115 * Returns locked socket */
/* Locking wrapper around __l2cap_get_chan_by_scid; the lock/unlock
 * statements around the lookup are among the missing lines. */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
118 struct l2cap_chan *c;
120 c = __l2cap_get_chan_by_scid(conn, cid);
/* Look up a channel by the signalling command identifier it is waiting on. */
126 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
128 struct l2cap_chan *c, *r = NULL;
132 list_for_each_entry_rcu(c, &conn->chan_l, list) {
133 if (c->ident == ident) {
143 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
145 struct l2cap_chan *c;
147 c = __l2cap_get_chan_by_ident(conn, ident);
/* Global lookup: any registered channel bound to this PSM and source
 * address.  Caller must hold chan_list_lock (see users below). */
153 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
155 struct l2cap_chan *c;
157 list_for_each_entry(c, &chan_list, global_l) {
158 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM.  With psm == 0, auto-allocates an odd dynamic
 * PSM in 0x1001..0x10ff (step 2 keeps the LSB set, as L2CAP requires). */
164 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
168 write_lock(&chan_list_lock);
170 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
183 for (p = 0x1001; p < 0x1100; p += 2)
184 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 chan->psm = cpu_to_le16(p);
186 chan->sport = cpu_to_le16(p);
193 write_unlock(&chan_list_lock);
/* Bind a channel directly to a fixed SCID (assignment line missing here). */
197 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
199 write_lock(&chan_list_lock);
203 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic CID on this connection by linear scan. */
208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
210 u16 cid = L2CAP_CID_DYN_START;
212 for (; cid < L2CAP_CID_DYN_END; cid++) {
213 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Debug helper: BT_* channel state to printable string.  Most of the
 * switch cases are missing from this extraction. */
220 static char *state_to_string(int state)
224 return "BT_CONNECTED";
234 return "BT_CONNECT2";
243 return "invalid state";
/* Record a channel state transition (with debug trace) and notify the
 * owner through the ops->state_change callback. */
246 static void l2cap_state_change(struct l2cap_chan *chan, int state)
248 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
249 state_to_string(state));
252 chan->ops->state_change(chan->data, state);
/* Delayed-work handler for the per-channel timer: pick a close reason
 * from the current state, close the channel, notify the owner, and drop
 * the reference taken when the timer was armed.  NOTE(review): the
 * default-reason branch (presumably ETIMEDOUT) and the socket locking
 * are among the missing lines — confirm against the upstream source. */
255 static void l2cap_chan_timeout(struct work_struct *work)
257 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
259 struct sock *sk = chan->sk;
262 BT_DBG("chan %p state %d", chan, chan->state);
266 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
267 reason = ECONNREFUSED;
268 else if (chan->state == BT_CONNECT &&
269 chan->sec_level != BT_SECURITY_SDP)
270 reason = ECONNREFUSED;
274 l2cap_chan_close(chan, reason);
278 chan->ops->close(chan->data);
279 l2cap_chan_put(chan);
/* Allocate a channel for a socket, register it on the global list, arm
 * its timer work, and start it at BT_OPEN with refcount 1.
 * NOTE(review): the NULL check after kzalloc and the chan->sk assignment
 * are not visible in this extraction. */
282 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
284 struct l2cap_chan *chan;
286 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
292 write_lock(&chan_list_lock);
293 list_add(&chan->global_l, &chan_list);
294 write_unlock(&chan_list_lock);
296 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
298 chan->state = BT_OPEN;
300 atomic_set(&chan->refcnt, 1);
302 BT_DBG("sk %p chan %p", sk, chan);
/* Unregister from the global list and drop the creation reference. */
307 void l2cap_chan_destroy(struct l2cap_chan *chan)
309 write_lock(&chan_list_lock);
310 list_del(&chan->global_l);
311 write_unlock(&chan_list_lock);
313 l2cap_chan_put(chan);
/* Attach a channel to a connection: pick SCID/DCID and default MTU by
 * channel type (LE data, dynamically allocated ACL CID, connectionless,
 * or raw/signalling), seed the default extended-flowspec parameters,
 * take a reference, and link it into conn->chan_l under RCU. */
316 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
318 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
319 chan->psm, chan->dcid);
321 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
325 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
326 if (conn->hcon->type == LE_LINK) {
328 chan->omtu = L2CAP_LE_DEFAULT_MTU;
329 chan->scid = L2CAP_CID_LE_DATA;
330 chan->dcid = L2CAP_CID_LE_DATA;
332 /* Alloc CID for connection-oriented socket */
333 chan->scid = l2cap_alloc_cid(conn);
334 chan->omtu = L2CAP_DEFAULT_MTU;
336 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
337 /* Connectionless socket */
338 chan->scid = L2CAP_CID_CONN_LESS;
339 chan->dcid = L2CAP_CID_CONN_LESS;
340 chan->omtu = L2CAP_DEFAULT_MTU;
342 /* Raw socket can send/recv signalling messages only */
343 chan->scid = L2CAP_CID_SIGNALING;
344 chan->dcid = L2CAP_CID_SIGNALING;
345 chan->omtu = L2CAP_DEFAULT_MTU;
348 chan->local_id = L2CAP_BESTEFFORT_ID;
349 chan->local_stype = L2CAP_SERV_BESTEFFORT;
350 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
351 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
352 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
353 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
355 l2cap_chan_hold(chan);
357 list_add_rcu(&chan->list, &conn->chan_l);
/* Detach a channel from its connection and zap the socket: cancel the
 * timer, unlink from the RCU list, drop the list's chan and hcon refs,
 * move to BT_CLOSED, wake the parent (for unaccepted children) or the
 * socket itself, and — unless configuration completed on both sides —
 * purge ERTM state (queues, retrans/monitor/ack timers, SREJ list). */
361 * Must be called on the locked socket. */
362 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
364 struct sock *sk = chan->sk;
365 struct l2cap_conn *conn = chan->conn;
366 struct sock *parent = bt_sk(sk)->parent;
368 __clear_chan_timer(chan);
370 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
373 /* Delete from channel list */
374 list_del_rcu(&chan->list);
377 l2cap_chan_put(chan);
380 hci_conn_put(conn->hcon);
383 l2cap_state_change(chan, BT_CLOSED);
384 sock_set_flag(sk, SOCK_ZAPPED);
390 bt_accept_unlink(sk);
391 parent->sk_data_ready(parent, 0);
393 sk->sk_state_change(sk);
395 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
396 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
399 skb_queue_purge(&chan->tx_q);
401 if (chan->mode == L2CAP_MODE_ERTM) {
402 struct srej_list *l, *tmp;
404 __clear_retrans_timer(chan);
405 __clear_monitor_timer(chan);
406 __clear_ack_timer(chan);
408 skb_queue_purge(&chan->srej_q);
410 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Tear down every not-yet-accepted child channel of a listening socket. */
417 static void l2cap_chan_cleanup_listen(struct sock *parent)
421 BT_DBG("parent %p", parent);
423 /* Close not yet accepted channels */
424 while ((sk = bt_accept_dequeue(parent, NULL))) {
425 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
426 __clear_chan_timer(chan);
428 l2cap_chan_close(chan, ECONNRESET);
430 chan->ops->close(chan->data);
/* Close a channel according to its state: clean up a listener's backlog;
 * for a connected/configuring ACL connection-oriented channel send a
 * Disconnect Request (re-arming the timer to bound the wait); for a
 * half-open inbound channel (BT_CONNECT2) answer the pending Connection
 * Request with a refusal before deleting; otherwise just delete or zap.
 * NOTE(review): the case labels of this switch are among the missing
 * lines, so the state-to-branch mapping here is inferred — confirm
 * against the upstream source. */
434 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
436 struct l2cap_conn *conn = chan->conn;
437 struct sock *sk = chan->sk;
439 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
441 switch (chan->state) {
443 l2cap_chan_cleanup_listen(sk);
445 l2cap_state_change(chan, BT_CLOSED);
446 sock_set_flag(sk, SOCK_ZAPPED);
451 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
452 conn->hcon->type == ACL_LINK) {
453 __clear_chan_timer(chan);
454 __set_chan_timer(chan, sk->sk_sndtimeo);
455 l2cap_send_disconn_req(conn, chan, reason);
457 l2cap_chan_del(chan, reason);
461 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
462 conn->hcon->type == ACL_LINK) {
463 struct l2cap_conn_rsp rsp;
466 if (bt_sk(sk)->defer_setup)
467 result = L2CAP_CR_SEC_BLOCK;
469 result = L2CAP_CR_BAD_PSM;
470 l2cap_state_change(chan, BT_DISCONN);
/* Note scid/dcid are swapped in the response: our dcid is the peer's scid. */
472 rsp.scid = cpu_to_le16(chan->dcid);
473 rsp.dcid = cpu_to_le16(chan->scid);
474 rsp.result = cpu_to_le16(result);
475 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
476 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
480 l2cap_chan_del(chan, reason);
485 l2cap_chan_del(chan, reason);
489 sock_set_flag(sk, SOCK_ZAPPED);
/* Map channel type + security level to an HCI authentication requirement:
 * raw channels request dedicated bonding, PSM 0x0001 (SDP) never bonds
 * (and LOW is promoted to the SDP security level), everything else uses
 * general bonding. */
494 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
496 if (chan->chan_type == L2CAP_CHAN_RAW) {
497 switch (chan->sec_level) {
498 case BT_SECURITY_HIGH:
499 return HCI_AT_DEDICATED_BONDING_MITM;
500 case BT_SECURITY_MEDIUM:
501 return HCI_AT_DEDICATED_BONDING;
503 return HCI_AT_NO_BONDING;
505 } else if (chan->psm == cpu_to_le16(0x0001)) {
506 if (chan->sec_level == BT_SECURITY_LOW)
507 chan->sec_level = BT_SECURITY_SDP;
509 if (chan->sec_level == BT_SECURITY_HIGH)
510 return HCI_AT_NO_BONDING_MITM;
512 return HCI_AT_NO_BONDING;
514 switch (chan->sec_level) {
515 case BT_SECURITY_HIGH:
516 return HCI_AT_GENERAL_BONDING_MITM;
517 case BT_SECURITY_MEDIUM:
518 return HCI_AT_GENERAL_BONDING;
520 return HCI_AT_NO_BONDING;
525 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the link. */
526 int l2cap_chan_check_security(struct l2cap_chan *chan)
528 struct l2cap_conn *conn = chan->conn;
531 auth_type = l2cap_get_auth_type(chan);
533 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for this connection,
 * wrapping within the kernel-reserved 1..128 range under conn->lock. */
536 static u8 l2cap_get_ident(struct l2cap_conn *conn)
540 /* Get next available identificator.
541 * 1 - 128 are used by kernel.
542 * 129 - 199 are reserved.
543 * 200 - 254 are used by utilities like l2ping, etc.
546 spin_lock(&conn->lock);
548 if (++conn->tx_ident > 128)
553 spin_unlock(&conn->lock);
/* Build an L2CAP signalling command and push it out over the HCI channel
 * at maximum priority, marking the ACL start non-flushable when the
 * controller supports it. */
558 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
560 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
563 BT_DBG("code 0x%2.2x", code);
568 if (lmp_no_flush_capable(conn->hcon->hdev))
569 flags = ACL_START_NO_FLUSH;
573 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
574 skb->priority = HCI_PRIO_MAX;
576 hci_send_acl(conn->hchan, skb, flags);
/* Transmit one data skb on a channel, honouring the FLUSHABLE and
 * FORCE_ACTIVE channel flags when choosing the ACL flags. */
579 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
581 struct hci_conn *hcon = chan->conn->hcon;
584 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
587 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
588 lmp_no_flush_capable(hcon->hdev))
589 flags = ACL_START_NO_FLUSH;
593 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
594 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM supervisory frame: size the header for enhanced
 * or extended control fields, fold in pending F/P bits, and append a
 * CRC16 FCS when the channel negotiated one. */
597 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
600 struct l2cap_hdr *lh;
601 struct l2cap_conn *conn = chan->conn;
604 if (chan->state != BT_CONNECTED)
607 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
608 hlen = L2CAP_EXT_HDR_SIZE;
610 hlen = L2CAP_ENH_HDR_SIZE;
612 if (chan->fcs == L2CAP_FCS_CRC16)
613 hlen += L2CAP_FCS_SIZE;
615 BT_DBG("chan %p, control 0x%8.8x", chan, control);
617 count = min_t(unsigned int, conn->mtu, hlen);
619 control |= __set_sframe(chan);
621 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
622 control |= __set_ctrl_final(chan);
624 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
625 control |= __set_ctrl_poll(chan);
627 skb = bt_skb_alloc(count, GFP_ATOMIC);
631 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
632 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
633 lh->cid = cpu_to_le16(chan->dcid);
635 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
637 if (chan->fcs == L2CAP_FCS_CRC16) {
638 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
639 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
642 skb->priority = HCI_PRIO_MAX;
643 l2cap_do_send(chan, skb);
/* Send RR, or RNR if we are locally busy (remembering RNR was sent),
 * always acknowledging up to buffer_seq. */
646 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
648 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
649 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
650 set_bit(CONN_RNR_SENT, &chan->conn_state);
652 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
654 control |= __set_reqseq(chan, chan->buffer_seq);
656 l2cap_send_sframe(chan, control);
/* True when no Connection Request from us is outstanding. */
659 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
661 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment: once the remote feature mask is known,
 * send a Connection Request (if security allows and none is pending);
 * otherwise first issue an Information Request for the feature mask and
 * arm the info timer. */
664 static void l2cap_do_start(struct l2cap_chan *chan)
666 struct l2cap_conn *conn = chan->conn;
668 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
669 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
672 if (l2cap_chan_check_security(chan) &&
673 __l2cap_no_conn_pending(chan)) {
674 struct l2cap_conn_req req;
675 req.scid = cpu_to_le16(chan->scid);
678 chan->ident = l2cap_get_ident(conn);
679 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
681 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
685 struct l2cap_info_req req;
686 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
688 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
689 conn->info_ident = l2cap_get_ident(conn);
691 schedule_delayed_work(&conn->info_timer,
692 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
694 l2cap_send_cmd(conn, conn->info_ident,
695 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether a channel mode is supported both locally and by the peer
 * (ERTM/streaming require the corresponding feature bits in both masks). */
699 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
701 u32 local_feat_mask = l2cap_feat_mask;
703 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
706 case L2CAP_MODE_ERTM:
707 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
708 case L2CAP_MODE_STREAMING:
709 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request for the channel (stopping ERTM timers first)
 * and move it to BT_DISCONN. */
715 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
718 struct l2cap_disconn_req req;
725 if (chan->mode == L2CAP_MODE_ERTM) {
726 __clear_retrans_timer(chan);
727 __clear_monitor_timer(chan);
728 __clear_ack_timer(chan);
731 req.dcid = cpu_to_le16(chan->dcid);
732 req.scid = cpu_to_le16(chan->scid);
733 l2cap_send_cmd(conn, l2cap_get_ident(conn),
734 L2CAP_DISCONN_REQ, sizeof(req), &req);
736 l2cap_state_change(chan, BT_DISCONN);
740 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on the link and push it forward:
 * outgoing channels in BT_CONNECT get a Connection Request (or are closed
 * if their mode is unsupported under state-2 config); incoming channels
 * in BT_CONNECT2 get a Connection Response — success, authorization
 * pending (defer_setup), or authentication pending — followed by the
 * first Configure Request on success. */
741 static void l2cap_conn_start(struct l2cap_conn *conn)
743 struct l2cap_chan *chan;
745 BT_DBG("conn %p", conn);
749 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
750 struct sock *sk = chan->sk;
754 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
759 if (chan->state == BT_CONNECT) {
760 struct l2cap_conn_req req;
762 if (!l2cap_chan_check_security(chan) ||
763 !__l2cap_no_conn_pending(chan)) {
768 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
769 && test_bit(CONF_STATE2_DEVICE,
770 &chan->conf_state)) {
771 /* l2cap_chan_close() calls list_del(chan)
772 * so release the lock */
773 l2cap_chan_close(chan, ECONNRESET);
778 req.scid = cpu_to_le16(chan->scid);
781 chan->ident = l2cap_get_ident(conn);
782 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
784 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
787 } else if (chan->state == BT_CONNECT2) {
788 struct l2cap_conn_rsp rsp;
790 rsp.scid = cpu_to_le16(chan->dcid);
791 rsp.dcid = cpu_to_le16(chan->scid);
793 if (l2cap_chan_check_security(chan)) {
794 if (bt_sk(sk)->defer_setup) {
795 struct sock *parent = bt_sk(sk)->parent;
796 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
797 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
799 parent->sk_data_ready(parent, 0);
802 l2cap_state_change(chan, BT_CONFIG);
803 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
804 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
807 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
808 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
811 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
814 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
815 rsp.result != L2CAP_CR_SUCCESS) {
820 set_bit(CONF_REQ_SENT, &chan->conf_state);
821 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
822 l2cap_build_conf_req(chan, buf), buf);
823 chan->num_conf_req++;
832 /* Find socket with cid and source bdaddr.
833 * Returns closest match, locked.
/* Global lookup by state + SCID: exact source-address match wins and
 * returns immediately; a BDADDR_ANY listener is kept as closest match. */
835 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
837 struct l2cap_chan *c, *c1 = NULL;
839 read_lock(&chan_list_lock);
841 list_for_each_entry(c, &chan_list, global_l) {
842 struct sock *sk = c->sk;
844 if (state && c->state != state)
847 if (c->scid == cid) {
849 if (!bacmp(&bt_sk(sk)->src, src)) {
850 read_unlock(&chan_list_lock);
855 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
860 read_unlock(&chan_list_lock);
/* Incoming LE connection: find a listener on the LE data CID, check its
 * backlog, spawn a child channel via ops->new_connection, copy addresses,
 * enqueue it on the parent's accept queue, attach it to the connection
 * and mark it connected, waking the parent. */
865 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
867 struct sock *parent, *sk;
868 struct l2cap_chan *chan, *pchan;
872 /* Check if we have socket listening on cid */
873 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
882 /* Check for backlog size */
883 if (sk_acceptq_is_full(parent)) {
884 BT_DBG("backlog full %d", parent->sk_ack_backlog);
888 chan = pchan->ops->new_connection(pchan->data);
894 hci_conn_hold(conn->hcon);
896 bacpy(&bt_sk(sk)->src, conn->src);
897 bacpy(&bt_sk(sk)->dst, conn->dst);
899 bt_accept_enqueue(parent, sk);
901 l2cap_chan_add(conn, chan);
903 __set_chan_timer(chan, sk->sk_sndtimeo);
905 l2cap_state_change(chan, BT_CONNECTED);
906 parent->sk_data_ready(parent, 0);
909 release_sock(parent);
/* Mark a channel fully connected: reset config state, stop the timer,
 * and wake either the parent listener or the socket itself. */
912 static void l2cap_chan_ready(struct sock *sk)
914 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
915 struct sock *parent = bt_sk(sk)->parent;
917 BT_DBG("sk %p, parent %p", sk, parent);
919 chan->conf_state = 0;
920 __clear_chan_timer(chan);
922 l2cap_state_change(chan, BT_CONNECTED);
923 sk->sk_state_change(sk);
926 parent->sk_data_ready(parent, 0);
/* HCI link is up: handle LE accept/security, then advance every channel —
 * LE channels become ready once SMP security passes, non-connection-
 * oriented channels go straight to BT_CONNECTED, and connection-oriented
 * ones in BT_CONNECT continue via l2cap_do_start(). */
929 static void l2cap_conn_ready(struct l2cap_conn *conn)
931 struct l2cap_chan *chan;
933 BT_DBG("conn %p", conn);
935 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
936 l2cap_le_conn_ready(conn);
938 if (conn->hcon->out && conn->hcon->type == LE_LINK)
939 smp_conn_security(conn, conn->hcon->pending_sec_level);
943 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
944 struct sock *sk = chan->sk;
948 if (conn->hcon->type == LE_LINK) {
949 if (smp_conn_security(conn, chan->sec_level))
950 l2cap_chan_ready(sk);
952 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
953 __clear_chan_timer(chan);
954 l2cap_state_change(chan, BT_CONNECTED);
955 sk->sk_state_change(sk);
957 } else if (chan->state == BT_CONNECT)
958 l2cap_do_start(chan);
966 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate a link-reliability error to channels flagged FORCE_RELIABLE
 * (the sk_err assignment is among the missing lines). */
967 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
969 struct l2cap_chan *chan;
971 BT_DBG("conn %p", conn);
975 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
976 struct sock *sk = chan->sk;
978 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
/* Info-request timer expired: give up on the feature-mask exchange and
 * start the pending channels anyway. */
985 static void l2cap_info_timeout(struct work_struct *work)
987 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
990 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
991 conn->info_ident = 0;
993 l2cap_conn_start(conn);
/* Tear down the whole L2CAP connection: free any partial rx skb, delete
 * every channel (and notify owners), drop the HCI channel, cancel the
 * info/security timers, destroy pending SMP state, and detach from hcon. */
996 static void l2cap_conn_del(struct hci_conn *hcon, int err)
998 struct l2cap_conn *conn = hcon->l2cap_data;
999 struct l2cap_chan *chan, *l;
1005 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1007 kfree_skb(conn->rx_skb);
1010 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1013 l2cap_chan_del(chan, err);
1015 chan->ops->close(chan->data);
1018 hci_chan_del(conn->hchan);
1020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1021 __cancel_delayed_work(&conn->info_timer);
1023 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1024 __cancel_delayed_work(&conn->security_timer);
1025 smp_chan_destroy(conn);
1028 hcon->l2cap_data = NULL;
/* SMP security timer expired — drop the whole connection. */
1032 static void security_timeout(struct work_struct *work)
1034 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1035 security_timer.work);
1037 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) L2CAP connection object for an HCI
 * link: allocate an HCI channel, size the MTU from the LE or ACL limits,
 * record endpoint addresses, and arm the security (LE) or info (ACL)
 * delayed work. */
1040 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1042 struct l2cap_conn *conn = hcon->l2cap_data;
1043 struct hci_chan *hchan;
1048 hchan = hci_chan_create(hcon);
1052 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1054 hci_chan_del(hchan);
1058 hcon->l2cap_data = conn;
1060 conn->hchan = hchan;
1062 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1064 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1065 conn->mtu = hcon->hdev->le_mtu;
1067 conn->mtu = hcon->hdev->acl_mtu;
1069 conn->src = &hcon->hdev->bdaddr;
1070 conn->dst = &hcon->dst;
1072 conn->feat_mask = 0;
1074 spin_lock_init(&conn->lock);
1076 INIT_LIST_HEAD(&conn->chan_l);
1078 if (hcon->type == LE_LINK)
1079 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1081 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1083 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1088 /* ---- Socket interface ---- */
1090 /* Find socket with psm and source bdaddr.
1091 * Returns closest match.
/* Global lookup by state + PSM, same closest-match policy as the SCID
 * variant: exact source address wins, BDADDR_ANY is the fallback. */
1093 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1095 struct l2cap_chan *c, *c1 = NULL;
1097 read_lock(&chan_list_lock);
1099 list_for_each_entry(c, &chan_list, global_l) {
1100 struct sock *sk = c->sk;
1102 if (state && c->state != state)
1105 if (c->psm == psm) {
1107 if (!bacmp(&bt_sk(sk)->src, src)) {
1108 read_unlock(&chan_list_lock);
1113 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1118 read_unlock(&chan_list_lock);
/* Outgoing connect: validate the PSM (odd, clear LSB of upper byte) and
 * mode, route to an HCI device, create the ACL or LE link at the
 * channel's security level, attach the channel to the (possibly new)
 * connection, and either complete immediately if the link is already up
 * or wait in BT_CONNECT under the channel timer.  NOTE(review): several
 * error paths and the hci_dev lock/unlock pairing are missing lines. */
1123 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1125 struct sock *sk = chan->sk;
1126 bdaddr_t *src = &bt_sk(sk)->src;
1127 struct l2cap_conn *conn;
1128 struct hci_conn *hcon;
1129 struct hci_dev *hdev;
1133 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1136 hdev = hci_get_route(dst, src);
1138 return -EHOSTUNREACH;
1144 /* PSM must be odd and lsb of upper byte must be 0 */
1145 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1146 chan->chan_type != L2CAP_CHAN_RAW) {
1151 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1156 switch (chan->mode) {
1157 case L2CAP_MODE_BASIC:
1159 case L2CAP_MODE_ERTM:
1160 case L2CAP_MODE_STREAMING:
1169 switch (sk->sk_state) {
1173 /* Already connecting */
1178 /* Already connected */
1192 /* Set destination address and psm */
1193 bacpy(&bt_sk(sk)->dst, dst);
1197 auth_type = l2cap_get_auth_type(chan);
1199 if (chan->dcid == L2CAP_CID_LE_DATA)
1200 hcon = hci_connect(hdev, LE_LINK, dst,
1201 chan->sec_level, auth_type);
1203 hcon = hci_connect(hdev, ACL_LINK, dst,
1204 chan->sec_level, auth_type);
1207 err = PTR_ERR(hcon);
1211 conn = l2cap_conn_add(hcon, 0);
1218 /* Update source addr of the socket */
1219 bacpy(src, conn->src);
1221 l2cap_chan_add(conn, chan);
1223 l2cap_state_change(chan, BT_CONNECT);
1224 __set_chan_timer(chan, sk->sk_sndtimeo);
1226 if (hcon->state == BT_CONNECTED) {
1227 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1228 __clear_chan_timer(chan);
1229 if (l2cap_chan_check_security(chan))
1230 l2cap_state_change(chan, BT_CONNECTED);
1232 l2cap_do_start(chan);
1238 hci_dev_unlock(hdev);
/* Block (interruptibly) until all outstanding ERTM frames are acked or
 * the connection goes away; classic wait-queue sleep loop. */
1243 int __l2cap_wait_ack(struct sock *sk)
1245 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1246 DECLARE_WAITQUEUE(wait, current);
1250 add_wait_queue(sk_sleep(sk), &wait);
1251 set_current_state(TASK_INTERRUPTIBLE);
1252 while (chan->unacked_frames > 0 && chan->conn) {
1256 if (signal_pending(current)) {
1257 err = sock_intr_errno(timeo);
1262 timeo = schedule_timeout(timeo);
1264 set_current_state(TASK_INTERRUPTIBLE);
1266 err = sock_error(sk);
1270 set_current_state(TASK_RUNNING);
1271 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: give up (disconnect) after remote_max_tx retries,
 * otherwise re-arm and poll the peer with RR/RNR + P-bit. */
1275 static void l2cap_monitor_timeout(struct work_struct *work)
1277 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1278 monitor_timer.work);
1279 struct sock *sk = chan->sk;
1281 BT_DBG("chan %p", chan);
1284 if (chan->retry_count >= chan->remote_max_tx) {
1285 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1290 chan->retry_count++;
1291 __set_monitor_timer(chan);
1293 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: enter the WAIT_F state and start polling
 * the peer via the monitor timer. */
1297 static void l2cap_retrans_timeout(struct work_struct *work)
1299 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1300 retrans_timer.work);
1301 struct sock *sk = chan->sk;
1303 BT_DBG("chan %p", chan);
1306 chan->retry_count = 1;
1307 __set_monitor_timer(chan);
1309 set_bit(CONN_WAIT_F, &chan->conn_state);
1311 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Drop acknowledged frames from the head of tx_q up to expected_ack_seq,
 * decrementing unacked_frames; stop the retrans timer when none remain. */
1315 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1317 struct sk_buff *skb;
1319 while ((skb = skb_peek(&chan->tx_q)) &&
1320 chan->unacked_frames) {
1321 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1324 skb = skb_dequeue(&chan->tx_q);
1327 chan->unacked_frames--;
1330 if (!chan->unacked_frames)
1331 __clear_retrans_timer(chan);
/* Streaming mode transmit: stamp each queued frame with the next tx_seq,
 * recompute the FCS in place when CRC16 is in use, and send. */
1334 static void l2cap_streaming_send(struct l2cap_chan *chan)
1336 struct sk_buff *skb;
1340 while ((skb = skb_dequeue(&chan->tx_q))) {
1341 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1342 control |= __set_txseq(chan, chan->next_tx_seq);
1343 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1345 if (chan->fcs == L2CAP_FCS_CRC16) {
1346 fcs = crc16(0, (u8 *)skb->data,
1347 skb->len - L2CAP_FCS_SIZE);
1348 put_unaligned_le16(fcs,
1349 skb->data + skb->len - L2CAP_FCS_SIZE);
1352 l2cap_do_send(chan, skb);
1354 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single i-frame carrying tx_seq: locate it in tx_q,
 * abort the link if it already hit remote_max_tx retries, otherwise
 * clone it, refresh control bits (F-bit, reqseq, txseq) and FCS, send. */
1358 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1360 struct sk_buff *skb, *tx_skb;
1364 skb = skb_peek(&chan->tx_q);
1368 while (bt_cb(skb)->tx_seq != tx_seq) {
1369 if (skb_queue_is_last(&chan->tx_q, skb))
1372 skb = skb_queue_next(&chan->tx_q, skb);
1375 if (chan->remote_max_tx &&
1376 bt_cb(skb)->retries == chan->remote_max_tx) {
1377 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1381 tx_skb = skb_clone(skb, GFP_ATOMIC);
1382 bt_cb(skb)->retries++;
1384 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1385 control &= __get_sar_mask(chan);
1387 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1388 control |= __set_ctrl_final(chan);
1390 control |= __set_reqseq(chan, chan->buffer_seq);
1391 control |= __set_txseq(chan, tx_seq);
1393 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1395 if (chan->fcs == L2CAP_FCS_CRC16) {
1396 fcs = crc16(0, (u8 *)tx_skb->data,
1397 tx_skb->len - L2CAP_FCS_SIZE);
1398 put_unaligned_le16(fcs,
1399 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1402 l2cap_do_send(chan, tx_skb);
/* ERTM transmit loop: send frames from tx_send_head while the tx window
 * has room, cloning each skb, stamping control/FCS, arming the retrans
 * timer, and advancing sequence and bookkeeping counters.
 * NOTE(review): lines 1439/1441 compute and store the FCS via skb->data
 * (the original queued skb) while sizing by tx_skb->len — in upstream
 * both use tx_skb; cannot tell from this extraction whether that is a
 * transcription artifact or a real divergence.  Verify before relying
 * on it; writing the FCS into the clone is what gets transmitted. */
1405 static int l2cap_ertm_send(struct l2cap_chan *chan)
1407 struct sk_buff *skb, *tx_skb;
1412 if (chan->state != BT_CONNECTED)
1415 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1417 if (chan->remote_max_tx &&
1418 bt_cb(skb)->retries == chan->remote_max_tx) {
1419 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1423 tx_skb = skb_clone(skb, GFP_ATOMIC);
1425 bt_cb(skb)->retries++;
1427 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1428 control &= __get_sar_mask(chan);
1430 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1431 control |= __set_ctrl_final(chan);
1433 control |= __set_reqseq(chan, chan->buffer_seq);
1434 control |= __set_txseq(chan, chan->next_tx_seq);
1436 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1438 if (chan->fcs == L2CAP_FCS_CRC16) {
1439 fcs = crc16(0, (u8 *)skb->data,
1440 tx_skb->len - L2CAP_FCS_SIZE);
1441 put_unaligned_le16(fcs, skb->data +
1442 tx_skb->len - L2CAP_FCS_SIZE);
1445 l2cap_do_send(chan, tx_skb);
1447 __set_retrans_timer(chan);
1449 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1451 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1453 if (bt_cb(skb)->retries == 1)
1454 chan->unacked_frames++;
1456 chan->frames_sent++;
1458 if (skb_queue_is_last(&chan->tx_q, skb))
1459 chan->tx_send_head = NULL;
1461 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the start of tx_q and resend everything
 * from the last acknowledged sequence number. */
1469 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1473 if (!skb_queue_empty(&chan->tx_q))
1474 chan->tx_send_head = chan->tx_q.next;
1476 chan->next_tx_seq = chan->expected_ack_seq;
1477 ret = l2cap_ertm_send(chan);
/* Send an acknowledgement: RNR if locally busy; otherwise try to
 * piggyback the ack on pending i-frames, falling back to an RR. */
1481 static void __l2cap_send_ack(struct l2cap_chan *chan)
1485 control |= __set_reqseq(chan, chan->buffer_seq);
1487 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1488 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1489 set_bit(CONN_RNR_SENT, &chan->conn_state);
1490 l2cap_send_sframe(chan, control);
1494 if (l2cap_ertm_send(chan) > 0)
1497 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1498 l2cap_send_sframe(chan, control);
1501 static void l2cap_send_ack(struct l2cap_chan *chan)
1503 __clear_ack_timer(chan);
1504 __l2cap_send_ack(chan);
1507 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1509 struct srej_list *tail;
1512 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1513 control |= __set_ctrl_final(chan);
1515 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1516 control |= __set_reqseq(chan, tail->tx_seq);
1518 l2cap_send_sframe(chan, control);
1521 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1523 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1524 struct sk_buff **frag;
1527 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1533 /* Continuation fragments (no L2CAP header) */
1534 frag = &skb_shinfo(skb)->frag_list;
1536 count = min_t(unsigned int, conn->mtu, len);
1538 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1541 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1544 (*frag)->priority = skb->priority;
1549 frag = &(*frag)->next;
1555 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1556 struct msghdr *msg, size_t len,
1559 struct sock *sk = chan->sk;
1560 struct l2cap_conn *conn = chan->conn;
1561 struct sk_buff *skb;
1562 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1563 struct l2cap_hdr *lh;
1565 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1567 count = min_t(unsigned int, (conn->mtu - hlen), len);
1568 skb = bt_skb_send_alloc(sk, count + hlen,
1569 msg->msg_flags & MSG_DONTWAIT, &err);
1571 return ERR_PTR(err);
1573 skb->priority = priority;
1575 /* Create L2CAP header */
1576 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1577 lh->cid = cpu_to_le16(chan->dcid);
1578 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1579 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1581 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1582 if (unlikely(err < 0)) {
1584 return ERR_PTR(err);
/* Build a basic-mode B-frame PDU: plain L2CAP header plus payload from
 * msg. Identical to the connectionless variant minus the PSM field.
 * Returns the skb or an ERR_PTR on failure. */
1589 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1590 struct msghdr *msg, size_t len,
1593 struct sock *sk = chan->sk;
1594 struct l2cap_conn *conn = chan->conn;
1595 struct sk_buff *skb;
1596 int err, count, hlen = L2CAP_HDR_SIZE;
1597 struct l2cap_hdr *lh;
1599 BT_DBG("sk %p len %d", sk, (int)len);
1601 count = min_t(unsigned int, (conn->mtu - hlen), len);
1602 skb = bt_skb_send_alloc(sk, count + hlen,
1603 msg->msg_flags & MSG_DONTWAIT, &err);
1605 return ERR_PTR(err);
1607 skb->priority = priority;
1609 /* Create L2CAP header */
1610 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1611 lh->cid = cpu_to_le16(chan->dcid);
1612 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1614 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1615 if (unlikely(err < 0)) {
1617 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: L2CAP header, enhanced or extended
 * control field, optional SDU-length field (first segment of a SAR'd
 * SDU, sdulen != 0), payload, and an FCS placeholder when CRC16 is on.
 * Returns the skb or an ERR_PTR on failure. */
1622 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1623 struct msghdr *msg, size_t len,
1624 u32 control, u16 sdulen)
1626 struct sock *sk = chan->sk;
1627 struct l2cap_conn *conn = chan->conn;
1628 struct sk_buff *skb;
1629 int err, count, hlen;
1630 struct l2cap_hdr *lh;
1632 BT_DBG("sk %p len %d", sk, (int)len);
1635 return ERR_PTR(-ENOTCONN);
/* Extended control field (4 bytes) vs enhanced (2 bytes). */
1637 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1638 hlen = L2CAP_EXT_HDR_SIZE;
1640 hlen = L2CAP_ENH_HDR_SIZE;
/* SDU length is only present in the first segment (listing elides
 * the surrounding sdulen check). */
1643 hlen += L2CAP_SDULEN_SIZE;
1645 if (chan->fcs == L2CAP_FCS_CRC16)
1646 hlen += L2CAP_FCS_SIZE;
1648 count = min_t(unsigned int, (conn->mtu - hlen), len);
1649 skb = bt_skb_send_alloc(sk, count + hlen,
1650 msg->msg_flags & MSG_DONTWAIT, &err);
1652 return ERR_PTR(err);
1654 /* Create L2CAP header */
1655 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1656 lh->cid = cpu_to_le16(chan->dcid);
1657 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1659 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1662 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1664 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1665 if (unlikely(err < 0)) {
1667 return ERR_PTR(err);
/* FCS placeholder; the real CRC is computed when the frame is sent. */
1670 if (chan->fcs == L2CAP_FCS_CRC16)
1671 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1673 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START frame
 * (carrying the total SDU length), CONTINUE frames, and a final END
 * frame; the finished segments are spliced onto chan->tx_q.  Building
 * into a local queue first keeps tx_q untouched on mid-SDU failure. */
1677 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1679 struct sk_buff *skb;
1680 struct sk_buff_head sar_queue;
1684 skb_queue_head_init(&sar_queue);
/* First segment: SAR=START, sdulen = total SDU length. */
1685 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1686 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1688 return PTR_ERR(skb);
1690 __skb_queue_tail(&sar_queue, skb);
1691 len -= chan->remote_mps;
1692 size += chan->remote_mps;
/* Middle segments are MPS-sized; the last one carries SAR=END. */
1697 if (len > chan->remote_mps) {
1698 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1699 buflen = chan->remote_mps;
1701 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1705 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure drop everything built so far; nothing was queued yet. */
1707 skb_queue_purge(&sar_queue);
1708 return PTR_ERR(skb);
1711 __skb_queue_tail(&sar_queue, skb);
1715 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1716 if (chan->tx_send_head == NULL)
1717 chan->tx_send_head = sar_queue.next;
/* Channel send entry point.  Dispatches on channel type/mode:
 * connectionless channels send one G-frame immediately; basic mode
 * sends one B-frame after an MTU check; ERTM/streaming queue I-frames
 * (segmenting via SAR when the SDU exceeds the remote MPS) and then
 * kick the respective transmit engine.  Returns bytes sent or a
 * negative errno (some error paths elided in this listing). */
1722 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1725 struct sk_buff *skb;
1729 /* Connectionless channel */
1730 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1731 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1733 return PTR_ERR(skb);
1735 l2cap_do_send(chan, skb);
1739 switch (chan->mode) {
1740 case L2CAP_MODE_BASIC:
1741 /* Check outgoing MTU */
1742 if (len > chan->omtu)
1745 /* Create a basic PDU */
1746 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1748 return PTR_ERR(skb);
1750 l2cap_do_send(chan, skb);
1754 case L2CAP_MODE_ERTM:
1755 case L2CAP_MODE_STREAMING:
1756 /* Entire SDU fits into one PDU */
1757 if (len <= chan->remote_mps) {
1758 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1759 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1762 return PTR_ERR(skb);
1764 __skb_queue_tail(&chan->tx_q, skb);
1766 if (chan->tx_send_head == NULL)
1767 chan->tx_send_head = skb;
1770 /* Segment SDU into multiples PDUs */
1771 err = l2cap_sar_segment_sdu(chan, msg, len);
1776 if (chan->mode == L2CAP_MODE_STREAMING) {
1777 l2cap_streaming_send(chan);
/* ERTM: if the peer is busy or we await an F-bit, leave the
 * frames queued; they will be sent when the state clears. */
1782 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1783 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1788 err = l2cap_ertm_send(chan);
1795 BT_DBG("bad state %1.1x", chan->mode);
1802 /* Copy frame to all raw sockets on that connection */
1803 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1805 struct sk_buff *nskb;
1806 struct l2cap_chan *chan;
1808 BT_DBG("conn %p", conn);
/* RCU walk over the connection's channel list; clone the skb for
 * every raw channel and hand it to the channel's recv callback. */
1812 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1813 struct sock *sk = chan->sk;
1814 if (chan->chan_type != L2CAP_CHAN_RAW)
1817 /* Don't send frame to the socket it came from */
1820 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv() takes ownership of nskb on success; a non-zero return
 * means the clone must be freed (free path elided in listing). */
1824 if (chan->ops->recv(chan->data, nskb))
1831 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling-command skb: L2CAP header on the
 * signalling CID (LE or BR/EDR variant), command header, then 'dlen'
 * bytes of payload, fragmented across frag_list skbs when the total
 * exceeds the ACL MTU.  Returns the skb or NULL on allocation failure. */
1832 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1833 u8 code, u8 ident, u16 dlen, void *data)
1835 struct sk_buff *skb, **frag;
1836 struct l2cap_cmd_hdr *cmd;
1837 struct l2cap_hdr *lh;
1840 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1841 conn, code, ident, dlen);
1843 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1844 count = min_t(unsigned int, conn->mtu, len);
1846 skb = bt_skb_alloc(count, GFP_ATOMIC);
1850 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1851 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the LE signalling channel, BR/EDR the classic one. */
1853 if (conn->hcon->type == LE_LINK)
1854 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1856 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1858 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1861 cmd->len = cpu_to_le16(dlen);
/* Whatever payload fits after the headers goes in the head skb. */
1864 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1865 memcpy(skb_put(skb, count), data, count);
1871 /* Continuation fragments (no L2CAP header) */
1872 frag = &skb_shinfo(skb)->frag_list;
1874 count = min_t(unsigned int, conn->mtu, len);
1876 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1880 memcpy(skb_put(*frag, count), data, count);
1885 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: returns its total length
 * (header + value) so the caller can advance, and stores type/olen/val.
 * 1/2/4-byte values are read by value; longer ones are returned as a
 * pointer into the buffer (cast into *val). */
1895 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1897 struct l2cap_conf_opt *opt = *ptr;
1900 len = L2CAP_CONF_OPT_SIZE + opt->len;
1908 *val = *((u8 *) opt->val);
1912 *val = get_unaligned_le16(opt->val);
1916 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer, not a copy. */
1920 *val = (unsigned long) opt->val;
1924 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr and advance *ptr past it.
 * 1/2/4-byte values are written little-endian in place; longer values
 * are memcpy'd from the pointer smuggled in 'val'.  The caller must
 * ensure the destination buffer has room. */
1928 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1930 struct l2cap_conf_opt *opt = *ptr;
1932 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1939 *((u8 *) opt->val) = val;
1943 put_unaligned_le16(val, opt->val);
1947 put_unaligned_le32(val, opt->val);
1951 memcpy(opt->val, (void *) val, len);
1955 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification option from the channel's local
 * parameters and append it via l2cap_add_conf_opt.  ERTM uses the
 * channel's negotiated service type; streaming forces best-effort. */
1958 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1960 struct l2cap_conf_efs efs;
1962 switch (chan->mode) {
1963 case L2CAP_MODE_ERTM:
1964 efs.id = chan->local_id;
1965 efs.stype = chan->local_stype;
1966 efs.msdu = cpu_to_le16(chan->local_msdu);
1967 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1968 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1969 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1972 case L2CAP_MODE_STREAMING:
1974 efs.stype = L2CAP_SERV_BESTEFFORT;
1975 efs.msdu = cpu_to_le16(chan->local_msdu);
1976 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1985 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1986 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: send a pending
 * acknowledgement under the socket lock, then drop the channel
 * reference taken when the timer was armed. */
1989 static void l2cap_ack_timeout(struct work_struct *work)
1991 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1994 BT_DBG("chan %p", chan);
1996 lock_sock(chan->sk);
1997 __l2cap_send_ack(chan);
1998 release_sock(chan->sk);
/* Balances the l2cap_chan_hold done when scheduling the timer. */
2000 l2cap_chan_put(chan);
/* Reset ERTM sequence state and initialise the per-channel timers,
 * the selective-reject receive queue and the SREJ list.  Called once
 * when an ERTM channel reaches the connected state. */
2003 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2005 chan->expected_ack_seq = 0;
2006 chan->unacked_frames = 0;
2007 chan->buffer_seq = 0;
2008 chan->num_acked = 0;
2009 chan->frames_sent = 0;
2011 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2012 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2013 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2015 skb_queue_head_init(&chan->srej_q);
2017 INIT_LIST_HEAD(&chan->srej_l);
/* Pick an operating mode: keep ERTM/streaming only if the remote's
 * feature mask advertises support, otherwise fall back to basic mode. */
2020 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2023 case L2CAP_MODE_STREAMING:
2024 case L2CAP_MODE_ERTM:
2025 if (l2cap_mode_supported(mode, remote_feat_mask))
2029 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled
 * and the remote advertising the extended-window feature bit. */
2033 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2035 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed support
 * enabled and the remote advertising the extended-flow feature bit. */
2038 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2040 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Decide the control-field format for the tx window: switch to the
 * 4-byte extended control field when the requested window exceeds the
 * default and the peer supports EWS; otherwise clamp tx_win to the
 * enhanced-control default. */
2043 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2045 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2046 __l2cap_ews_supported(chan)) {
2047 /* use extended control field */
2048 set_bit(FLAG_EXT_CTRL, &chan->flags);
2049 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2051 chan->tx_win = min_t(u16, chan->tx_win,
2052 L2CAP_DEFAULT_TX_WINDOW);
2053 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configuration Request into 'data': MTU option if
 * non-default, then a mode-specific RFC option (basic/ERTM/streaming),
 * plus EFS, FCS and EWS options where the negotiated flags call for
 * them.  Returns the request length (tail elided in this listing). */
2057 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2059 struct l2cap_conf_req *req = data;
2060 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2061 void *ptr = req->data;
2064 BT_DBG("chan %p", chan);
/* Mode selection happens only on the first request/response. */
2066 if (chan->num_conf_req || chan->num_conf_rsp)
2069 switch (chan->mode) {
2070 case L2CAP_MODE_STREAMING:
2071 case L2CAP_MODE_ERTM:
2072 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2075 if (__l2cap_efs_supported(chan))
2076 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2080 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2085 if (chan->imtu != L2CAP_DEFAULT_MTU)
2086 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2088 switch (chan->mode) {
2089 case L2CAP_MODE_BASIC:
/* No RFC option needed if the peer knows neither ERTM nor
 * streaming mode. */
2090 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2091 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2094 rfc.mode = L2CAP_MODE_BASIC;
2096 rfc.max_transmit = 0;
2097 rfc.retrans_timeout = 0;
2098 rfc.monitor_timeout = 0;
2099 rfc.max_pdu_size = 0;
2101 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2102 (unsigned long) &rfc);
2105 case L2CAP_MODE_ERTM:
2106 rfc.mode = L2CAP_MODE_ERTM;
2107 rfc.max_transmit = chan->max_tx;
/* Timeouts are filled in by the responder; request sends 0. */
2108 rfc.retrans_timeout = 0;
2109 rfc.monitor_timeout = 0;
2111 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2112 L2CAP_EXT_HDR_SIZE -
2115 rfc.max_pdu_size = cpu_to_le16(size);
2117 l2cap_txwin_setup(chan);
2119 rfc.txwin_size = min_t(u16, chan->tx_win,
2120 L2CAP_DEFAULT_TX_WINDOW);
2122 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2123 (unsigned long) &rfc);
2125 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2126 l2cap_add_opt_efs(&ptr, chan);
2128 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2131 if (chan->fcs == L2CAP_FCS_NONE ||
2132 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2133 chan->fcs = L2CAP_FCS_NONE;
2134 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2137 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2138 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2142 case L2CAP_MODE_STREAMING:
2143 rfc.mode = L2CAP_MODE_STREAMING;
2145 rfc.max_transmit = 0;
2146 rfc.retrans_timeout = 0;
2147 rfc.monitor_timeout = 0;
2149 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2150 L2CAP_EXT_HDR_SIZE -
2153 rfc.max_pdu_size = cpu_to_le16(size);
2155 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2156 (unsigned long) &rfc);
2158 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2159 l2cap_add_opt_efs(&ptr, chan);
2161 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2164 if (chan->fcs == L2CAP_FCS_NONE ||
2165 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2166 chan->fcs = L2CAP_FCS_NONE;
2167 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2172 req->dcid = cpu_to_le16(chan->dcid);
2173 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configuration Request (chan->conf_req)
 * and build our Configuration Response into 'data'.  First pass decodes
 * every option (MTU, flush timeout, RFC, FCS, EFS, EWS), then the
 * channel mode is validated/selected and per-mode output options are
 * written back.  Returns the response length, or -ECONNREFUSED when
 * the requested configuration cannot be negotiated.
 * FIX: rfc.retrans_timeout / rfc.monitor_timeout are little-endian
 * wire fields (__le16); they must be stored with cpu_to_le16(), not
 * le16_to_cpu() — the old direction sent byte-swapped timeouts from
 * big-endian hosts.  This matches every other wire-field store here
 * (e.g. rfc.max_pdu_size = cpu_to_le16(size)). */
2178 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2180 struct l2cap_conf_rsp *rsp = data;
2181 void *ptr = rsp->data;
2182 void *req = chan->conf_req;
2183 int len = chan->conf_len;
2184 int type, hint, olen;
2186 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2187 struct l2cap_conf_efs efs;
2189 u16 mtu = L2CAP_DEFAULT_MTU;
2190 u16 result = L2CAP_CONF_SUCCESS;
2193 BT_DBG("chan %p", chan);
/* Pass 1: decode each option from the buffered request. */
2195 while (len >= L2CAP_CONF_OPT_SIZE) {
2196 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2198 hint = type & L2CAP_CONF_HINT;
2199 type &= L2CAP_CONF_MASK;
2202 case L2CAP_CONF_MTU:
2206 case L2CAP_CONF_FLUSH_TO:
2207 chan->flush_to = val;
2210 case L2CAP_CONF_QOS:
2213 case L2CAP_CONF_RFC:
2214 if (olen == sizeof(rfc))
2215 memcpy(&rfc, (void *) val, olen);
2218 case L2CAP_CONF_FCS:
2219 if (val == L2CAP_FCS_NONE)
2220 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2223 case L2CAP_CONF_EFS:
2225 if (olen == sizeof(efs))
2226 memcpy(&efs, (void *) val, olen);
2229 case L2CAP_CONF_EWS:
2231 return -ECONNREFUSED;
2233 set_bit(FLAG_EXT_CTRL, &chan->flags);
2234 set_bit(CONF_EWS_RECV, &chan->conf_state);
2235 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2236 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type in the UNKNOWN response. */
2243 result = L2CAP_CONF_UNKNOWN;
2244 *((u8 *) ptr++) = type;
/* Mode selection happens only on the first request/response round. */
2249 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2252 switch (chan->mode) {
2253 case L2CAP_MODE_STREAMING:
2254 case L2CAP_MODE_ERTM:
2255 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2256 chan->mode = l2cap_select_mode(rfc.mode,
2257 chan->conn->feat_mask);
2262 if (__l2cap_efs_supported(chan))
2263 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2265 return -ECONNREFUSED;
2268 if (chan->mode != rfc.mode)
2269 return -ECONNREFUSED;
/* Mode mismatch: propose our mode; give up after one retry. */
2275 if (chan->mode != rfc.mode) {
2276 result = L2CAP_CONF_UNACCEPT;
2277 rfc.mode = chan->mode;
2279 if (chan->num_conf_rsp == 1)
2280 return -ECONNREFUSED;
2282 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2283 sizeof(rfc), (unsigned long) &rfc);
2286 if (result == L2CAP_CONF_SUCCESS) {
2287 /* Configure output options and let the other side know
2288 * which ones we don't like. */
2290 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2291 result = L2CAP_CONF_UNACCEPT;
2294 set_bit(CONF_MTU_DONE, &chan->conf_state);
2296 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must match ours unless one side is NOTRAFIC. */
2299 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2300 efs.stype != L2CAP_SERV_NOTRAFIC &&
2301 efs.stype != chan->local_stype) {
2303 result = L2CAP_CONF_UNACCEPT;
2305 if (chan->num_conf_req >= 1)
2306 return -ECONNREFUSED;
2308 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2310 (unsigned long) &efs);
2312 /* Send PENDING Conf Rsp */
2313 result = L2CAP_CONF_PENDING;
2314 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2319 case L2CAP_MODE_BASIC:
2320 chan->fcs = L2CAP_FCS_NONE;
2321 set_bit(CONF_MODE_DONE, &chan->conf_state);
2324 case L2CAP_MODE_ERTM:
2325 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2326 chan->remote_tx_win = rfc.txwin_size;
2328 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2330 chan->remote_max_tx = rfc.max_transmit;
2332 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2334 L2CAP_EXT_HDR_SIZE -
2337 rfc.max_pdu_size = cpu_to_le16(size);
2338 chan->remote_mps = size;
/* These are __le16 wire fields: convert CPU constant -> LE. */
2340 rfc.retrans_timeout =
2341 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2342 rfc.monitor_timeout =
2343 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2345 set_bit(CONF_MODE_DONE, &chan->conf_state);
2347 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2348 sizeof(rfc), (unsigned long) &rfc);
2350 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2351 chan->remote_id = efs.id;
2352 chan->remote_stype = efs.stype;
2353 chan->remote_msdu = le16_to_cpu(efs.msdu);
2354 chan->remote_flush_to =
2355 le32_to_cpu(efs.flush_to);
2356 chan->remote_acc_lat =
2357 le32_to_cpu(efs.acc_lat);
2358 chan->remote_sdu_itime =
2359 le32_to_cpu(efs.sdu_itime);
2360 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2361 sizeof(efs), (unsigned long) &efs);
2365 case L2CAP_MODE_STREAMING:
2366 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2368 L2CAP_EXT_HDR_SIZE -
2371 rfc.max_pdu_size = cpu_to_le16(size);
2372 chan->remote_mps = size;
2374 set_bit(CONF_MODE_DONE, &chan->conf_state);
2376 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2377 sizeof(rfc), (unsigned long) &rfc);
2382 result = L2CAP_CONF_UNACCEPT;
2384 memset(&rfc, 0, sizeof(rfc));
2385 rfc.mode = chan->mode;
2388 if (result == L2CAP_CONF_SUCCESS)
2389 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2391 rsp->scid = cpu_to_le16(chan->dcid);
2392 rsp->result = cpu_to_le16(result);
2393 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configuration Response ('rsp', 'len' bytes of
 * options) and build a follow-up Configuration Request into 'data',
 * adjusting our local parameters (MTU, flush timeout, RFC, EWS, EFS)
 * to what was (un)accepted.  '*result' may be updated; returns the new
 * request length or -ECONNREFUSED on an unresolvable mismatch. */
2398 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2400 struct l2cap_conf_req *req = data;
2401 void *ptr = req->data;
2404 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2405 struct l2cap_conf_efs efs;
2407 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2409 while (len >= L2CAP_CONF_OPT_SIZE) {
2410 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2413 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: reject but
 * fall back to the minimum ourselves. */
2414 if (val < L2CAP_DEFAULT_MIN_MTU) {
2415 *result = L2CAP_CONF_UNACCEPT;
2416 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2419 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2422 case L2CAP_CONF_FLUSH_TO:
2423 chan->flush_to = val;
2424 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2428 case L2CAP_CONF_RFC:
2429 if (olen == sizeof(rfc))
2430 memcpy(&rfc, (void *)val, olen);
/* A state-2 device cannot change mode mid-negotiation. */
2432 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2433 rfc.mode != chan->mode)
2434 return -ECONNREFUSED;
2438 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2439 sizeof(rfc), (unsigned long) &rfc);
2442 case L2CAP_CONF_EWS:
2443 chan->tx_win = min_t(u16, val,
2444 L2CAP_DEFAULT_EXT_WINDOW);
2445 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2449 case L2CAP_CONF_EFS:
2450 if (olen == sizeof(efs))
2451 memcpy(&efs, (void *)val, olen);
2453 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2454 efs.stype != L2CAP_SERV_NOTRAFIC &&
2455 efs.stype != chan->local_stype)
2456 return -ECONNREFUSED;
2458 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2459 sizeof(efs), (unsigned long) &efs);
/* Basic mode cannot be renegotiated to something else. */
2464 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2465 return -ECONNREFUSED;
2467 chan->mode = rfc.mode;
2469 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2471 case L2CAP_MODE_ERTM:
2472 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2473 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2474 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2476 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2477 chan->local_msdu = le16_to_cpu(efs.msdu);
2478 chan->local_sdu_itime =
2479 le32_to_cpu(efs.sdu_itime);
2480 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2481 chan->local_flush_to =
2482 le32_to_cpu(efs.flush_to);
2486 case L2CAP_MODE_STREAMING:
2487 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2491 req->dcid = cpu_to_le16(chan->dcid);
2492 req->flags = cpu_to_le16(0x0000);
/* Fill a bare Configuration Response header (scid/result/flags) into
 * 'data'.  Returns the response length (tail elided in this listing). */
2497 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2499 struct l2cap_conf_rsp *rsp = data;
2500 void *ptr = rsp->data;
2502 BT_DBG("chan %p", chan);
2504 rsp->scid = cpu_to_le16(chan->dcid);
2505 rsp->result = cpu_to_le16(result);
2506 rsp->flags = cpu_to_le16(flags);
/* Send the Connect Response that was deferred (e.g. while waiting for
 * user-space authorisation), then kick off configuration by sending
 * our first Configuration Request exactly once. */
2511 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2513 struct l2cap_conn_rsp rsp;
2514 struct l2cap_conn *conn = chan->conn;
2517 rsp.scid = cpu_to_le16(chan->dcid);
2518 rsp.dcid = cpu_to_le16(chan->scid);
2519 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2520 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2521 l2cap_send_cmd(conn, chan->ident,
2522 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* test_and_set guarantees the config request goes out only once. */
2524 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2527 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2528 l2cap_build_conf_req(chan, buf), buf);
2529 chan->num_conf_req++;
/* Extract the RFC option from a successful Configuration Response and
 * apply its timeouts/MPS to the channel; if the (misbehaving) remote
 * sent no RFC option, fall back to sane defaults.  No-op for channels
 * not in ERTM or streaming mode. */
2532 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2536 struct l2cap_conf_rfc rfc;
2538 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2540 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2543 while (len >= L2CAP_CONF_OPT_SIZE) {
2544 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2547 case L2CAP_CONF_RFC:
2548 if (olen == sizeof(rfc))
2549 memcpy(&rfc, (void *)val, olen);
2554 /* Use sane default values in case a misbehaving remote device
2555 * did not send an RFC option.
2557 rfc.mode = chan->mode;
2558 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2559 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2560 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2562 BT_ERR("Expected RFC option was not found, using defaults");
2566 case L2CAP_MODE_ERTM:
2567 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2568 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2569 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2571 case L2CAP_MODE_STREAMING:
2572 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it answers our outstanding
 * Information Request, cancel the info timer, mark feature discovery
 * done and (re)start pending channel connections. */
2576 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2578 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2580 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2583 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2584 cmd->ident == conn->info_ident) {
2585 __cancel_delayed_work(&conn->info_timer);
2587 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2588 conn->info_ident = 0;
2590 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for
 * the PSM, enforce link security (except for SDP, PSM 0x0001), check
 * the accept backlog and duplicate SCIDs, create the child channel,
 * then answer with success/pending/refusal.  When the remote's feature
 * mask is unknown an Information Request is sent first; on immediate
 * success configuration is started right away. */
2596 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2598 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2599 struct l2cap_conn_rsp rsp;
2600 struct l2cap_chan *chan = NULL, *pchan;
2601 struct sock *parent, *sk = NULL;
2602 int result, status = L2CAP_CS_NO_INFO;
2604 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2605 __le16 psm = req->psm;
2607 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2609 /* Check if we have socket listening on psm */
2610 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2612 result = L2CAP_CR_BAD_PSM;
2620 /* Check if the ACL is secure enough (if not SDP) */
2621 if (psm != cpu_to_le16(0x0001) &&
2622 !hci_conn_check_link_mode(conn->hcon)) {
2623 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2624 result = L2CAP_CR_SEC_BLOCK;
2628 result = L2CAP_CR_NO_MEM;
2630 /* Check for backlog size */
2631 if (sk_acceptq_is_full(parent)) {
2632 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2636 chan = pchan->ops->new_connection(pchan->data);
2642 /* Check if we already have channel with that dcid */
2643 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2644 sock_set_flag(sk, SOCK_ZAPPED);
2645 chan->ops->close(chan->data);
2649 hci_conn_hold(conn->hcon);
2651 bacpy(&bt_sk(sk)->src, conn->src);
2652 bacpy(&bt_sk(sk)->dst, conn->dst);
2656 bt_accept_enqueue(parent, sk);
2658 l2cap_chan_add(conn, chan);
2662 __set_chan_timer(chan, sk->sk_sndtimeo);
2664 chan->ident = cmd->ident;
2666 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2667 if (l2cap_chan_check_security(chan)) {
/* Deferred setup: keep in CONNECT2 until user space accepts. */
2668 if (bt_sk(sk)->defer_setup) {
2669 l2cap_state_change(chan, BT_CONNECT2);
2670 result = L2CAP_CR_PEND;
2671 status = L2CAP_CS_AUTHOR_PEND;
2672 parent->sk_data_ready(parent, 0);
2674 l2cap_state_change(chan, BT_CONFIG);
2675 result = L2CAP_CR_SUCCESS;
2676 status = L2CAP_CS_NO_INFO;
2679 l2cap_state_change(chan, BT_CONNECT2);
2680 result = L2CAP_CR_PEND;
2681 status = L2CAP_CS_AUTHEN_PEND;
2684 l2cap_state_change(chan, BT_CONNECT2);
2685 result = L2CAP_CR_PEND;
2686 status = L2CAP_CS_NO_INFO;
2690 release_sock(parent);
2693 rsp.scid = cpu_to_le16(scid);
2694 rsp.dcid = cpu_to_le16(dcid);
2695 rsp.result = cpu_to_le16(result);
2696 rsp.status = cpu_to_le16(status);
2697 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Features still unknown: query them before configuring. */
2699 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2700 struct l2cap_info_req info;
2701 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2703 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2704 conn->info_ident = l2cap_get_ident(conn);
2706 schedule_delayed_work(&conn->info_timer,
2707 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2709 l2cap_send_cmd(conn, conn->info_ident,
2710 L2CAP_INFO_REQ, sizeof(info), &info);
2713 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2714 result == L2CAP_CR_SUCCESS) {
2716 set_bit(CONF_REQ_SENT, &chan->conf_state);
2717 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2718 l2cap_build_conf_req(chan, buf), buf);
2719 chan->num_conf_req++;
/* Handle an incoming Connection Response: look the channel up by SCID
 * (or by ident while the DCID is not yet assigned), then on success
 * move to BT_CONFIG and send the first Configuration Request; tear the
 * channel down with ECONNREFUSED on any other final result. */
2725 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2727 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2728 u16 scid, dcid, result, status;
2729 struct l2cap_chan *chan;
2733 scid = __le16_to_cpu(rsp->scid);
2734 dcid = __le16_to_cpu(rsp->dcid);
2735 result = __le16_to_cpu(rsp->result);
2736 status = __le16_to_cpu(rsp->status);
2738 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2741 chan = l2cap_get_chan_by_scid(conn, scid);
/* No SCID match yet (pending response): fall back to ident lookup. */
2745 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2753 case L2CAP_CR_SUCCESS:
2754 l2cap_state_change(chan, BT_CONFIG);
2757 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2759 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2762 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2763 l2cap_build_conf_req(chan, req), req);
2764 chan->num_conf_req++;
2768 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2772 l2cap_chan_del(chan, ECONNREFUSED);
/* Apply the default FCS policy once configuration is done: no FCS
 * outside ERTM/streaming; CRC16 in those modes unless the peer asked
 * for FCS to be disabled (CONF_NO_FCS_RECV). */
2780 static inline void set_default_fcs(struct l2cap_chan *chan)
2782 /* FCS is enabled only in ERTM or streaming mode, if one or both
2785 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2786 chan->fcs = L2CAP_FCS_NONE;
2787 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2788 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configuration Request.  Rejects requests for
 * channels in the wrong state or oversized option payloads, buffers
 * option fragments (continuation flag 0x0001) in chan->conf_req, and
 * once complete parses them, sends the response, and — when both
 * directions are configured — brings the channel up (initialising
 * ERTM state where needed). */
2791 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2793 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2796 struct l2cap_chan *chan;
2800 dcid = __le16_to_cpu(req->dcid);
2801 flags = __le16_to_cpu(req->flags);
2803 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2805 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only valid in BT_CONFIG/BT_CONNECT2; otherwise reject CID. */
2811 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2812 struct l2cap_cmd_rej_cid rej;
2814 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2815 rej.scid = cpu_to_le16(chan->scid);
2816 rej.dcid = cpu_to_le16(chan->dcid);
2818 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2823 /* Reject if config buffer is too small. */
2824 len = cmd_len - sizeof(*req);
2825 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2826 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2827 l2cap_build_conf_rsp(chan, rsp,
2828 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment into the per-channel config buffer. */
2833 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2834 chan->conf_len += len;
2836 if (flags & 0x0001) {
2837 /* Incomplete config. Send empty response. */
2838 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2839 l2cap_build_conf_rsp(chan, rsp,
2840 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2844 /* Complete config. */
2845 len = l2cap_parse_conf_req(chan, rsp);
2847 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2851 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2852 chan->num_conf_rsp++;
2854 /* Reset config buffer. */
2857 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: channel becomes operational. */
2860 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2861 set_default_fcs(chan);
2863 l2cap_state_change(chan, BT_CONNECTED);
2865 chan->next_tx_seq = 0;
2866 chan->expected_tx_seq = 0;
2867 skb_queue_head_init(&chan->tx_q);
2868 if (chan->mode == L2CAP_MODE_ERTM)
2869 l2cap_ertm_init(chan);
2871 l2cap_chan_ready(sk);
2875 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2877 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2878 l2cap_build_conf_req(chan, buf), buf);
2879 chan->num_conf_req++;
2882 /* Got Conf Rsp PENDING from remote side and asume we sent
2883 Conf Rsp PENDING in the code above */
2884 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2885 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2887 /* check compatibility */
2889 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2890 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2892 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2893 l2cap_build_conf_rsp(chan, rsp,
2894 L2CAP_CONF_SUCCESS, 0x0000), rsp);
/* Handle an incoming Configuration Response.  SUCCESS applies the RFC
 * parameters; PENDING answers our own pending response; UNACCEPT
 * triggers a renegotiation request (bounded by the max response count);
 * any other result disconnects.  When both directions are done the
 * channel is brought up, with ERTM state initialised if applicable. */
2902 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2904 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2905 u16 scid, flags, result;
2906 struct l2cap_chan *chan;
2908 int len = cmd->len - sizeof(*rsp);
2910 scid = __le16_to_cpu(rsp->scid);
2911 flags = __le16_to_cpu(rsp->flags);
2912 result = __le16_to_cpu(rsp->result);
2914 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2915 scid, flags, result);
2917 chan = l2cap_get_chan_by_scid(conn, scid);
2924 case L2CAP_CONF_SUCCESS:
2925 l2cap_conf_rfc_get(chan, rsp->data, len);
2926 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2929 case L2CAP_CONF_PENDING:
2930 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2932 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2935 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2938 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2942 /* check compatibility */
2944 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2945 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2947 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2948 l2cap_build_conf_rsp(chan, buf,
2949 L2CAP_CONF_SUCCESS, 0x0000), buf);
2953 case L2CAP_CONF_UNACCEPT:
/* Renegotiate, but only up to the allowed number of rounds. */
2954 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2957 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2958 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2962 /* throw out any old stored conf requests */
2963 result = L2CAP_CONF_SUCCESS;
2964 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2967 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2971 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2972 L2CAP_CONF_REQ, len, req);
2973 chan->num_conf_req++;
2974 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: give up and disconnect the channel. */
2980 sk->sk_err = ECONNRESET;
2981 __set_chan_timer(chan,
2982 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
2983 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2990 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2992 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2993 set_default_fcs(chan);
2995 l2cap_state_change(chan, BT_CONNECTED);
2996 chan->next_tx_seq = 0;
2997 chan->expected_tx_seq = 0;
2998 skb_queue_head_init(&chan->tx_q);
2999 if (chan->mode == L2CAP_MODE_ERTM)
3000 l2cap_ertm_init(chan);
3002 l2cap_chan_ready(sk);
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, mark the socket fully shut down, remove the
 * channel from the connection and close it. */
3010 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3012 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3013 struct l2cap_disconn_rsp rsp;
3015 struct l2cap_chan *chan;
3018 scid = __le16_to_cpu(req->scid);
3019 dcid = __le16_to_cpu(req->dcid);
3021 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid; look up by our local CID. */
3023 chan = l2cap_get_chan_by_scid(conn, dcid);
3029 rsp.dcid = cpu_to_le16(chan->scid);
3030 rsp.scid = cpu_to_le16(chan->dcid);
3031 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3033 sk->sk_shutdown = SHUTDOWN_MASK;
3035 l2cap_chan_del(chan, ECONNRESET);
3038 chan->ops->close(chan->data);
/* Handle an incoming Disconnection Response to our earlier request:
 * remove the channel (no error — this is a clean teardown) and close. */
3042 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3044 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3046 struct l2cap_chan *chan;
3049 scid = __le16_to_cpu(rsp->scid);
3050 dcid = __le16_to_cpu(rsp->dcid);
3052 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3054 chan = l2cap_get_chan_by_scid(conn, scid);
3060 l2cap_chan_del(chan, 0);
3063 chan->ops->close(chan->data);
/* Handle an incoming Information Request: answer a feature-mask query
 * with our supported features (ERTM/streaming always; extended flow
 * and window when high-speed is enabled), a fixed-channels query with
 * the fixed-channel bitmap (A2MP bit keyed to enable_hs), and anything
 * else with NOTSUPP. */
3067 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3069 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3072 type = __le16_to_cpu(req->type);
3074 BT_DBG("type 0x%4.4x", type);
3076 if (type == L2CAP_IT_FEAT_MASK) {
3078 u32 feat_mask = l2cap_feat_mask;
3079 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3080 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3081 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3083 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* High-speed builds additionally advertise EFS and EWS. */
3086 feat_mask |= L2CAP_FEAT_EXT_FLOW
3087 | L2CAP_FEAT_EXT_WINDOW;
3089 put_unaligned_le32(feat_mask, rsp->data);
3090 l2cap_send_cmd(conn, cmd->ident,
3091 L2CAP_INFO_RSP, sizeof(buf), buf);
3092 } else if (type == L2CAP_IT_FIXED_CHAN) {
3094 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3097 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3099 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3101 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3102 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3103 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3104 l2cap_send_cmd(conn, cmd->ident,
3105 L2CAP_INFO_RSP, sizeof(buf), buf);
3107 struct l2cap_info_rsp rsp;
3108 rsp.type = cpu_to_le16(type);
3109 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3110 l2cap_send_cmd(conn, cmd->ident,
3111 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response for a request we sent during connection
 * setup. Validates the ident against conn->info_ident (info exchanges are
 * not channel-bound), cancels the guard timer, records the feature mask,
 * optionally chains a FIXED_CHAN request, and finally kicks pending
 * channels via l2cap_conn_start() once the exchange is done. */
3117 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3119 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3122 type = __le16_to_cpu(rsp->type);
3123 result = __le16_to_cpu(rsp->result);
3125 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3127 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3128 if (cmd->ident != conn->info_ident ||
3129 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3132 __cancel_delayed_work(&conn->info_timer);
/* Peer rejected the request: mark the exchange complete anyway so
 * connection setup can proceed without the optional info. */
3134 if (result != L2CAP_IR_SUCCESS) {
3135 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3136 conn->info_ident = 0;
3138 l2cap_conn_start(conn);
3143 if (type == L2CAP_IT_FEAT_MASK) {
3144 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, ask which ones before
 * declaring the info exchange finished. */
3146 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3147 struct l2cap_info_req req;
3148 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3150 conn->info_ident = l2cap_get_ident(conn);
3152 l2cap_send_cmd(conn, conn->info_ident,
3153 L2CAP_INFO_REQ, sizeof(req), &req);
3155 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3156 conn->info_ident = 0;
3158 l2cap_conn_start(conn);
3160 } else if (type == L2CAP_IT_FIXED_CHAN) {
3161 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3162 conn->info_ident = 0;
3164 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request. Length-checked against the fixed
 * request size. Currently a stub: always answers CR_NO_MEM (reject), as
 * the placeholder comment below states. */
3170 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3171 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3174 struct l2cap_create_chan_req *req = data;
3175 struct l2cap_create_chan_rsp rsp;
3178 if (cmd_len != sizeof(*req))
3184 psm = le16_to_cpu(req->psm);
3185 scid = le16_to_cpu(req->scid);
3187 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3189 /* Placeholder: Always reject */
3191 rsp.scid = cpu_to_le16(scid);
/* NOTE(review): result/status are not byte-swapped here, unlike every
 * other response in this file — confirm L2CAP_CR_NO_MEM/CS_NO_INFO
 * are intended to go out without cpu_to_le16(). */
3192 rsp.result = L2CAP_CR_NO_MEM;
3193 rsp.status = L2CAP_CS_NO_INFO;
3195 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response is handled identically to a regular
 * Connect Response; simply delegate. */
3201 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3202 struct l2cap_cmd_hdr *cmd, void *data)
3204 BT_DBG("conn %p", conn);
3206 return l2cap_connect_rsp(conn, cmd, data);
/* Emit a Move Channel Response for the given initiator CID and result,
 * reusing the requester's ident. */
3209 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3210 u16 icid, u16 result)
3212 struct l2cap_move_chan_rsp rsp;
3214 BT_DBG("icid %d, result %d", icid, result);
3216 rsp.icid = cpu_to_le16(icid);
3217 rsp.result = cpu_to_le16(result);
3219 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Emit a Move Channel Confirm. Allocates a fresh ident and records it on
 * the channel so the confirm-response can be matched later.
 * NOTE(review): chan is dereferenced unconditionally (chan->ident =
 * ident) — callers must never pass NULL; see l2cap_move_channel_rsp(). */
3222 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3223 struct l2cap_chan *chan, u16 icid, u16 result)
3225 struct l2cap_move_chan_cfm cfm;
3228 BT_DBG("icid %d, result %d", icid, result);
3230 ident = l2cap_get_ident(conn);
3232 chan->ident = ident;
3234 cfm.icid = cpu_to_le16(icid);
3235 cfm.result = cpu_to_le16(result);
3237 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Emit a Move Channel Confirm Response (ack of the peer's confirm),
 * echoing the peer's ident. */
3240 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3243 struct l2cap_move_chan_cfm_rsp rsp;
3245 BT_DBG("icid %d", icid);
3247 rsp.icid = cpu_to_le16(icid);
3248 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request (AMP channel move). Length-checked.
 * Currently a stub that always refuses with MR_NOT_ALLOWED, per the
 * placeholder comment below. */
3251 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3252 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3254 struct l2cap_move_chan_req *req = data;
3256 u16 result = L2CAP_MR_NOT_ALLOWED;
3258 if (cmd_len != sizeof(*req))
3261 icid = le16_to_cpu(req->icid);
3263 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3268 /* Placeholder: Always refuse */
3269 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response. Placeholder: always answers with an
 * UNCONFIRMED confirm.
 * BUG(review): passes NULL as the chan argument, but
 * l2cap_send_move_chan_cfm() unconditionally writes chan->ident — this
 * is a NULL pointer dereference if this path is ever hit. Needs a real
 * channel lookup (or a cfm variant that takes no channel). */
3274 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3275 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3277 struct l2cap_move_chan_rsp *rsp = data;
3280 if (cmd_len != sizeof(*rsp))
3283 icid = le16_to_cpu(rsp->icid);
3284 result = le16_to_cpu(rsp->result);
3286 BT_DBG("icid %d, result %d", icid, result);
3288 /* Placeholder: Always unconfirmed */
3289 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirm from the peer: length-check, then
 * acknowledge with a Confirm Response. The result itself is currently
 * only logged. */
3294 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3295 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3297 struct l2cap_move_chan_cfm *cfm = data;
3300 if (cmd_len != sizeof(*cfm))
3303 icid = le16_to_cpu(cfm->icid);
3304 result = le16_to_cpu(cfm->result);
3306 BT_DBG("icid %d, result %d", icid, result);
3308 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirm Response: length-check and log only
 * (move sequence completion is not yet implemented). */
3313 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3314 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3316 struct l2cap_move_chan_cfm_rsp *rsp = data;
3319 if (cmd_len != sizeof(*rsp))
3322 icid = le16_to_cpu(rsp->icid);
3324 BT_DBG("icid %d", icid);
/* Validate LE connection-parameter-update values against the ranges the
 * Core Spec allows: interval min/max ordering and bounds, supervision
 * timeout bounds, timeout vs. interval consistency, and slave latency
 * (cap 499, and low enough that latency+1 intervals fit in the
 * supervision timeout). */
3329 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3334 if (min > max || min < 6 || max > 3200)
3337 if (to_multiplier < 10 || to_multiplier > 3200)
/* Timeout must exceed the max interval by enough margin (units:
 * to_multiplier is 10ms, interval is 1.25ms => factor 8). */
3340 if (max >= to_multiplier * 8)
3343 max_latency = (to_multiplier * 8 / max) - 1;
3344 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request from the slave.
 * Only meaningful when we are master; validates the requested values,
 * always responds (accept or reject), and applies accepted parameters
 * to the HCI link via hci_le_conn_update(). */
3350 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3351 struct l2cap_cmd_hdr *cmd, u8 *data)
3353 struct hci_conn *hcon = conn->hcon;
3354 struct l2cap_conn_param_update_req *req;
3355 struct l2cap_conn_param_update_rsp rsp;
3356 u16 min, max, latency, to_multiplier, cmd_len;
/* Parameter updates may only be requested of the master. */
3359 if (!(hcon->link_mode & HCI_LM_MASTER))
3362 cmd_len = __le16_to_cpu(cmd->len);
3363 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3366 req = (struct l2cap_conn_param_update_req *) data;
3367 min = __le16_to_cpu(req->min);
3368 max = __le16_to_cpu(req->max);
3369 latency = __le16_to_cpu(req->latency);
3370 to_multiplier = __le16_to_cpu(req->to_multiplier);
3372 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3373 min, max, latency, to_multiplier);
3375 memset(&rsp, 0, sizeof(rsp));
3377 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3379 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3381 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3383 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Only reached on acceptance (err == 0) — push the new parameters
 * down to the controller. */
3387 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signalling command to its handler based on
 * the command code. ECHO_REQ is answered inline by reflecting the
 * payload; ECHO_RSP is silently consumed; unknown codes are logged. */
3392 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3393 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3397 switch (cmd->code) {
3398 case L2CAP_COMMAND_REJ:
3399 l2cap_command_rej(conn, cmd, data);
3402 case L2CAP_CONN_REQ:
3403 err = l2cap_connect_req(conn, cmd, data);
3406 case L2CAP_CONN_RSP:
3407 err = l2cap_connect_rsp(conn, cmd, data);
3410 case L2CAP_CONF_REQ:
3411 err = l2cap_config_req(conn, cmd, cmd_len, data);
3414 case L2CAP_CONF_RSP:
3415 err = l2cap_config_rsp(conn, cmd, data);
3418 case L2CAP_DISCONN_REQ:
3419 err = l2cap_disconnect_req(conn, cmd, data);
3422 case L2CAP_DISCONN_RSP:
3423 err = l2cap_disconnect_rsp(conn, cmd, data);
3426 case L2CAP_ECHO_REQ:
/* Echo: send the request payload straight back. */
3427 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3430 case L2CAP_ECHO_RSP:
3433 case L2CAP_INFO_REQ:
3434 err = l2cap_information_req(conn, cmd, data);
3437 case L2CAP_INFO_RSP:
3438 err = l2cap_information_rsp(conn, cmd, data);
3441 case L2CAP_CREATE_CHAN_REQ:
3442 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3445 case L2CAP_CREATE_CHAN_RSP:
3446 err = l2cap_create_channel_rsp(conn, cmd, data);
3449 case L2CAP_MOVE_CHAN_REQ:
3450 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3453 case L2CAP_MOVE_CHAN_RSP:
3454 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3457 case L2CAP_MOVE_CHAN_CFM:
3458 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3461 case L2CAP_MOVE_CHAN_CFM_RSP:
3462 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3466 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signalling command. Only the connection-parameter
 * update request has a real handler; COMMAND_REJ and UPDATE_RSP are
 * accepted silently, anything else is logged as unknown. */
3474 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3475 struct l2cap_cmd_hdr *cmd, u8 *data)
3477 switch (cmd->code) {
3478 case L2CAP_COMMAND_REJ:
3481 case L2CAP_CONN_PARAM_UPDATE_REQ:
3482 return l2cap_conn_param_update_req(conn, cmd, data);
3484 case L2CAP_CONN_PARAM_UPDATE_RSP:
3488 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signalling channel payload: iterate over the packed command
 * headers in the skb, validate each (declared length within remaining
 * data, non-zero ident), dispatch to the LE or BR/EDR handler based on
 * link type, and send a Command Reject when a handler fails. Raw-mode
 * listeners get a copy of the whole frame first. */
3493 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3494 struct sk_buff *skb)
3496 u8 *data = skb->data;
3498 struct l2cap_cmd_hdr cmd;
/* Deliver the unparsed frame to raw sockets (sniffers) first. */
3501 l2cap_raw_recv(conn, skb);
3503 while (len >= L2CAP_CMD_HDR_SIZE) {
3505 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3506 data += L2CAP_CMD_HDR_SIZE;
3507 len -= L2CAP_CMD_HDR_SIZE;
3509 cmd_len = le16_to_cpu(cmd.len);
3511 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Guard against truncated/forged commands: the declared payload
 * must fit in what remains, and ident 0 is reserved. */
3513 if (cmd_len > len || !cmd.ident) {
3514 BT_DBG("corrupted command");
3518 if (conn->hcon->type == LE_LINK)
3519 err = l2cap_le_sig_cmd(conn, &cmd, data);
3521 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3524 struct l2cap_cmd_rej_unk rej;
3526 BT_ERR("Wrong link type (%d)", err);
3528 /* FIXME: Map err to a valid reason */
3529 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3530 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailer of a received ERTM/streaming frame.
 * Trims the 2-byte FCS off the skb, then recomputes the CRC over the
 * L2CAP header (which sits just before skb->data after the earlier
 * pulls) plus the remaining payload. No-op unless FCS is CRC16. */
3540 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3542 u16 our_fcs, rcv_fcs;
/* Header size differs with extended control fields enabled. */
3545 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3546 hdr_size = L2CAP_EXT_HDR_SIZE;
3548 hdr_size = L2CAP_ENH_HDR_SIZE;
3550 if (chan->fcs == L2CAP_FCS_CRC16) {
3551 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
/* The trimmed-off FCS bytes are still present in the buffer
 * just past the new tail; read them from there. */
3552 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3553 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3555 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) from the peer with the appropriate F-bit frame:
 * RNR if we are locally busy, otherwise retransmit/send pending
 * I-frames, and fall back to an RR if nothing at all was sent. */
3561 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3565 chan->frames_sent = 0;
3567 control |= __set_reqseq(chan, chan->buffer_seq);
3569 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3570 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3571 l2cap_send_sframe(chan, control);
3572 set_bit(CONN_RNR_SENT, &chan->conn_state);
3575 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3576 l2cap_retransmit_frames(chan);
3578 l2cap_ertm_send(chan);
/* Nothing was transmitted and we're not busy: a bare RR still has
 * to go out so the peer's poll gets its F-bit response. */
3580 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3581 chan->frames_sent == 0) {
3582 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3583 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq offset from buffer_seq (modular order). Returns early
 * on a duplicate tx_seq; appends at the tail if no later frame exists. */
3587 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3589 struct sk_buff *next_skb;
3590 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequencing metadata in the skb control block for later
 * reassembly by l2cap_check_srej_gap(). */
3592 bt_cb(skb)->tx_seq = tx_seq;
3593 bt_cb(skb)->sar = sar;
3595 next_skb = skb_peek(&chan->srej_q);
3597 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
/* Duplicate frame already queued — caller drops it. */
3600 if (bt_cb(next_skb)->tx_seq == tx_seq)
3603 next_tx_seq_offset = __seq_offset(chan,
3604 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3606 if (next_tx_seq_offset > tx_seq_offset) {
3607 __skb_queue_before(&chan->srej_q, next_skb, skb);
3611 if (skb_queue_is_last(&chan->srej_q, next_skb))
3614 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3617 __skb_queue_tail(&chan->srej_q, skb);
/* Chain a continuation fragment onto an SDU skb's frag_list without
 * copying, updating len/data_len/truesize accounting. *last_frag tracks
 * the current tail so appends stay O(1). */
3622 static void append_skb_frag(struct sk_buff *skb,
3623 struct sk_buff *new_frag, struct sk_buff **last_frag)
3625 /* skb->len reflects data in skb as well as all fragments
3626 * skb->data_len reflects only data in fragments
3628 if (!skb_has_frag_list(skb))
3629 skb_shinfo(skb)->frag_list = new_frag;
3631 new_frag->next = NULL;
3633 (*last_frag)->next = new_frag;
3634 *last_frag = new_frag;
3636 skb->len += new_frag->len;
3637 skb->data_len += new_frag->len;
3638 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from SAR-tagged I-frames. Unsegmented frames go
 * straight up via chan->ops->recv(); START caches the declared SDU
 * length (after MTU check) and seeds chan->sdu; CONTINUE/END append
 * fragments, with END delivering once the accumulated length matches
 * sdu_len. Error paths free the partial SDU and reset tracking state. */
3641 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3645 switch (__get_ctrl_sar(chan, control)) {
3646 case L2CAP_SAR_UNSEGMENTED:
3650 err = chan->ops->recv(chan->data, skb);
3653 case L2CAP_SAR_START:
/* First fragment carries the total SDU length prefix. */
3657 chan->sdu_len = get_unaligned_le16(skb->data);
3658 skb_pull(skb, L2CAP_SDULEN_SIZE);
3660 if (chan->sdu_len > chan->imtu) {
3665 if (skb->len >= chan->sdu_len)
3669 chan->sdu_last_frag = skb;
3675 case L2CAP_SAR_CONTINUE:
3679 append_skb_frag(chan->sdu, skb,
3680 &chan->sdu_last_frag);
/* A continuation must not reach the declared total length —
 * that would mean the END frame is missing/over-long. */
3683 if (chan->sdu->len >= chan->sdu_len)
3693 append_skb_frag(chan->sdu, skb,
3694 &chan->sdu_last_frag);
3697 if (chan->sdu->len != chan->sdu_len)
3700 err = chan->ops->recv(chan->data, chan->sdu);
3703 /* Reassembly complete */
3705 chan->sdu_last_frag = NULL;
/* Failure path: discard the partially-assembled SDU. */
3713 kfree_skb(chan->sdu);
3715 chan->sdu_last_frag = NULL;
/* Enter local-busy state: mark the channel and arm the ack timer so an
 * RNR S-frame goes out to throttle the peer. */
3722 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3724 BT_DBG("chan %p, Enter local busy", chan);
3726 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3728 __set_ack_timer(chan);
/* Leave local-busy state. If we previously sent an RNR, poll the peer
 * with an RR+P, start the monitor timer and wait for the F-bit before
 * resuming; in any case clear the busy/RNR flags. */
3731 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3735 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3738 control = __set_reqseq(chan, chan->buffer_seq);
3739 control |= __set_ctrl_poll(chan);
3740 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3741 l2cap_send_sframe(chan, control);
3742 chan->retry_count = 1;
/* Switch from retransmission to monitor timer while polling. */
3744 __clear_retrans_timer(chan);
3745 __set_monitor_timer(chan);
3747 set_bit(CONN_WAIT_F, &chan->conn_state);
3750 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3751 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3753 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook for the socket layer to toggle local-busy flow control;
 * only meaningful in ERTM mode. */
3756 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3758 if (chan->mode == L2CAP_MODE_ERTM) {
3760 l2cap_ertm_enter_local_busy(chan);
3762 l2cap_ertm_exit_local_busy(chan);
/* After a missing frame arrives, drain the SREJ queue in sequence order:
 * deliver each consecutive buffered frame to reassembly, advancing both
 * buffer_seq_srej and the expected tx_seq, stopping at the next gap or
 * if we go locally busy. */
3766 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3768 struct sk_buff *skb;
3771 while ((skb = skb_peek(&chan->srej_q)) &&
3772 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Queue is sorted; a mismatch means the next frame is still
 * missing — stop draining. */
3775 if (bt_cb(skb)->tx_seq != tx_seq)
3778 skb = skb_dequeue(&chan->srej_q);
3779 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3780 err = l2cap_reassemble_sdu(chan, skb, control);
3783 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3787 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3788 tx_seq = __next_seq(chan, tx_seq);
/* Re-send SREJ S-frames for every outstanding entry in the SREJ list up
 * to (and removing) the entry matching tx_seq; entries that are still
 * missing are re-queued at the tail after being re-requested. */
3792 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3794 struct srej_list *l, *tmp;
3797 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3798 if (l->tx_seq == tx_seq) {
3803 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3804 control |= __set_reqseq(chan, l->tx_seq);
3805 l2cap_send_sframe(chan, control);
/* Still missing: move to the tail to preserve request order. */
3807 list_add_tail(&l->list, &chan->srej_l);
/* Send an SREJ for every sequence number between expected_tx_seq and the
 * received tx_seq (exclusive), recording each in the SREJ list (GFP_ATOMIC
 * — called from the receive path). Finally skips expected_tx_seq past the
 * frame that just arrived. */
3811 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3813 struct srej_list *new;
3816 while (tx_seq != chan->expected_tx_seq) {
3817 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3818 control |= __set_reqseq(chan, chan->expected_tx_seq);
3819 l2cap_send_sframe(chan, control);
3821 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3825 new->tx_seq = chan->expected_tx_seq;
3827 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3829 list_add_tail(&new->list, &chan->srej_l);
/* Account for the out-of-order frame that triggered the SREJs. */
3832 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive path. Handles, in order: F-bit completion of
 * a WAIT_F poll cycle, acking of frames up to req_seq, tx_seq window
 * validation, local-busy buffering, SREJ-recovery bookkeeping (both the
 * "expected missing frame arrived" and "another out-of-order frame"
 * cases), entry into SREJ state on a fresh gap, and finally in-order
 * delivery with periodic acking (every tx_win/6 + 1 frames). */
3837 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3839 u16 tx_seq = __get_txseq(chan, rx_control);
3840 u16 req_seq = __get_reqseq(chan, rx_control);
3841 u8 sar = __get_ctrl_sar(chan, rx_control);
3842 int tx_seq_offset, expected_tx_seq_offset;
/* Ack threshold: piggy-backed acks every ~1/6 of the tx window. */
3843 int num_to_ack = (chan->tx_win/6) + 1;
3846 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3847 tx_seq, rx_control);
/* F-bit answers our earlier poll: stop monitoring, resume the
 * retransmission timer if frames are still unacked. */
3849 if (__is_ctrl_final(chan, rx_control) &&
3850 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3851 __clear_monitor_timer(chan);
3852 if (chan->unacked_frames > 0)
3853 __set_retrans_timer(chan);
3854 clear_bit(CONN_WAIT_F, &chan->conn_state);
3857 chan->expected_ack_seq = req_seq;
3858 l2cap_drop_acked_frames(chan);
3860 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3862 /* invalid tx_seq */
3863 if (tx_seq_offset >= chan->tx_win) {
3864 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3868 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3869 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3870 l2cap_send_ack(chan);
3874 if (tx_seq == chan->expected_tx_seq)
3877 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3878 struct srej_list *first;
3880 first = list_first_entry(&chan->srej_l,
3881 struct srej_list, list);
/* The oldest missing frame arrived: queue it, then drain any
 * now-consecutive frames from the SREJ queue. */
3882 if (tx_seq == first->tx_seq) {
3883 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3884 l2cap_check_srej_gap(chan, tx_seq);
3886 list_del(&first->list);
/* All SREJs satisfied: leave recovery, resync buffer_seq. */
3889 if (list_empty(&chan->srej_l)) {
3890 chan->buffer_seq = chan->buffer_seq_srej;
3891 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3892 l2cap_send_ack(chan);
3893 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3896 struct srej_list *l;
3898 /* duplicated tx_seq */
3899 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
/* Frame matches a pending SREJ: re-issue outstanding SREJs. */
3902 list_for_each_entry(l, &chan->srej_l, list) {
3903 if (l->tx_seq == tx_seq) {
3904 l2cap_resend_srejframe(chan, tx_seq);
3909 err = l2cap_send_srejframe(chan, tx_seq);
3911 l2cap_send_disconn_req(chan->conn, chan, -err);
3916 expected_tx_seq_offset = __seq_offset(chan,
3917 chan->expected_tx_seq, chan->buffer_seq);
3919 /* duplicated tx_seq */
3920 if (tx_seq_offset < expected_tx_seq_offset)
/* Fresh gap detected: enter SREJ recovery. */
3923 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3925 BT_DBG("chan %p, Enter SREJ", chan);
3927 INIT_LIST_HEAD(&chan->srej_l);
3928 chan->buffer_seq_srej = chan->buffer_seq;
3930 __skb_queue_head_init(&chan->srej_q);
3931 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3933 /* Set P-bit only if there are some I-frames to ack. */
3934 if (__clear_ack_timer(chan))
3935 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3937 err = l2cap_send_srejframe(chan, tx_seq);
3939 l2cap_send_disconn_req(chan->conn, chan, -err);
/* --- in-sequence path --- */
3946 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* During SREJ recovery even in-order frames are buffered, not
 * delivered, until the gap closes. */
3948 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3949 bt_cb(skb)->tx_seq = tx_seq;
3950 bt_cb(skb)->sar = sar;
3951 __skb_queue_tail(&chan->srej_q, skb);
3955 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3956 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3959 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3963 if (__is_ctrl_final(chan, rx_control)) {
3964 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3965 l2cap_retransmit_frames(chan);
/* Periodic ack: either ack now or (re)arm the ack timer. */
3969 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3970 if (chan->num_acked == num_to_ack - 1)
3971 l2cap_send_ack(chan);
3973 __set_ack_timer(chan);
/* Handle a Receiver Ready S-frame: ack frames up to req_seq, then
 * respond per the P/F bits — a poll gets an F-bit reply (SREJ tail or
 * I/RR/RNR), a final clears remote-busy and may trigger retransmission,
 * and a plain RR resumes normal sending. */
3982 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3984 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3985 __get_reqseq(chan, rx_control), rx_control);
3987 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3988 l2cap_drop_acked_frames(chan);
3990 if (__is_ctrl_poll(chan, rx_control)) {
3991 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3992 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3993 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3994 (chan->unacked_frames > 0))
3995 __set_retrans_timer(chan);
3997 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* In SREJ recovery, answer the poll with the SREJ tail. */
3998 l2cap_send_srejtail(chan);
4000 l2cap_send_i_or_rr_or_rnr(chan);
4003 } else if (__is_ctrl_final(chan, rx_control)) {
4004 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* F-bit without an active REJ: retransmit unacked frames. */
4006 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4007 l2cap_retransmit_frames(chan);
4010 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4011 (chan->unacked_frames > 0))
4012 __set_retrans_timer(chan);
4014 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4015 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4016 l2cap_send_ack(chan);
4018 l2cap_ertm_send(chan);
/* Handle a Reject S-frame: the peer asks for retransmission starting at
 * req_seq. Ack everything before it, retransmit, and if we're mid-poll
 * (WAIT_F) remember the REJ so the F-bit reply doesn't retransmit twice. */
4022 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4024 u16 tx_seq = __get_reqseq(chan, rx_control);
4026 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4028 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4030 chan->expected_ack_seq = tx_seq;
4031 l2cap_drop_acked_frames(chan);
4033 if (__is_ctrl_final(chan, rx_control)) {
4034 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4035 l2cap_retransmit_frames(chan);
4037 l2cap_retransmit_frames(chan);
4039 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4040 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a Selective Reject S-frame: retransmit exactly the requested
 * frame. The P-bit variant also acks prior frames and flushes pending
 * I-frames with the F-bit set; the F-bit variant clears any matching
 * saved SREJ state; the plain variant records the reqseq if we're
 * waiting on a poll cycle. */
4043 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4045 u16 tx_seq = __get_reqseq(chan, rx_control);
4047 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4049 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4051 if (__is_ctrl_poll(chan, rx_control)) {
4052 chan->expected_ack_seq = tx_seq;
4053 l2cap_drop_acked_frames(chan);
4055 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4056 l2cap_retransmit_one_frame(chan, tx_seq);
4058 l2cap_ertm_send(chan);
4060 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4061 chan->srej_save_reqseq = tx_seq;
4062 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4064 } else if (__is_ctrl_final(chan, rx_control)) {
/* Only honor the F-bit if it answers the SREJ we saved. */
4065 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4066 chan->srej_save_reqseq == tx_seq)
4067 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4069 l2cap_retransmit_one_frame(chan, tx_seq);
4071 l2cap_retransmit_one_frame(chan, tx_seq);
4072 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4073 chan->srej_save_reqseq = tx_seq;
4074 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a Receiver Not Ready S-frame: mark the peer busy, ack frames up
 * to req_seq, and answer a poll appropriately (RR+F when idle, SREJ tail
 * while in SREJ recovery). */
4079 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4081 u16 tx_seq = __get_reqseq(chan, rx_control);
4083 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4085 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4086 chan->expected_ack_seq = tx_seq;
4087 l2cap_drop_acked_frames(chan);
4089 if (__is_ctrl_poll(chan, rx_control))
4090 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4092 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* Peer is busy — stop the retransmission timer. */
4093 __clear_retrans_timer(chan);
4094 if (__is_ctrl_poll(chan, rx_control))
4095 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4099 if (__is_ctrl_poll(chan, rx_control)) {
4100 l2cap_send_srejtail(chan);
4102 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4103 l2cap_send_sframe(chan, rx_control);
/* Dispatch a received S-frame by supervisory type (RR/REJ/SREJ/RNR),
 * after first honoring an F-bit that terminates a WAIT_F poll cycle. */
4107 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4109 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4111 if (__is_ctrl_final(chan, rx_control) &&
4112 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4113 __clear_monitor_timer(chan);
4114 if (chan->unacked_frames > 0)
4115 __set_retrans_timer(chan);
4116 clear_bit(CONN_WAIT_F, &chan->conn_state);
4119 switch (__get_ctrl_super(chan, rx_control)) {
4120 case L2CAP_SUPER_RR:
4121 l2cap_data_channel_rrframe(chan, rx_control);
4124 case L2CAP_SUPER_REJ:
4125 l2cap_data_channel_rejframe(chan, rx_control);
4128 case L2CAP_SUPER_SREJ:
4129 l2cap_data_channel_srejframe(chan, rx_control);
4132 case L2CAP_SUPER_RNR:
4133 l2cap_data_channel_rnrframe(chan, rx_control);
/* ERTM receive entry point for one frame: strip and decode the control
 * field, verify the FCS (bad frames are silently dropped — see comment
 * below), validate payload length against MPS and req_seq against the
 * unacked window, then route to the I-frame or S-frame handler. Any
 * protocol violation disconnects the channel. */
4141 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4143 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4146 int len, next_tx_seq_offset, req_seq_offset;
4148 control = __get_control(chan, skb->data);
4149 skb_pull(skb, __ctrl_size(chan));
4153 * We can just drop the corrupted I-frame here.
4154 * Receiver will miss it and start proper recovery
4155 * procedures and ask retransmission.
4157 if (l2cap_check_fcs(chan, skb))
/* Start-of-SDU I-frames carry a 2-byte SDU length prefix which
 * doesn't count toward the MPS payload check. */
4160 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4161 len -= L2CAP_SDULEN_SIZE;
4163 if (chan->fcs == L2CAP_FCS_CRC16)
4164 len -= L2CAP_FCS_SIZE;
4166 if (len > chan->mps) {
4167 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4171 req_seq = __get_reqseq(chan, control);
4173 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4175 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4176 chan->expected_ack_seq);
4178 /* check for invalid req-seq */
4179 if (req_seq_offset > next_tx_seq_offset) {
4180 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4184 if (!__is_sframe(chan, control)) {
/* I-frame with an invalid length — disconnect. */
4186 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4190 l2cap_data_channel_iframe(chan, control, skb);
/* S-frames must carry no payload — anything else disconnects. */
4194 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4198 l2cap_data_channel_sframe(chan, control, skb);
/* Route a data frame to its channel by CID and process it according to
 * the channel mode: basic (MTU check + direct delivery), ERTM (full
 * state machine via l2cap_ertm_data_rcv), or streaming (FCS + MPS
 * checks, missing frames discard the partial SDU rather than trigger
 * retransmission). */
4208 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4210 struct l2cap_chan *chan;
4211 struct sock *sk = NULL;
4216 chan = l2cap_get_chan_by_scid(conn, cid);
4218 BT_DBG("unknown cid 0x%4.4x", cid);
4224 BT_DBG("chan %p, len %d", chan, skb->len);
4226 if (chan->state != BT_CONNECTED)
4229 switch (chan->mode) {
4230 case L2CAP_MODE_BASIC:
4231 /* If socket recv buffers overflows we drop data here
4232 * which is *bad* because L2CAP has to be reliable.
4233 * But we don't have any other choice. L2CAP doesn't
4234 * provide flow control mechanism. */
4236 if (chan->imtu < skb->len)
4239 if (!chan->ops->recv(chan->data, skb))
4243 case L2CAP_MODE_ERTM:
4244 l2cap_ertm_data_rcv(sk, skb);
4248 case L2CAP_MODE_STREAMING:
4249 control = __get_control(chan, skb->data)
4250 skb_pull(skb, __ctrl_size(chan));
4253 if (l2cap_check_fcs(chan, skb))
4256 if (__is_sar_start(chan, control))
4257 len -= L2CAP_SDULEN_SIZE;
4259 if (chan->fcs == L2CAP_FCS_CRC16)
4260 len -= L2CAP_FCS_SIZE;
/* Streaming mode forbids S-frames entirely. */
4262 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4265 tx_seq = __get_txseq(chan, control);
4267 if (chan->expected_tx_seq != tx_seq) {
4268 /* Frame(s) missing - must discard partial SDU */
4269 kfree_skb(chan->sdu);
4271 chan->sdu_last_frag = NULL;
4274 /* TODO: Notify userland of missing data */
4277 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4279 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4280 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4285 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (group) frame: find a channel bound to the
 * PSM on our source address, gate on state and MTU, and hand the skb to
 * the upper layer. */
4299 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4301 struct sock *sk = NULL;
4302 struct l2cap_chan *chan;
4304 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4312 BT_DBG("sk %p, len %d", sk, skb->len);
4314 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4317 if (chan->imtu < skb->len)
4320 if (!chan->ops->recv(chan->data, skb))
/* Deliver an LE ATT fixed-channel frame: look up a channel bound to the
 * CID on our source address, gate on state and MTU, and pass the skb up.
 * Mirrors l2cap_conless_channel() but keyed by CID instead of PSM. */
4332 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4334 struct sock *sk = NULL;
4335 struct l2cap_chan *chan;
4337 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4345 BT_DBG("sk %p, len %d", sk, skb->len);
4347 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4350 if (chan->imtu < skb->len)
4353 if (!chan->ops->recv(chan->data, skb))
/* Top-level demux for one complete L2CAP frame: strip the basic header,
 * verify the declared length, and route by CID — signalling channels,
 * connectionless (by PSM), LE ATT, SMP, or a regular data channel. */
4365 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4367 struct l2cap_hdr *lh = (void *) skb->data;
4371 skb_pull(skb, L2CAP_HDR_SIZE);
4372 cid = __le16_to_cpu(lh->cid);
4373 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload exactly. */
4375 if (len != skb->len) {
4380 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4383 case L2CAP_CID_LE_SIGNALING:
4384 case L2CAP_CID_SIGNALING:
4385 l2cap_sig_channel(conn, skb);
4388 case L2CAP_CID_CONN_LESS:
4389 psm = get_unaligned_le16(skb->data);
4391 l2cap_conless_channel(conn, psm, skb);
4394 case L2CAP_CID_LE_DATA:
4395 l2cap_att_channel(conn, cid, skb);
/* SMP: a failure tears down the whole connection. */
4399 if (smp_sig_channel(conn, skb))
4400 l2cap_conn_del(conn->hcon, EACCES);
4404 l2cap_data_channel(conn, cid, skb);
4409 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being indicated. Scan all
 * listening channels; those bound to this adapter's address take
 * precedence (lm1/exact) over wildcard BDADDR_ANY listeners (lm2).
 * Returns the accept/master link-mode bits for the best match. */
4411 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4413 int exact = 0, lm1 = 0, lm2 = 0;
4414 struct l2cap_chan *c;
4416 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4418 /* Find listening sockets and check their link_mode */
4419 read_lock(&chan_list_lock);
4420 list_for_each_entry(c, &chan_list, global_l) {
4421 struct sock *sk = c->sk;
4423 if (c->state != BT_LISTEN)
/* Exact local-address binding beats a wildcard listener. */
4426 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4427 lm1 |= HCI_LM_ACCEPT;
4428 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4429 lm1 |= HCI_LM_MASTER;
4431 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4432 lm2 |= HCI_LM_ACCEPT;
4433 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4434 lm2 |= HCI_LM_MASTER;
4437 read_unlock(&chan_list_lock);
4439 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed. On success create the
 * L2CAP connection object and mark it ready; on failure tear everything
 * down with the HCI status mapped to an errno. */
4442 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4444 struct l2cap_conn *conn;
4446 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4449 conn = l2cap_conn_add(hcon, status);
4451 l2cap_conn_ready(conn);
4453 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: supply the HCI reason code to use when disconnecting.
 * Falls back to REMOTE_USER_TERM when no L2CAP state exists. */
4458 int l2cap_disconn_ind(struct hci_conn *hcon)
4460 struct l2cap_conn *conn = hcon->l2cap_data;
4462 BT_DBG("hcon %p", hcon);
4465 return HCI_ERROR_REMOTE_USER_TERM;
4466 return conn->disc_reason;
/* HCI callback: the ACL link went down — destroy the L2CAP connection,
 * mapping the HCI reason to an errno for the upper layers. */
4469 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4471 BT_DBG("hcon %p reason %d", hcon, reason);
4473 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 * Encryption lost: MEDIUM-security channels get a grace timer before
 * teardown, HIGH-security channels are closed immediately. Encryption
 * (re)gained: cancel the pending MEDIUM-security timer. */
4477 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4479 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4482 if (encrypt == 0x00) {
4483 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4484 __clear_chan_timer(chan);
4485 __set_chan_timer(chan,
4486 msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
4487 } else if (chan->sec_level == BT_SECURITY_HIGH)
4488 l2cap_chan_close(chan, ECONNREFUSED);
4490 if (chan->sec_level == BT_SECURITY_MEDIUM)
4491 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption procedure finished (status)
 * with the resulting encryption state (encrypt). LE links distribute SMP
 * keys and cancel the security timer. For each channel: LE data channels
 * become ready on success; established channels re-check encryption;
 * channels waiting in BT_CONNECT now send their Connect Request, and
 * BT_CONNECT2 channels answer the pending incoming request (pending if
 * setup is deferred to userspace, success otherwise, or a security-block
 * rejection on failure). */
4495 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4497 struct l2cap_conn *conn = hcon->l2cap_data;
4498 struct l2cap_chan *chan;
4503 BT_DBG("conn %p", conn);
4505 if (hcon->type == LE_LINK) {
4506 smp_distribute_keys(conn, 0);
4507 __cancel_delayed_work(&conn->security_timer);
4512 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4513 struct sock *sk = chan->sk;
4517 BT_DBG("chan->scid %d", chan->scid);
4519 if (chan->scid == L2CAP_CID_LE_DATA) {
4520 if (!status && encrypt) {
4521 chan->sec_level = hcon->sec_level;
4522 l2cap_chan_ready(sk);
/* Channels whose connect request is still pending on this
 * security procedure are handled below, not skipped. */
4529 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4534 if (!status && (chan->state == BT_CONNECTED ||
4535 chan->state == BT_CONFIG)) {
4536 l2cap_check_encryption(chan, encrypt);
/* Security done — outgoing channel can now send its request. */
4541 if (chan->state == BT_CONNECT) {
4543 struct l2cap_conn_req req;
4544 req.scid = cpu_to_le16(chan->scid);
4545 req.psm = chan->psm;
4547 chan->ident = l2cap_get_ident(conn);
4548 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4550 l2cap_send_cmd(conn, chan->ident,
4551 L2CAP_CONN_REQ, sizeof(req), &req);
/* Security failed: arm the disconnect timer instead. */
4553 __clear_chan_timer(chan);
4554 __set_chan_timer(chan,
4555 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4557 } else if (chan->state == BT_CONNECT2) {
4558 struct l2cap_conn_rsp rsp;
/* Deferred setup: keep the peer pending and wake the
 * listening parent so userspace can accept. */
4562 if (bt_sk(sk)->defer_setup) {
4563 struct sock *parent = bt_sk(sk)->parent;
4564 res = L2CAP_CR_PEND;
4565 stat = L2CAP_CS_AUTHOR_PEND;
4567 parent->sk_data_ready(parent, 0);
4569 l2cap_state_change(chan, BT_CONFIG);
4570 res = L2CAP_CR_SUCCESS;
4571 stat = L2CAP_CS_NO_INFO;
4574 l2cap_state_change(chan, BT_DISCONN);
4575 __set_chan_timer(chan,
4576 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4577 res = L2CAP_CR_SEC_BLOCK;
4578 stat = L2CAP_CS_NO_INFO;
4581 rsp.scid = cpu_to_le16(chan->dcid);
4582 rsp.dcid = cpu_to_le16(chan->scid);
4583 rsp.result = cpu_to_le16(res);
4584 rsp.status = cpu_to_le16(stat);
4585 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/*
 * l2cap_recv_acldata - entry point for incoming ACL data from the HCI
 * core.
 *
 * @hcon:  HCI connection the data arrived on
 * @skb:   the received fragment
 * @flags: ACL packet-boundary flags; ACL_CONT marks a continuation
 *         fragment, otherwise this is a start fragment
 *
 * Reassembles possibly-fragmented L2CAP frames into conn->rx_skb and
 * hands complete frames to l2cap_recv_frame().  On framing errors the
 * connection is flagged unreliable via l2cap_conn_unreliable(ECOMM).
 *
 * NOTE(review): the embedded numbering jumps — several lines (the
 * !conn check before l2cap_conn_add, drop/return paths, closing
 * braces) are elided from this view.
 */
4597 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4599 struct l2cap_conn *conn = hcon->l2cap_data;
/* No L2CAP connection yet for this ACL link: create one. */
4602 conn = l2cap_conn_add(hcon, 0);
4607 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
/* ---- Start fragment ---- */
4609 if (!(flags & ACL_CONT)) {
4610 struct l2cap_hdr *hdr;
4611 struct l2cap_chan *chan;
/* A start fragment while a reassembly is still in progress means the
 * previous frame was truncated: drop the partial buffer. */
4616 BT_ERR("Unexpected start frame (len %d)", skb->len);
4617 kfree_skb(conn->rx_skb);
4618 conn->rx_skb = NULL;
4620 l2cap_conn_unreliable(conn, ECOMM);
4623 /* Start fragment always begin with Basic L2CAP header */
4624 if (skb->len < L2CAP_HDR_SIZE) {
4625 BT_ERR("Frame is too short (len %d)", skb->len);
4626 l2cap_conn_unreliable(conn, ECOMM);
/* Parse the Basic L2CAP header: total frame length and channel id
 * are little-endian on the wire. */
4630 hdr = (struct l2cap_hdr *) skb->data;
4631 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4632 cid = __le16_to_cpu(hdr->cid);
4634 if (len == skb->len) {
4635 /* Complete frame received */
4636 l2cap_recv_frame(conn, skb);
4640 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
/* Fragment claims more payload than the header announced. */
4642 if (skb->len > len) {
4643 BT_ERR("Frame is too long (len %d, expected len %d)",
4645 l2cap_conn_unreliable(conn, ECOMM);
/* Sanity-check the announced length against the destination
 * channel's receive MTU before committing a buffer. */
4649 chan = l2cap_get_chan_by_scid(conn, cid);
4651 if (chan && chan->sk) {
4652 struct sock *sk = chan->sk;
4654 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4655 BT_ERR("Frame exceeding recv MTU (len %d, "
4659 l2cap_conn_unreliable(conn, ECOMM);
4665 /* Allocate skb for the complete frame (with header) */
4666 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
/* Copy the start fragment in and record how many bytes are still
 * outstanding. */
4670 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4672 conn->rx_len = len - skb->len;
/* ---- Continuation fragment ---- */
4674 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation without a reassembly in progress. */
4676 if (!conn->rx_len) {
4677 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4678 l2cap_conn_unreliable(conn, ECOMM);
/* Continuation larger than the remaining expected bytes: abort the
 * whole reassembly. */
4682 if (skb->len > conn->rx_len) {
4683 BT_ERR("Fragment is too long (len %d, expected %d)",
4684 skb->len, conn->rx_len);
4685 kfree_skb(conn->rx_skb);
4686 conn->rx_skb = NULL;
4688 l2cap_conn_unreliable(conn, ECOMM);
/* Append this fragment and shrink the outstanding byte count. */
4692 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4694 conn->rx_len -= skb->len;
4696 if (!conn->rx_len) {
4697 /* Complete frame received */
4698 l2cap_recv_frame(conn, conn->rx_skb);
4699 conn->rx_skb = NULL;
/*
 * l2cap_debugfs_show - seq_file show callback for the "l2cap" debugfs
 * entry.  Prints one line per registered channel: source/destination
 * addresses, state, PSM, CIDs, MTUs, security level and mode.
 * Iterates the global chan_list under its reader lock.
 */
4708 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4710 struct l2cap_chan *c;
4712 read_lock(&chan_list_lock);
4714 list_for_each_entry(c, &chan_list, global_l) {
4715 struct sock *sk = c->sk;
/* PSM is stored little-endian; convert for display. */
4717 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4718 batostr(&bt_sk(sk)->src),
4719 batostr(&bt_sk(sk)->dst),
4720 c->state, __le16_to_cpu(c->psm),
4721 c->scid, c->dcid, c->imtu, c->omtu,
4722 c->sec_level, c->mode);
4725 read_unlock(&chan_list_lock);
/*
 * Open handler for the "l2cap" debugfs file: wires up the single-shot
 * seq_file show callback above.
 */
4730 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4732 return single_open(file, l2cap_debugfs_show, inode->i_private);
/*
 * File operations for the debugfs entry; standard seq_file single_open
 * pattern (NOTE(review): the elided line between .open and .llseek is
 * presumably .read = seq_read — confirm against the full source).
 */
4735 static const struct file_operations l2cap_debugfs_fops = {
4736 .open = l2cap_debugfs_open,
4738 .llseek = seq_lseek,
4739 .release = single_release,
/* Dentry of the created debugfs file, kept for removal in l2cap_exit(). */
4742 static struct dentry *l2cap_debugfs;
/*
 * l2cap_init - module initialization: registers the L2CAP socket layer
 * and creates the read-only "l2cap" debugfs file.  Failure to create
 * the debugfs entry is only logged, not fatal.
 * NOTE(review): the error-check after l2cap_init_sockets() is elided
 * from this view.
 */
4744 int __init l2cap_init(void)
4748 err = l2cap_init_sockets();
4753 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4754 bt_debugfs, NULL, &l2cap_debugfs_fops);
4756 BT_ERR("Failed to create L2CAP debug file");
/*
 * l2cap_exit - module teardown: removes the debugfs entry and
 * unregisters the L2CAP socket layer, mirroring l2cap_init().
 */
4762 void l2cap_exit(void)
4764 debugfs_remove(l2cap_debugfs)
4765 l2cap_cleanup_sockets();
/* Module parameter (bool, mode 0644): lets the admin disable Enhanced
 * Retransmission Mode at load time or via sysfs. */
4768 module_param(disable_ertm, bool, 0644);
4769 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");