2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* Locally supported feature mask advertised in information responses,
 * and the fixed-channel bitmap (only the L2CAP signalling channel set). */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of every L2CAP channel in the system, guarded by
 * chan_list_lock (bh-safe rwlock). */
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers defined later in the file. */
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
78 /* ---- L2CAP channels ---- */
/* Take a reference on the channel. */
80 static inline void chan_hold(struct l2cap_chan *c)
82 atomic_inc(&c->refcnt);
/* Drop a reference; the last put frees the channel (the free call itself
 * is elided from this excerpt). */
85 static inline void chan_put(struct l2cap_chan *c)
87 if (atomic_dec_and_test(&c->refcnt))
/* Look up a channel on @conn by destination CID (match test elided).
 * NOTE(review): lock-free walker — caller presumably holds conn->chan_lock,
 * as the scid/ident wrappers below do; confirm against callers. */
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
95 list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on @conn by source CID; caller must hold
 * conn->chan_lock (see l2cap_get_chan_by_scid below). */
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 struct l2cap_chan *c;
106 list_for_each_entry(c, &conn->chan_l, list) {
113 /* Find channel with given SCID.
114 * Returns locked socket */
/* Locked wrapper around __l2cap_get_chan_by_scid(): takes the connection's
 * channel-list read lock for the lookup (socket locking elided here). */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117 struct l2cap_chan *c;
119 read_lock(&conn->chan_lock);
120 c = __l2cap_get_chan_by_scid(conn, cid);
123 read_unlock(&conn->chan_lock);
/* Look up a channel on @conn by the signalling command identifier it last
 * used; caller must hold conn->chan_lock. */
127 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129 struct l2cap_chan *c;
131 list_for_each_entry(c, &conn->chan_l, list) {
132 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
138 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
140 struct l2cap_chan *c;
142 read_lock(&conn->chan_lock);
143 c = __l2cap_get_chan_by_ident(conn, ident);
146 read_unlock(&conn->chan_lock);
/* Find a channel in the global list bound to source port @psm on source
 * address @src; caller must hold chan_list_lock (see l2cap_add_psm). */
150 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
152 struct l2cap_chan *c;
154 list_for_each_entry(c, &chan_list, global_l) {
155 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on source address @src.
 * A non-zero @psm fails if that (psm, src) pair is already taken; with
 * psm == 0 a free dynamic PSM in 0x1001..0x10ff is auto-allocated (PSMs
 * are odd-valued, hence the += 2 stride). Error paths are elided here. */
161 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
165 write_lock_bh(&chan_list_lock);
167 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
180 for (p = 0x1001; p < 0x1100; p += 2)
181 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
182 chan->psm = cpu_to_le16(p);
183 chan->sport = cpu_to_le16(p);
190 write_unlock_bh(&chan_list_lock);
/* Bind @chan to a fixed source CID (the assignment itself is elided);
 * the global-list lock serializes against PSM/CID binders. */
194 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
196 write_lock_bh(&chan_list_lock);
200 write_unlock_bh(&chan_list_lock);
/* Allocate the first unused dynamic source CID on @conn by linear scan
 * of [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
205 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
207 u16 cid = L2CAP_CID_DYN_START;
209 for (; cid < L2CAP_CID_DYN_END; cid++) {
210 if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm a per-channel timer to fire @timeout ms from now.
 * NOTE(review): the branch taken when mod_timer() returns 0 (timer was
 * not pending) is elided — presumably it takes a channel reference. */
217 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
219 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
221 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
/* Cancel a pending per-channel timer; the body run when del_timer()
 * succeeds (presumably dropping the timer's channel reference) is elided. */
225 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
227 BT_DBG("chan %p state %d", chan, chan->state);
229 if (timer_pending(timer) && del_timer(timer))
/* Move the channel to @state and notify the owner through the
 * state_change callback (the chan->state assignment is elided here). */
233 static void l2cap_state_change(struct l2cap_chan *chan, int state)
236 chan->ops->state_change(chan->data, state);
/* Channel timer callback: tear the channel down when its timer expires.
 * Runs in timer (softirq) context with the channel pointer as @arg. */
239 static void l2cap_chan_timeout(unsigned long arg)
241 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
242 struct sock *sk = chan->sk;
245 BT_DBG("chan %p state %d", chan, chan->state);
249 if (sock_owned_by_user(sk)) {
250 /* sk is owned by user. Try again later */
251 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Pick the errno reported to the socket: refuse connections that timed
 * out while connected/configuring or mid-connect above SDP security. */
257 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
258 reason = ECONNREFUSED;
259 else if (chan->state == BT_CONNECT &&
260 chan->sec_level != BT_SECURITY_SDP)
261 reason = ECONNREFUSED;
265 l2cap_chan_close(chan, reason);
269 chan->ops->close(chan->data);
/* Allocate a new channel for socket @sk, link it into the global channel
 * list, arm its state-machine timer, and return it with one reference
 * held (NULL-check of the allocation is elided from this excerpt). */
273 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
275 struct l2cap_chan *chan;
277 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
283 write_lock_bh(&chan_list_lock);
284 list_add(&chan->global_l, &chan_list);
285 write_unlock_bh(&chan_list_lock);
287 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
289 chan->state = BT_OPEN;
291 atomic_set(&chan->refcnt, 1);
293 BT_DBG("sk %p chan %p", sk, chan);
/* Unlink the channel from the global list; the final reference drop
 * (chan_put) is elided from this excerpt. */
298 void l2cap_chan_destroy(struct l2cap_chan *chan)
300 write_lock_bh(&chan_list_lock);
301 list_del(&chan->global_l);
302 write_unlock_bh(&chan_list_lock);
/* Attach @chan to connection @conn: assign CIDs/MTU according to the
 * channel type, seed default QoS parameters, and add it to the
 * connection's channel list. Caller holds conn->chan_lock (see
 * l2cap_chan_add). */
307 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
309 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
310 chan->psm, chan->dcid);
311 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Connection-oriented channels: LE links use the fixed LE data CID,
 * ACL links get a freshly allocated dynamic source CID. */
316 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
317 if (conn->hcon->type == LE_LINK) {
319 chan->omtu = L2CAP_LE_DEFAULT_MTU;
320 chan->scid = L2CAP_CID_LE_DATA;
321 chan->dcid = L2CAP_CID_LE_DATA;
323 /* Alloc CID for connection-oriented socket */
324 chan->scid = l2cap_alloc_cid(conn);
325 chan->omtu = L2CAP_DEFAULT_MTU;
327 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
328 /* Connectionless socket */
329 chan->scid = L2CAP_CID_CONN_LESS;
330 chan->dcid = L2CAP_CID_CONN_LESS;
331 chan->omtu = L2CAP_DEFAULT_MTU;
333 /* Raw socket can send/recv signalling messages only */
334 chan->scid = L2CAP_CID_SIGNALING;
335 chan->dcid = L2CAP_CID_SIGNALING;
336 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort service defaults for the extended-flow-spec fields. */
339 chan->local_id = L2CAP_BESTEFFORT_ID;
340 chan->local_stype = L2CAP_SERV_BESTEFFORT;
341 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
342 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
343 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
344 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
348 list_add(&chan->list, &conn->chan_l);
/* Detach @chan from its connection and mark the socket closed with @err.
352 * Must be called on the locked socket. */
353 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
355 struct sock *sk = chan->sk;
356 struct l2cap_conn *conn = chan->conn;
357 struct sock *parent = bt_sk(sk)->parent;
359 __clear_chan_timer(chan);
361 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
364 /* Delete from channel list */
365 write_lock_bh(&conn->chan_lock);
366 list_del(&chan->list);
367 write_unlock_bh(&conn->chan_lock);
/* Drop the HCI connection reference this channel held. */
371 hci_conn_put(conn->hcon);
374 l2cap_state_change(chan, BT_CLOSED);
375 sock_set_flag(sk, SOCK_ZAPPED);
/* If still queued on a listening parent, unlink and wake the parent;
 * otherwise just notify the socket (branch structure elided here). */
381 bt_accept_unlink(sk);
382 parent->sk_data_ready(parent, 0);
384 sk->sk_state_change(sk);
386 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
387 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
390 skb_queue_purge(&chan->tx_q);
/* ERTM teardown: stop all retransmission machinery and free queued
 * frames plus the outstanding SREJ list. */
392 if (chan->mode == L2CAP_MODE_ERTM) {
393 struct srej_list *l, *tmp;
395 __clear_retrans_timer(chan);
396 __clear_monitor_timer(chan);
397 __clear_ack_timer(chan);
399 skb_queue_purge(&chan->srej_q);
401 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every connection still sitting on a listening socket's accept
 * queue (each is reset with ECONNRESET and released via ops->close). */
408 static void l2cap_chan_cleanup_listen(struct sock *parent)
412 BT_DBG("parent %p", parent);
414 /* Close not yet accepted channels */
415 while ((sk = bt_accept_dequeue(parent, NULL))) {
416 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
417 __clear_chan_timer(chan);
419 l2cap_chan_close(chan, ECONNRESET);
421 chan->ops->close(chan->data);
/* Shut the channel down with errno @reason, following the L2CAP
 * disconnect procedure appropriate for its current state.
 * NOTE(review): the switch's case labels are elided from this excerpt;
 * the groups below correspond to listen, connected/config, connect2,
 * and the default catch-all states. */
425 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
427 struct l2cap_conn *conn = chan->conn;
428 struct sock *sk = chan->sk;
430 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
432 switch (chan->state) {
/* Listening socket: flush its accept queue and zap it. */
434 l2cap_chan_cleanup_listen(sk);
436 l2cap_state_change(chan, BT_CLOSED);
437 sock_set_flag(sk, SOCK_ZAPPED);
/* Established/configuring ACL channel: send a Disconnect Request and
 * wait for the response under the send timeout; otherwise drop it. */
442 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
443 conn->hcon->type == ACL_LINK) {
444 __clear_chan_timer(chan);
445 __set_chan_timer(chan, sk->sk_sndtimeo);
446 l2cap_send_disconn_req(conn, chan, reason);
448 l2cap_chan_del(chan, reason);
/* Half-open incoming channel: answer the pending Connect Request with
 * a rejection before deleting the channel. */
452 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
453 conn->hcon->type == ACL_LINK) {
454 struct l2cap_conn_rsp rsp;
457 if (bt_sk(sk)->defer_setup)
458 result = L2CAP_CR_SEC_BLOCK;
460 result = L2CAP_CR_BAD_PSM;
461 l2cap_state_change(chan, BT_DISCONN);
463 rsp.scid = cpu_to_le16(chan->dcid);
464 rsp.dcid = cpu_to_le16(chan->scid);
465 rsp.result = cpu_to_le16(result);
466 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
467 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
471 l2cap_chan_del(chan, reason);
476 l2cap_chan_del(chan, reason);
/* Default: nothing to tell the peer, just zap the socket. */
480 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel's security level to the HCI authentication type used
 * when establishing the link: raw (dedicated bonding), SDP/PSM 0x0001
 * (no bonding), or everything else (general bonding). */
485 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
487 if (chan->chan_type == L2CAP_CHAN_RAW) {
488 switch (chan->sec_level) {
489 case BT_SECURITY_HIGH:
490 return HCI_AT_DEDICATED_BONDING_MITM;
491 case BT_SECURITY_MEDIUM:
492 return HCI_AT_DEDICATED_BONDING;
494 return HCI_AT_NO_BONDING;
/* PSM 0x0001 is SDP: never bond, and demote LOW to SDP level. */
496 } else if (chan->psm == cpu_to_le16(0x0001)) {
497 if (chan->sec_level == BT_SECURITY_LOW)
498 chan->sec_level = BT_SECURITY_SDP;
500 if (chan->sec_level == BT_SECURITY_HIGH)
501 return HCI_AT_NO_BONDING_MITM;
503 return HCI_AT_NO_BONDING;
505 switch (chan->sec_level) {
506 case BT_SECURITY_HIGH:
507 return HCI_AT_GENERAL_BONDING_MITM;
508 case BT_SECURITY_MEDIUM:
509 return HCI_AT_GENERAL_BONDING;
511 return HCI_AT_NO_BONDING;
516 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on its ACL
 * link; returns the hci_conn_security() verdict. */
517 int l2cap_chan_check_security(struct l2cap_chan *chan)
519 struct l2cap_conn *conn = chan->conn;
522 auth_type = l2cap_get_auth_type(chan);
524 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for @conn, wrapping
 * within the kernel-reserved range under conn->lock. */
527 static u8 l2cap_get_ident(struct l2cap_conn *conn)
531 /* Get next available identificator.
532 * 1 - 128 are used by kernel.
533 * 129 - 199 are reserved.
534 * 200 - 254 are used by utilities like l2ping, etc.
537 spin_lock_bh(&conn->lock);
539 if (++conn->tx_ident > 128)
544 spin_unlock_bh(&conn->lock);
/* Build a signalling command PDU and push it out on the HCI channel,
 * marked non-flushable when the controller supports it and sent at
 * maximum priority to keep signalling ahead of data. */
549 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
551 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
554 BT_DBG("code 0x%2.2x", code);
559 if (lmp_no_flush_capable(conn->hcon->hdev))
560 flags = ACL_START_NO_FLUSH;
564 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
565 skb->priority = HCI_PRIO_MAX;
567 hci_send_acl(conn->hchan, skb, flags);
/* Transmit one data skb for @chan over HCI, honouring the channel's
 * flushable and force-active flags. */
570 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
572 struct hci_conn *hcon = chan->conn->hcon;
575 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
578 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
579 lmp_no_flush_capable(hcon->hdev))
580 flags = ACL_START_NO_FLUSH;
584 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
585 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM supervisory (S-) frame carrying @control.
 * Header size depends on the extended-control flag; a CRC16 FCS is
 * appended when the channel negotiated it. Only valid while connected. */
588 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
591 struct l2cap_hdr *lh;
592 struct l2cap_conn *conn = chan->conn;
595 if (chan->state != BT_CONNECTED)
598 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
599 hlen = L2CAP_EXT_HDR_SIZE;
601 hlen = L2CAP_ENH_HDR_SIZE;
603 if (chan->fcs == L2CAP_FCS_CRC16)
604 hlen += L2CAP_FCS_SIZE;
606 BT_DBG("chan %p, control 0x%8.8x", chan, control);
608 count = min_t(unsigned int, conn->mtu, hlen);
610 control |= __set_sframe(chan);
/* Piggy-back pending Final / Poll bits, clearing them atomically. */
612 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
613 control |= __set_ctrl_final(chan);
615 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
616 control |= __set_ctrl_poll(chan);
618 skb = bt_skb_alloc(count, GFP_ATOMIC);
622 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
623 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
624 lh->cid = cpu_to_le16(chan->dcid);
626 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
/* FCS covers the basic header plus control field. */
628 if (chan->fcs == L2CAP_FCS_CRC16) {
629 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
630 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
633 skb->priority = HCI_PRIO_MAX;
634 l2cap_do_send(chan, skb);
/* Send an RNR S-frame when the local side is busy (remembering we sent
 * it), otherwise an RR, always acknowledging up to buffer_seq. */
637 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
639 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
640 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
641 set_bit(CONN_RNR_SENT, &chan->conn_state);
643 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
645 control |= __set_reqseq(chan, chan->buffer_seq);
647 l2cap_send_sframe(chan, control);
/* True when no Connect Request is outstanding for this channel. */
650 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
652 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment: once the peer's feature mask is known
 * and security passes, send a Connect Request; otherwise first issue an
 * Information Request for the feature mask and arm the info timer. */
655 static void l2cap_do_start(struct l2cap_chan *chan)
657 struct l2cap_conn *conn = chan->conn;
659 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
660 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
663 if (l2cap_chan_check_security(chan) &&
664 __l2cap_no_conn_pending(chan)) {
665 struct l2cap_conn_req req;
666 req.scid = cpu_to_le16(chan->scid);
669 chan->ident = l2cap_get_ident(conn);
670 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
672 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
/* Feature mask not yet requested: ask for it before connecting. */
676 struct l2cap_info_req req;
677 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
679 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
680 conn->info_ident = l2cap_get_ident(conn);
682 mod_timer(&conn->info_timer, jiffies +
683 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
685 l2cap_send_cmd(conn, conn->info_ident,
686 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether channel @mode is supported by both the peer
 * (@feat_mask) and the local stack; ERTM and streaming are always
 * enabled locally here. */
690 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
692 u32 local_feat_mask = l2cap_feat_mask;
694 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
697 case L2CAP_MODE_ERTM:
698 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
699 case L2CAP_MODE_STREAMING:
700 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request for @chan (stopping ERTM timers first) and
 * move the channel into BT_DISCONN; @err is recorded for the socket. */
706 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
709 struct l2cap_disconn_req req;
716 if (chan->mode == L2CAP_MODE_ERTM) {
717 __clear_retrans_timer(chan);
718 __clear_monitor_timer(chan);
719 __clear_ack_timer(chan);
722 req.dcid = cpu_to_le16(chan->dcid);
723 req.scid = cpu_to_le16(chan->scid);
724 l2cap_send_cmd(conn, l2cap_get_ident(conn),
725 L2CAP_DISCONN_REQ, sizeof(req), &req);
727 l2cap_state_change(chan, BT_DISCONN);
731 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its state machine: send
 * Connect Requests for BT_CONNECT channels and Connect Responses for
 * incoming BT_CONNECT2 channels, then start configuration. Called once
 * the connection (and its feature-mask exchange) is ready. */
732 static void l2cap_conn_start(struct l2cap_conn *conn)
734 struct l2cap_chan *chan, *tmp;
736 BT_DBG("conn %p", conn);
738 read_lock(&conn->chan_lock);
740 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
741 struct sock *sk = chan->sk;
745 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
750 if (chan->state == BT_CONNECT) {
751 struct l2cap_conn_req req;
753 if (!l2cap_chan_check_security(chan) ||
754 !__l2cap_no_conn_pending(chan)) {
/* Peer cannot do the requested mode and we must not fall back:
 * abort this channel. */
759 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
760 && test_bit(CONF_STATE2_DEVICE,
761 &chan->conf_state)) {
762 /* l2cap_chan_close() calls list_del(chan)
763 * so release the lock */
764 read_unlock(&conn->chan_lock);
765 l2cap_chan_close(chan, ECONNRESET);
766 read_lock(&conn->chan_lock);
771 req.scid = cpu_to_le16(chan->scid);
774 chan->ident = l2cap_get_ident(conn);
775 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
777 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
780 } else if (chan->state == BT_CONNECT2) {
781 struct l2cap_conn_rsp rsp;
783 rsp.scid = cpu_to_le16(chan->dcid);
784 rsp.dcid = cpu_to_le16(chan->scid);
/* Security done: either defer to userspace (authorization
 * pending) or accept and move to BT_CONFIG. */
786 if (l2cap_chan_check_security(chan)) {
787 if (bt_sk(sk)->defer_setup) {
788 struct sock *parent = bt_sk(sk)->parent;
789 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
790 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
792 parent->sk_data_ready(parent, 0);
795 l2cap_state_change(chan, BT_CONFIG);
796 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
797 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
800 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
801 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
804 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Start configuration unless a request is already out or the
 * response was not a success. */
807 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
808 rsp.result != L2CAP_CR_SUCCESS) {
813 set_bit(CONF_REQ_SENT, &chan->conf_state);
814 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
815 l2cap_build_conf_req(chan, buf), buf);
816 chan->num_conf_req++;
822 read_unlock(&conn->chan_lock);
825 /* Find socket with cid and source bdaddr.
826 * Returns closest match, locked.
/* Exact source-address match wins immediately; a BDADDR_ANY binding is
 * remembered as the fallback (returned via c1, tail elided). */
828 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
830 struct l2cap_chan *c, *c1 = NULL;
832 read_lock(&chan_list_lock);
834 list_for_each_entry(c, &chan_list, global_l) {
835 struct sock *sk = c->sk;
837 if (state && c->state != state)
840 if (c->scid == cid) {
842 if (!bacmp(&bt_sk(sk)->src, src)) {
843 read_unlock(&chan_list_lock);
848 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
853 read_unlock(&chan_list_lock);
/* Handle an incoming LE link: find the listener on the LE data CID,
 * spawn a child channel, attach it to the connection, and wake the
 * listening parent. */
858 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
860 struct sock *parent, *sk;
861 struct l2cap_chan *chan, *pchan;
865 /* Check if we have socket listening on cid */
866 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
873 bh_lock_sock(parent);
875 /* Check for backlog size */
876 if (sk_acceptq_is_full(parent)) {
877 BT_DBG("backlog full %d", parent->sk_ack_backlog);
881 chan = pchan->ops->new_connection(pchan->data);
887 write_lock_bh(&conn->chan_lock);
/* The new channel pins the HCI connection. */
889 hci_conn_hold(conn->hcon);
891 bacpy(&bt_sk(sk)->src, conn->src);
892 bacpy(&bt_sk(sk)->dst, conn->dst);
894 bt_accept_enqueue(parent, sk);
896 __l2cap_chan_add(conn, chan);
898 __set_chan_timer(chan, sk->sk_sndtimeo);
900 l2cap_state_change(chan, BT_CONNECTED);
901 parent->sk_data_ready(parent, 0);
903 write_unlock_bh(&conn->chan_lock);
906 bh_unlock_sock(parent);
/* Mark the channel fully established: reset configuration state, stop
 * the channel timer, and wake the socket (and its accepting parent). */
909 static void l2cap_chan_ready(struct sock *sk)
911 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
912 struct sock *parent = bt_sk(sk)->parent;
914 BT_DBG("sk %p, parent %p", sk, parent);
916 chan->conf_state = 0;
917 __clear_chan_timer(chan);
919 l2cap_state_change(chan, BT_CONNECTED);
920 sk->sk_state_change(sk);
923 parent->sk_data_ready(parent, 0);
/* HCI link is up: accept incoming LE connections, start SMP security on
 * outgoing LE links, then push every attached channel forward (LE data
 * channels wait for security, connectionless ones go straight to
 * connected, ACL connect channels start the L2CAP handshake). */
926 static void l2cap_conn_ready(struct l2cap_conn *conn)
928 struct l2cap_chan *chan;
930 BT_DBG("conn %p", conn);
932 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
933 l2cap_le_conn_ready(conn);
935 if (conn->hcon->out && conn->hcon->type == LE_LINK)
936 smp_conn_security(conn, conn->hcon->pending_sec_level);
938 read_lock(&conn->chan_lock);
940 list_for_each_entry(chan, &conn->chan_l, list) {
941 struct sock *sk = chan->sk;
945 if (conn->hcon->type == LE_LINK) {
946 if (smp_conn_security(conn, chan->sec_level))
947 l2cap_chan_ready(sk);
949 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
950 __clear_chan_timer(chan);
951 l2cap_state_change(chan, BT_CONNECTED);
952 sk->sk_state_change(sk);
954 } else if (chan->state == BT_CONNECT)
955 l2cap_do_start(chan);
960 read_unlock(&conn->chan_lock);
963 /* Notify sockets that we cannot guaranty reliability anymore */
/* Report @err on every channel that requested forced reliability
 * (the sk_err assignment itself is elided from this excerpt). */
964 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
966 struct l2cap_chan *chan;
968 BT_DBG("conn %p", conn);
970 read_lock(&conn->chan_lock);
972 list_for_each_entry(chan, &conn->chan_l, list) {
973 struct sock *sk = chan->sk;
975 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
979 read_unlock(&conn->chan_lock);
/* Information-request timer expired: give up on the feature-mask
 * exchange, mark it done, and start the pending channels anyway. */
982 static void l2cap_info_timeout(unsigned long arg)
984 struct l2cap_conn *conn = (void *) arg;
986 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
987 conn->info_ident = 0;
989 l2cap_conn_start(conn);
/* Tear the whole L2CAP connection down with @err: close every channel,
 * release the HCI channel, cancel pending timers, destroy any SMP
 * context, and detach from the hci_conn (final kfree elided). */
992 static void l2cap_conn_del(struct hci_conn *hcon, int err)
994 struct l2cap_conn *conn = hcon->l2cap_data;
995 struct l2cap_chan *chan, *l;
1001 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1003 kfree_skb(conn->rx_skb);
1006 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1009 l2cap_chan_del(chan, err);
1011 chan->ops->close(chan->data);
1014 hci_chan_del(conn->hchan);
1016 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1017 del_timer_sync(&conn->info_timer);
1019 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1020 del_timer(&conn->security_timer);
1021 smp_chan_destroy(conn);
1024 hcon->l2cap_data = NULL;
/* LE security (SMP) timer expired: drop the whole connection. */
1028 static void security_timeout(unsigned long arg)
1030 struct l2cap_conn *conn = (void *) arg;
1032 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon: allocate the
 * HCI channel and connection object, pick the MTU by link type, and set
 * up either the SMP security timer (LE) or the info timer (ACL). */
1035 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1037 struct l2cap_conn *conn = hcon->l2cap_data;
1038 struct hci_chan *hchan;
1043 hchan = hci_chan_create(hcon);
/* Allocation failure: release the HCI channel again (NULL check
 * elided from this excerpt). */
1047 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1049 hci_chan_del(hchan);
1053 hcon->l2cap_data = conn;
1055 conn->hchan = hchan;
1057 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1059 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1060 conn->mtu = hcon->hdev->le_mtu;
1062 conn->mtu = hcon->hdev->acl_mtu;
1064 conn->src = &hcon->hdev->bdaddr;
1065 conn->dst = &hcon->dst;
1067 conn->feat_mask = 0;
1069 spin_lock_init(&conn->lock);
1070 rwlock_init(&conn->chan_lock);
1072 INIT_LIST_HEAD(&conn->chan_l);
1074 if (hcon->type == LE_LINK)
1075 setup_timer(&conn->security_timer, security_timeout,
1076 (unsigned long) conn);
1078 setup_timer(&conn->info_timer, l2cap_info_timeout,
1079 (unsigned long) conn);
1081 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Attach @chan to @conn under the connection's channel-list write lock. */
1086 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1088 write_lock_bh(&conn->chan_lock);
1089 __l2cap_chan_add(conn, chan);
1090 write_unlock_bh(&conn->chan_lock);
1093 /* ---- Socket interface ---- */
1095 /* Find socket with psm and source bdaddr.
1096 * Returns closest match.
/* Exact source-address match wins; a BDADDR_ANY binding is kept as the
 * fallback (returned via c1, tail elided). */
1098 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1100 struct l2cap_chan *c, *c1 = NULL;
1102 read_lock(&chan_list_lock);
1104 list_for_each_entry(c, &chan_list, global_l) {
1105 struct sock *sk = c->sk;
1107 if (state && c->state != state)
1110 if (c->psm == psm) {
1112 if (!bacmp(&bt_sk(sk)->src, src)) {
1113 read_unlock(&chan_list_lock);
1118 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1123 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection for @chan: route to an HCI
 * device, create the ACL or LE link with the required security, attach
 * the channel, and begin the handshake once the link is connected.
 * Returns 0 or a negative errno (some error paths elided). */
1128 int l2cap_chan_connect(struct l2cap_chan *chan)
1130 struct sock *sk = chan->sk;
1131 bdaddr_t *src = &bt_sk(sk)->src;
1132 bdaddr_t *dst = &bt_sk(sk)->dst;
1133 struct l2cap_conn *conn;
1134 struct hci_conn *hcon;
1135 struct hci_dev *hdev;
1139 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1142 hdev = hci_get_route(dst, src);
1144 return -EHOSTUNREACH;
1146 hci_dev_lock_bh(hdev);
1148 auth_type = l2cap_get_auth_type(chan);
/* LE data CID selects an LE link, anything else an ACL link. */
1150 if (chan->dcid == L2CAP_CID_LE_DATA)
1151 hcon = hci_connect(hdev, LE_LINK, dst,
1152 chan->sec_level, auth_type);
1154 hcon = hci_connect(hdev, ACL_LINK, dst,
1155 chan->sec_level, auth_type);
1158 err = PTR_ERR(hcon);
1162 conn = l2cap_conn_add(hcon, 0);
1169 /* Update source addr of the socket */
1170 bacpy(src, conn->src);
1172 l2cap_chan_add(conn, chan);
1174 l2cap_state_change(chan, BT_CONNECT);
1175 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Link already up: non-connection-oriented channels complete
 * immediately (subject to security); others start l2cap_do_start(). */
1177 if (hcon->state == BT_CONNECTED) {
1178 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1179 __clear_chan_timer(chan);
1180 if (l2cap_chan_check_security(chan))
1181 l2cap_state_change(chan, BT_CONNECTED);
1183 l2cap_do_start(chan);
1189 hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until every ERTM frame has been acknowledged or
 * the connection goes away; returns 0, a signal errno, or the socket
 * error. Timeout initialization is elided from this excerpt. */
1194 int __l2cap_wait_ack(struct sock *sk)
1196 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1197 DECLARE_WAITQUEUE(wait, current);
1201 add_wait_queue(sk_sleep(sk), &wait);
1202 set_current_state(TASK_INTERRUPTIBLE);
1203 while (chan->unacked_frames > 0 && chan->conn) {
1207 if (signal_pending(current)) {
1208 err = sock_intr_errno(timeo);
1213 timeo = schedule_timeout(timeo);
1215 set_current_state(TASK_INTERRUPTIBLE);
1217 err = sock_error(sk);
1221 set_current_state(TASK_RUNNING);
1222 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: the peer stayed silent after a poll. Give up and
 * disconnect once remote_max_tx retries are exhausted; otherwise re-arm
 * the monitor and poll again with RR/RNR + P-bit. */
1226 static void l2cap_monitor_timeout(unsigned long arg)
1228 struct l2cap_chan *chan = (void *) arg;
1229 struct sock *sk = chan->sk;
1231 BT_DBG("chan %p", chan);
1234 if (chan->retry_count >= chan->remote_max_tx) {
1235 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1240 chan->retry_count++;
1241 __set_monitor_timer(chan);
1243 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: no ack arrived in time. Enter the WAIT_F
 * state, start the monitor timer, and poll the peer. */
1247 static void l2cap_retrans_timeout(unsigned long arg)
1249 struct l2cap_chan *chan = (void *) arg;
1250 struct sock *sk = chan->sk;
1252 BT_DBG("chan %p", chan);
1255 chan->retry_count = 1;
1256 __set_monitor_timer(chan);
1258 set_bit(CONN_WAIT_F, &chan->conn_state);
1260 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Free transmitted frames up to the expected-ack sequence number and
 * stop the retransmission timer once nothing is outstanding. */
1264 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1266 struct sk_buff *skb;
1268 while ((skb = skb_peek(&chan->tx_q)) &&
1269 chan->unacked_frames) {
1270 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1273 skb = skb_dequeue(&chan->tx_q);
1276 chan->unacked_frames--;
1279 if (!chan->unacked_frames)
1280 __clear_retrans_timer(chan);
/* Streaming mode: drain the tx queue, stamping each I-frame with the
 * next tx sequence number and (optionally) its CRC16 FCS before
 * sending. No retransmission bookkeeping in this mode. */
1283 static void l2cap_streaming_send(struct l2cap_chan *chan)
1285 struct sk_buff *skb;
1289 while ((skb = skb_dequeue(&chan->tx_q))) {
1290 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1291 control |= __set_txseq(chan, chan->next_tx_seq);
1292 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1294 if (chan->fcs == L2CAP_FCS_CRC16) {
1295 fcs = crc16(0, (u8 *)skb->data,
1296 skb->len - L2CAP_FCS_SIZE);
1297 put_unaligned_le16(fcs,
1298 skb->data + skb->len - L2CAP_FCS_SIZE);
1301 l2cap_do_send(chan, skb);
1303 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the I-frame with sequence number @tx_seq: find it in the
 * tx queue, clone it, refresh its control field (F-bit, reqseq, txseq)
 * and FCS, and send the clone. Disconnects when the frame has already
 * been sent remote_max_tx times. */
1307 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1309 struct sk_buff *skb, *tx_skb;
1313 skb = skb_peek(&chan->tx_q);
1317 while (bt_cb(skb)->tx_seq != tx_seq) {
1318 if (skb_queue_is_last(&chan->tx_q, skb))
1321 skb = skb_queue_next(&chan->tx_q, skb);
1324 if (chan->remote_max_tx &&
1325 bt_cb(skb)->retries == chan->remote_max_tx) {
1326 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1330 tx_skb = skb_clone(skb, GFP_ATOMIC);
1331 bt_cb(skb)->retries++;
/* Keep only the SAR bits from the stored control field, then rebuild
 * the rest for this (re)transmission. */
1333 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1334 control &= __get_sar_mask(chan);
1336 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1337 control |= __set_ctrl_final(chan);
1339 control |= __set_reqseq(chan, chan->buffer_seq);
1340 control |= __set_txseq(chan, tx_seq);
1342 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1344 if (chan->fcs == L2CAP_FCS_CRC16) {
1345 fcs = crc16(0, (u8 *)tx_skb->data,
1346 tx_skb->len - L2CAP_FCS_SIZE);
1347 put_unaligned_le16(fcs,
1348 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1351 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send queued I-frames starting at tx_send_head
 * until the transmit window fills, cloning each skb and stamping
 * control field and FCS. Counts unacked frames, arms the retransmission
 * timer, and advances tx_send_head. Return value/counters partly elided. */
1354 static int l2cap_ertm_send(struct l2cap_chan *chan)
1356 struct sk_buff *skb, *tx_skb;
1361 if (chan->state != BT_CONNECTED)
1364 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1366 if (chan->remote_max_tx &&
1367 bt_cb(skb)->retries == chan->remote_max_tx) {
1368 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1372 tx_skb = skb_clone(skb, GFP_ATOMIC);
1374 bt_cb(skb)->retries++;
1376 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1377 control &= __get_sar_mask(chan);
1379 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1380 control |= __set_ctrl_final(chan);
1382 control |= __set_reqseq(chan, chan->buffer_seq);
1383 control |= __set_txseq(chan, chan->next_tx_seq);
1385 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS is written into the original skb's data; the clone shares that
 * buffer, so the transmitted frame carries it too. */
1387 if (chan->fcs == L2CAP_FCS_CRC16) {
1388 fcs = crc16(0, (u8 *)skb->data,
1389 tx_skb->len - L2CAP_FCS_SIZE);
1390 put_unaligned_le16(fcs, skb->data +
1391 tx_skb->len - L2CAP_FCS_SIZE);
1394 l2cap_do_send(chan, tx_skb);
1396 __set_retrans_timer(chan);
1398 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1400 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* First transmission of this frame: it is now awaiting an ack. */
1402 if (bt_cb(skb)->retries == 1)
1403 chan->unacked_frames++;
1405 chan->frames_sent++;
1407 if (skb_queue_is_last(&chan->tx_q, skb))
1408 chan->tx_send_head = NULL;
1410 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the head of the tx queue and resend
 * everything from the last acknowledged sequence number onward. */
1418 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1422 if (!skb_queue_empty(&chan->tx_q))
1423 chan->tx_send_head = chan->tx_q.next;
1425 chan->next_tx_seq = chan->expected_ack_seq;
1426 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR when locally busy; otherwise
 * try to piggy-back the ack on pending I-frames, falling back to an
 * explicit RR S-frame when nothing was sent. */
1430 static void l2cap_send_ack(struct l2cap_chan *chan)
1434 control |= __set_reqseq(chan, chan->buffer_seq);
1436 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1437 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1438 set_bit(CONN_RNR_SENT, &chan->conn_state);
1439 l2cap_send_sframe(chan, control);
1443 if (l2cap_ertm_send(chan) > 0)
1446 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1447 l2cap_send_sframe(chan, control);
/* Send a final SREJ S-frame requesting the last entry on the SREJ list
 * (F-bit set, reqseq taken from the list tail). */
1450 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1452 struct srej_list *tail;
1455 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1456 control |= __set_ctrl_final(chan);
1458 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1459 control |= __set_reqseq(chan, tail->tx_seq);
1461 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb itself, the remainder into a chain of
 * continuation fragments each capped at the connection MTU. Error
 * handling and loop framing are partly elided in this excerpt. */
1464 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1466 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1467 struct sk_buff **frag;
1470 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1476 /* Continuation fragments (no L2CAP header) */
1477 frag = &skb_shinfo(skb)->frag_list;
1479 count = min_t(unsigned int, conn->mtu, len);
1481 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1484 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1487 (*frag)->priority = skb->priority;
1492 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header plus the 2-byte
 * PSM, then the user payload copied from @msg. Returns the skb or an
 * ERR_PTR on allocation/copy failure. */
1498 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1499 struct msghdr *msg, size_t len,
1502 struct sock *sk = chan->sk;
1503 struct l2cap_conn *conn = chan->conn;
1504 struct sk_buff *skb;
1505 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1506 struct l2cap_hdr *lh;
1508 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1510 count = min_t(unsigned int, (conn->mtu - hlen), len);
1511 skb = bt_skb_send_alloc(sk, count + hlen,
1512 msg->msg_flags & MSG_DONTWAIT, &err);
1514 return ERR_PTR(err);
1516 skb->priority = priority;
1518 /* Create L2CAP header */
1519 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1520 lh->cid = cpu_to_le16(chan->dcid);
1521 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1522 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1524 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1525 if (unlikely(err < 0)) {
1527 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload. Returns the skb or an ERR_PTR on failure.
 * NOTE(review): elided view — 'priority' parameter line and failure
 * cleanup are not visible.
 */
1532 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1533 struct msghdr *msg, size_t len,
1536 struct sock *sk = chan->sk;
1537 struct l2cap_conn *conn = chan->conn;
1538 struct sk_buff *skb;
1539 int err, count, hlen = L2CAP_HDR_SIZE;
1540 struct l2cap_hdr *lh;
1542 BT_DBG("sk %p len %d", sk, (int)len);
/* Head skb holds header + first chunk; the rest goes into frag_list. */
1544 count = min_t(unsigned int, (conn->mtu - hlen), len);
1545 skb = bt_skb_send_alloc(sk, count + hlen,
1546 msg->msg_flags & MSG_DONTWAIT, &err);
1548 return ERR_PTR(err);
1550 skb->priority = priority;
1552 /* Create L2CAP header */
1553 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1554 lh->cid = cpu_to_le16(chan->dcid);
1555 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1557 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1558 if (unlikely(err < 0)) {
1560 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, control field
 * (enhanced or extended width), optional SDU length (for SAR START),
 * payload, and an FCS placeholder when CRC16 is negotiated.
 * Returns the skb or an ERR_PTR on failure.
 * NOTE(review): elided view — the connection-state check guarding the
 * -ENOTCONN return and the SAR/sdulen condition lines are not visible.
 */
1565 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1566 struct msghdr *msg, size_t len,
1567 u32 control, u16 sdulen)
1569 struct sock *sk = chan->sk;
1570 struct l2cap_conn *conn = chan->conn;
1571 struct sk_buff *skb;
1572 int err, count, hlen;
1573 struct l2cap_hdr *lh;
1575 BT_DBG("sk %p len %d", sk, (int)len);
1578 return ERR_PTR(-ENOTCONN);
/* Header width depends on whether the extended control field is in use. */
1580 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1581 hlen = L2CAP_EXT_HDR_SIZE;
1583 hlen = L2CAP_ENH_HDR_SIZE;
/* Room for the SDU length field (first segment of a segmented SDU). */
1586 hlen += L2CAP_SDULEN_SIZE;
1588 if (chan->fcs == L2CAP_FCS_CRC16)
1589 hlen += L2CAP_FCS_SIZE;
1591 count = min_t(unsigned int, (conn->mtu - hlen), len);
1592 skb = bt_skb_send_alloc(sk, count + hlen,
1593 msg->msg_flags & MSG_DONTWAIT, &err);
1595 return ERR_PTR(err);
1597 /* Create L2CAP header */
1598 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1599 lh->cid = cpu_to_le16(chan->dcid);
1600 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Control field width (2 or 4 bytes) is derived per-channel. */
1602 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1605 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1607 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1608 if (unlikely(err < 0)) {
1610 return ERR_PTR(err);
/* FCS placeholder; the real CRC is filled in on transmit. */
1613 if (chan->fcs == L2CAP_FCS_CRC16)
1614 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1616 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START I-frame
 * (carrying the total SDU length) followed by CONTINUE frames and a
 * final END frame, building them on a local queue and splicing onto the
 * channel tx queue only once all segments were created successfully.
 * NOTE(review): elided view — the loop construct, 'control'/'size'/'buflen'
 * declarations and some len/size bookkeeping lines are not visible.
 */
1620 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1622 struct sk_buff *skb;
1623 struct sk_buff_head sar_queue;
1627 skb_queue_head_init(&sar_queue);
/* First segment: SAR = START, sdulen = total SDU length. */
1628 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1629 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1631 return PTR_ERR(skb);
1633 __skb_queue_tail(&sar_queue, skb);
1634 len -= chan->remote_mps;
1635 size += chan->remote_mps;
/* Middle segments use CONTINUE; the final one uses END. */
1640 if (len > chan->remote_mps) {
1641 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1642 buflen = chan->remote_mps;
1644 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1648 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On any failure drop everything built so far — all-or-nothing. */
1650 skb_queue_purge(&sar_queue);
1651 return PTR_ERR(skb);
1654 __skb_queue_tail(&sar_queue, skb);
1658 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1659 if (chan->tx_send_head == NULL)
1660 chan->tx_send_head = sar_queue.next;
/* Channel send entry point: dispatch on channel type/mode.
 * Connectionless channels send one G-frame directly; basic mode sends a
 * single B-frame after an MTU check; ERTM/streaming queue one I-frame
 * (or SAR-segment the SDU) and then kick the respective transmit engine.
 * NOTE(review): elided view — 'control'/'err' declarations, several
 * returns and the CONN_REMOTE_BUSY branch body are not visible.
 */
1665 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1668 struct sk_buff *skb;
1672 /* Connectionless channel */
1673 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1674 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1676 return PTR_ERR(skb);
1678 l2cap_do_send(chan, skb);
1682 switch (chan->mode) {
1683 case L2CAP_MODE_BASIC:
1684 /* Check outgoing MTU */
1685 if (len > chan->omtu)
1688 /* Create a basic PDU */
1689 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1691 return PTR_ERR(skb);
1693 l2cap_do_send(chan, skb);
1697 case L2CAP_MODE_ERTM:
1698 case L2CAP_MODE_STREAMING:
1699 /* Entire SDU fits into one PDU */
1700 if (len <= chan->remote_mps) {
1701 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1702 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1705 return PTR_ERR(skb);
1707 __skb_queue_tail(&chan->tx_q, skb);
1709 if (chan->tx_send_head == NULL)
1710 chan->tx_send_head = skb;
1713 /* Segment SDU into multiples PDUs */
1714 err = l2cap_sar_segment_sdu(chan, msg, len);
/* Streaming mode transmits immediately; ERTM defers while the remote
 * is busy or a poll (F-bit) is outstanding. */
1719 if (chan->mode == L2CAP_MODE_STREAMING) {
1720 l2cap_streaming_send(chan);
1725 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1726 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1731 err = l2cap_ertm_send(chan);
1738 BT_DBG("bad state %1.1x", chan->mode);
1745 /* Copy frame to all raw sockets on that connection */
/* Walk the connection's channel list under the read lock and deliver a
 * clone of 'skb' to every raw channel (skipping the originating socket).
 * NOTE(review): elided view — the 'continue' statements and the check
 * against the originating sk are not visible.
 */
1746 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1748 struct sk_buff *nskb;
1749 struct l2cap_chan *chan;
1751 BT_DBG("conn %p", conn);
1753 read_lock(&conn->chan_lock);
1754 list_for_each_entry(chan, &conn->chan_l, list) {
1755 struct sock *sk = chan->sk;
1756 if (chan->chan_type != L2CAP_CHAN_RAW)
1759 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: called in a context where sleeping is not allowed. */
1762 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv() takes ownership on success; presumably nskb is freed on error
 * in an elided line — TODO confirm. */
1766 if (chan->ops->recv(chan->data, nskb))
1769 read_unlock(&conn->chan_lock);
1772 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header on the signalling CID
 * (LE or BR/EDR), command header (code/ident/len), then 'dlen' bytes of
 * 'data', fragmented over frag_list if the command exceeds conn->mtu.
 * NOTE(review): elided view — 'len'/'count' declarations, cmd->code
 * assignment, NULL-allocation checks and the data-advance lines are not
 * visible.
 */
1773 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1774 u8 code, u8 ident, u16 dlen, void *data)
1776 struct sk_buff *skb, **frag;
1777 struct l2cap_cmd_hdr *cmd;
1778 struct l2cap_hdr *lh;
1781 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1782 conn, code, ident, dlen);
1784 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1785 count = min_t(unsigned int, conn->mtu, len);
1787 skb = bt_skb_alloc(count, GFP_ATOMIC);
1791 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1792 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links. */
1794 if (conn->hcon->type == LE_LINK)
1795 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1797 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1799 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1802 cmd->len = cpu_to_le16(dlen);
/* First fragment: whatever payload fits after the two headers. */
1805 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1806 memcpy(skb_put(skb, count), data, count);
1812 /* Continuation fragments (no L2CAP header) */
1813 frag = &skb_shinfo(skb)->frag_list;
1815 count = min_t(unsigned int, conn->mtu, len);
1817 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1821 memcpy(skb_put(*frag, count), data, count);
1826 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: return its type/olen, and its
 * value widened to unsigned long (1/2/4-byte values by size; anything
 * else as a pointer to the raw bytes). Returns the consumed length so
 * callers can advance through the option list.
 * NOTE(review): elided view — the switch on opt->len, *ptr advance and
 * return statement are not visible.
 */
1836 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1838 struct l2cap_conf_opt *opt = *ptr;
1841 len = L2CAP_CONF_OPT_SIZE + opt->len;
1849 *val = *((u8 *) opt->val);
1853 *val = get_unaligned_le16(opt->val);
1857 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer instead of a value. */
1861 *val = (unsigned long) opt->val;
1865 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/val) at *ptr and advance the
 * pointer past it. 1/2/4-byte values are stored inline (little-endian);
 * larger values are memcpy'd from the pointer passed via 'val'.
 * NOTE(review): elided view — the switch statement and opt->type/opt->len
 * assignments are not visible.
 */
1869 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1871 struct l2cap_conf_opt *opt = *ptr;
1873 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1880 *((u8 *) opt->val) = val;
1884 put_unaligned_le16(val, opt->val);
1888 put_unaligned_le32(val, opt->val);
/* len > 4: 'val' actually carries a pointer to the option payload. */
1892 memcpy(opt->val, (void *) val, len);
1896 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option populated from the
 * channel's local QoS parameters; field choices depend on channel mode.
 * NOTE(review): elided view — the streaming-mode id assignment and the
 * default case are not visible.
 */
1899 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1901 struct l2cap_conf_efs efs;
1903 switch (chan->mode) {
1904 case L2CAP_MODE_ERTM:
1905 efs.id = chan->local_id;
1906 efs.stype = chan->local_stype;
1907 efs.msdu = cpu_to_le16(chan->local_msdu);
1908 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
/* ERTM uses spec defaults for access latency and flush timeout. */
1909 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1910 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1913 case L2CAP_MODE_STREAMING:
/* Streaming advertises best-effort service. */
1915 efs.stype = L2CAP_SERV_BESTEFFORT;
1916 efs.msdu = cpu_to_le16(chan->local_msdu);
1917 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1926 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1927 (unsigned long) &efs);
/* ERTM ack-timer callback: send a pending acknowledgement for received
 * I-frames. Runs in timer (softirq) context, hence the bh socket lock.
 */
1930 static void l2cap_ack_timeout(unsigned long arg)
1932 struct l2cap_chan *chan = (void *) arg;
1934 bh_lock_sock(chan->sk);
1935 l2cap_send_ack(chan);
1936 bh_unlock_sock(chan->sk);
/* Initialise ERTM per-channel state: reset sequence counters, arm the
 * three ERTM timers (retransmission, monitor, ack), set up the SREJ
 * queue/list, and install the ERTM backlog receive handler on the socket.
 */
1939 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1941 struct sock *sk = chan->sk;
1943 chan->expected_ack_seq = 0;
1944 chan->unacked_frames = 0;
1945 chan->buffer_seq = 0;
1946 chan->num_acked = 0;
1947 chan->frames_sent = 0;
/* All three timers get the channel pointer as callback argument. */
1949 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1950 (unsigned long) chan);
1951 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1952 (unsigned long) chan);
1953 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
/* Out-of-sequence frames held here until the gap is filled. */
1955 skb_queue_head_init(&chan->srej_q);
1957 INIT_LIST_HEAD(&chan->srej_l);
/* Frames arriving while the socket is owned by user go through the
 * ERTM-aware backlog handler. */
1960 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Choose the operating mode: keep ERTM/streaming only if the remote's
 * feature mask supports it, otherwise fall back to basic mode.
 * NOTE(review): elided view — the switch statement and the supported-mode
 * return are not visible.
 */
1963 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1966 case L2CAP_MODE_STREAMING:
1967 case L2CAP_MODE_ERTM:
1968 if (l2cap_mode_supported(mode, remote_feat_mask))
1972 return L2CAP_MODE_BASIC;
/* Extended window size usable only with high-speed (AMP) enabled and the
 * remote advertising the extended-window feature bit. */
1976 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1978 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended flow spec usable only with high-speed (AMP) enabled and the
 * remote advertising the extended-flow feature bit. */
1981 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
1983 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Pick the TX window regime: if the requested window exceeds the default
 * and extended window sizes are supported, switch to the extended control
 * field; otherwise clamp the window to the standard maximum.
 */
1986 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1988 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1989 __l2cap_ews_supported(chan)) {
1990 /* use extended control field */
1991 set_bit(FLAG_EXT_CTRL, &chan->flags);
1992 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
1994 chan->tx_win = min_t(u16, chan->tx_win,
1995 L2CAP_DEFAULT_TX_WINDOW);
1996 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configuration Request for 'chan' into 'data':
 * optional MTU option, then mode-specific RFC/EFS/FCS/EWS options for
 * basic, ERTM or streaming mode. Returns the total request length
 * (presumably ptr - data in an elided line — TODO confirm).
 * NOTE(review): elided view — 'size' declaration, several break/goto
 * lines and the done label are not visible.
 */
2000 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2002 struct l2cap_conf_req *req = data;
2003 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2004 void *ptr = req->data;
2007 BT_DBG("chan %p", chan);
/* Only (re)select mode on the very first request/response exchange. */
2009 if (chan->num_conf_req || chan->num_conf_rsp)
2012 switch (chan->mode) {
2013 case L2CAP_MODE_STREAMING:
2014 case L2CAP_MODE_ERTM:
2015 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2018 if (__l2cap_efs_supported(chan))
2019 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2023 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise MTU when it differs from the spec default. */
2028 if (chan->imtu != L2CAP_DEFAULT_MTU)
2029 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2031 switch (chan->mode) {
2032 case L2CAP_MODE_BASIC:
2033 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2034 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
/* Explicit basic-mode RFC with all retransmission fields zeroed. */
2037 rfc.mode = L2CAP_MODE_BASIC;
2039 rfc.max_transmit = 0;
2040 rfc.retrans_timeout = 0;
2041 rfc.monitor_timeout = 0;
2042 rfc.max_pdu_size = 0;
2044 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2045 (unsigned long) &rfc);
2048 case L2CAP_MODE_ERTM:
2049 rfc.mode = L2CAP_MODE_ERTM;
2050 rfc.max_transmit = chan->max_tx;
2051 rfc.retrans_timeout = 0;
2052 rfc.monitor_timeout = 0;
/* MPS bounded by connection MTU minus worst-case header overhead. */
2054 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2055 L2CAP_EXT_HDR_SIZE -
2058 rfc.max_pdu_size = cpu_to_le16(size);
2060 l2cap_txwin_setup(chan);
2062 rfc.txwin_size = min_t(u16, chan->tx_win,
2063 L2CAP_DEFAULT_TX_WINDOW);
2065 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2066 (unsigned long) &rfc);
2068 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2069 l2cap_add_opt_efs(&ptr, chan);
2071 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2074 if (chan->fcs == L2CAP_FCS_NONE ||
2075 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2076 chan->fcs = L2CAP_FCS_NONE;
2077 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
/* Extended window option only when the extended control field is on. */
2080 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2081 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2085 case L2CAP_MODE_STREAMING:
2086 rfc.mode = L2CAP_MODE_STREAMING;
2088 rfc.max_transmit = 0;
2089 rfc.retrans_timeout = 0;
2090 rfc.monitor_timeout = 0;
2092 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2093 L2CAP_EXT_HDR_SIZE -
2096 rfc.max_pdu_size = cpu_to_le16(size);
2098 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2099 (unsigned long) &rfc);
2101 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2102 l2cap_add_opt_efs(&ptr, chan);
2104 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2107 if (chan->fcs == L2CAP_FCS_NONE ||
2108 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2109 chan->fcs = L2CAP_FCS_NONE;
2110 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2115 req->dcid = cpu_to_le16(chan->dcid);
2116 req->flags = cpu_to_le16(0);
/* Parse the buffered peer Configuration Request (chan->conf_req) and
 * build the Configuration Response into 'data': walk the options,
 * record MTU/flush/RFC/FCS/EFS/EWS, negotiate the mode, then fill in
 * the output options and result code. Returns the response length
 * (presumably ptr - data in an elided line — TODO confirm) or
 * -ECONNREFUSED when negotiation cannot proceed.
 * NOTE(review): elided view — 'val'/'size' declarations, 'break's,
 * 'done:' label and several control-flow lines are not visible.
 */
2121 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2123 struct l2cap_conf_rsp *rsp = data;
2124 void *ptr = rsp->data;
2125 void *req = chan->conf_req;
2126 int len = chan->conf_len;
2127 int type, hint, olen;
2129 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2130 struct l2cap_conf_efs efs;
2132 u16 mtu = L2CAP_DEFAULT_MTU;
2133 u16 result = L2CAP_CONF_SUCCESS;
2136 BT_DBG("chan %p", chan);
/* First pass: decode every option in the buffered request. */
2138 while (len >= L2CAP_CONF_OPT_SIZE) {
2139 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; others must be understood. */
2141 hint = type & L2CAP_CONF_HINT;
2142 type &= L2CAP_CONF_MASK;
2145 case L2CAP_CONF_MTU:
2149 case L2CAP_CONF_FLUSH_TO:
2150 chan->flush_to = val;
2153 case L2CAP_CONF_QOS:
2156 case L2CAP_CONF_RFC:
2157 if (olen == sizeof(rfc))
2158 memcpy(&rfc, (void *) val, olen);
2161 case L2CAP_CONF_FCS:
2162 if (val == L2CAP_FCS_NONE)
2163 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2166 case L2CAP_CONF_EFS:
2168 if (olen == sizeof(efs))
2169 memcpy(&efs, (void *) val, olen);
2172 case L2CAP_CONF_EWS:
/* Peer asked for extended window; refuse if we cannot support it
 * (condition elided — presumably !enable_hs). */
2174 return -ECONNREFUSED;
2176 set_bit(FLAG_EXT_CTRL, &chan->flags);
2177 set_bit(CONF_EWS_RECV, &chan->conf_state);
2178 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2179 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN. */
2186 result = L2CAP_CONF_UNKNOWN;
2187 *((u8 *) ptr++) = type;
/* Mode negotiation happens only on the first exchange. */
2192 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2195 switch (chan->mode) {
2196 case L2CAP_MODE_STREAMING:
2197 case L2CAP_MODE_ERTM:
2198 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2199 chan->mode = l2cap_select_mode(rfc.mode,
2200 chan->conn->feat_mask);
2205 if (__l2cap_efs_supported(chan))
2206 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2208 return -ECONNREFUSED;
2211 if (chan->mode != rfc.mode)
2212 return -ECONNREFUSED;
/* Mode mismatch after first round: reject with our preferred mode. */
2218 if (chan->mode != rfc.mode) {
2219 result = L2CAP_CONF_UNACCEPT;
2220 rfc.mode = chan->mode;
2222 if (chan->num_conf_rsp == 1)
2223 return -ECONNREFUSED;
2225 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2226 sizeof(rfc), (unsigned long) &rfc);
2229 if (result == L2CAP_CONF_SUCCESS) {
2230 /* Configure output options and let the other side know
2231 * which ones we don't like. */
2233 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2234 result = L2CAP_CONF_UNACCEPT;
2237 set_bit(CONF_MTU_DONE, &chan->conf_state);
2239 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service-type compatibility check (both sides must agree unless
 * one declares NO TRAFFIC). */
2242 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2243 efs.stype != L2CAP_SERV_NOTRAFIC &&
2244 efs.stype != chan->local_stype) {
2246 result = L2CAP_CONF_UNACCEPT;
2248 if (chan->num_conf_req >= 1)
2249 return -ECONNREFUSED;
2251 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2253 (unsigned long) &efs);
2255 /* Send PENDING Conf Rsp */
2256 result = L2CAP_CONF_PENDING;
2257 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2262 case L2CAP_MODE_BASIC:
2263 chan->fcs = L2CAP_FCS_NONE;
2264 set_bit(CONF_MODE_DONE, &chan->conf_state);
2267 case L2CAP_MODE_ERTM:
2268 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2269 chan->remote_tx_win = rfc.txwin_size;
2271 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2273 chan->remote_max_tx = rfc.max_transmit;
/* Clamp remote MPS to what fits in the connection MTU. */
2275 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2277 L2CAP_EXT_HDR_SIZE -
2280 rfc.max_pdu_size = cpu_to_le16(size);
2281 chan->remote_mps = size;
2283 rfc.retrans_timeout =
2284 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2285 rfc.monitor_timeout =
2286 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2288 set_bit(CONF_MODE_DONE, &chan->conf_state);
2290 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2291 sizeof(rfc), (unsigned long) &rfc);
2293 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2294 chan->remote_id = efs.id;
2295 chan->remote_stype = efs.stype;
2296 chan->remote_msdu = le16_to_cpu(efs.msdu);
2297 chan->remote_flush_to =
2298 le32_to_cpu(efs.flush_to);
2299 chan->remote_acc_lat =
2300 le32_to_cpu(efs.acc_lat);
2301 chan->remote_sdu_itime =
2302 le32_to_cpu(efs.sdu_itime);
2303 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2304 sizeof(efs), (unsigned long) &efs);
2308 case L2CAP_MODE_STREAMING:
2309 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2311 L2CAP_EXT_HDR_SIZE -
2314 rfc.max_pdu_size = cpu_to_le16(size);
2315 chan->remote_mps = size;
2317 set_bit(CONF_MODE_DONE, &chan->conf_state);
2319 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2320 sizeof(rfc), (unsigned long) &rfc);
/* Unsupported mode: unaccept and report the mode we insist on. */
2325 result = L2CAP_CONF_UNACCEPT;
2327 memset(&rfc, 0, sizeof(rfc));
2328 rfc.mode = chan->mode;
2331 if (result == L2CAP_CONF_SUCCESS)
2332 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2334 rsp->scid = cpu_to_le16(chan->dcid);
2335 rsp->result = cpu_to_le16(result);
2336 rsp->flags = cpu_to_le16(0x0000);
/* Parse a peer Configuration Response and build our follow-up request
 * into 'data', adopting accepted values (MTU, flush timeout, RFC, EWS,
 * EFS) into the channel. Returns the new request length (presumably
 * ptr - data in an elided line — TODO confirm) or -ECONNREFUSED.
 * NOTE(review): elided view — 'type'/'olen'/'val' declarations, 'break's
 * and the final return are not visible.
 */
2341 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2343 struct l2cap_conf_req *req = data;
2344 void *ptr = req->data;
2347 struct l2cap_conf_rfc rfc;
2348 struct l2cap_conf_efs efs;
2350 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2352 while (len >= L2CAP_CONF_OPT_SIZE) {
2353 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2356 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: unaccept and insist
 * on the minimum. */
2357 if (val < L2CAP_DEFAULT_MIN_MTU) {
2358 *result = L2CAP_CONF_UNACCEPT;
2359 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2362 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2365 case L2CAP_CONF_FLUSH_TO:
2366 chan->flush_to = val;
2367 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2371 case L2CAP_CONF_RFC:
2372 if (olen == sizeof(rfc))
2373 memcpy(&rfc, (void *)val, olen);
/* A state-2 device must not let the peer change the mode. */
2375 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2376 rfc.mode != chan->mode)
2377 return -ECONNREFUSED;
2381 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2382 sizeof(rfc), (unsigned long) &rfc);
2385 case L2CAP_CONF_EWS:
2386 chan->tx_win = min_t(u16, val,
2387 L2CAP_DEFAULT_EXT_WINDOW);
2388 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2392 case L2CAP_CONF_EFS:
2393 if (olen == sizeof(efs))
2394 memcpy(&efs, (void *)val, olen);
2396 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2397 efs.stype != L2CAP_SERV_NOTRAFIC &&
2398 efs.stype != chan->local_stype)
2399 return -ECONNREFUSED;
2401 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2402 sizeof(efs), (unsigned long) &efs);
/* Basic mode cannot be renegotiated into anything else. */
2407 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2408 return -ECONNREFUSED;
2410 chan->mode = rfc.mode;
2412 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2414 case L2CAP_MODE_ERTM:
2415 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2416 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2417 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2419 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2420 chan->local_msdu = le16_to_cpu(efs.msdu);
2421 chan->local_sdu_itime =
2422 le32_to_cpu(efs.sdu_itime);
2423 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2424 chan->local_flush_to =
2425 le32_to_cpu(efs.flush_to);
2429 case L2CAP_MODE_STREAMING:
2430 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2434 req->dcid = cpu_to_le16(chan->dcid);
2435 req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal Configuration Response header (scid/result/flags)
 * with no options. Returns the response length (elided — presumably
 * ptr - data; TODO confirm).
 */
2440 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2442 struct l2cap_conf_rsp *rsp = data;
2443 void *ptr = rsp->data;
2445 BT_DBG("chan %p", chan);
/* Response 'scid' field carries our view of the peer's channel (dcid). */
2447 rsp->scid = cpu_to_le16(chan->dcid);
2448 rsp->result = cpu_to_le16(result);
2449 rsp->flags = cpu_to_le16(flags);
/* Complete a deferred incoming connection: send the successful Connect
 * Response, then (if not already sent) kick off configuration with a
 * Configuration Request.
 * NOTE(review): elided view — 'buf' declaration and the early return
 * after the test_and_set_bit are not visible.
 */
2454 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2456 struct l2cap_conn_rsp rsp;
2457 struct l2cap_conn *conn = chan->conn;
2460 rsp.scid = cpu_to_le16(chan->dcid);
2461 rsp.dcid = cpu_to_le16(chan->scid);
2462 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2463 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reply with the ident saved from the original Connect Request. */
2464 l2cap_send_cmd(conn, chan->ident,
2465 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only the first caller proceeds to send the Configuration Request. */
2467 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2470 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2471 l2cap_build_conf_req(chan, buf), buf);
2472 chan->num_conf_req++;
/* Extract the RFC option from an accepted Configuration Response and
 * adopt its timeouts/MPS into the channel (ERTM) or just the MPS
 * (streaming). No-op for basic mode.
 * NOTE(review): elided view — 'type'/'olen'/'val' declarations, the rfc
 * default initialiser, the early return and the switch on chan->mode
 * are not visible.
 */
2475 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2479 struct l2cap_conf_rfc rfc;
2481 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2483 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2486 while (len >= L2CAP_CONF_OPT_SIZE) {
2487 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2490 case L2CAP_CONF_RFC:
2491 if (olen == sizeof(rfc))
2492 memcpy(&rfc, (void *)val, olen);
2499 case L2CAP_MODE_ERTM:
2500 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2501 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2502 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2504 case L2CAP_MODE_STREAMING:
2505 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject: if it answers our outstanding Information
 * Request (matching ident), treat feature discovery as done and proceed
 * with connection start-up anyway.
 */
2509 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2511 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2513 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2516 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2517 cmd->ident == conn->info_ident) {
2518 del_timer(&conn->info_timer);
2520 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2521 conn->info_ident = 0;
2523 l2cap_conn_start(conn);
/* Handle an incoming Connect Request: locate a listening channel for the
 * PSM, enforce security and backlog limits, create the child channel,
 * register it on the connection, and reply with success/pending/refusal.
 * May also trigger a features Information Request if discovery has not
 * completed yet, and a Configuration Request on immediate success.
 * NOTE(review): elided view — goto labels (sendresp/response), 'buf'
 * declaration, dcid assignment and several cleanup lines are not visible.
 */
2529 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2531 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2532 struct l2cap_conn_rsp rsp;
2533 struct l2cap_chan *chan = NULL, *pchan;
2534 struct sock *parent, *sk = NULL;
2535 int result, status = L2CAP_CS_NO_INFO;
2537 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2538 __le16 psm = req->psm;
2540 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2542 /* Check if we have socket listening on psm */
2543 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2545 result = L2CAP_CR_BAD_PSM;
2551 bh_lock_sock(parent);
2553 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2554 if (psm != cpu_to_le16(0x0001) &&
2555 !hci_conn_check_link_mode(conn->hcon)) {
2556 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2557 result = L2CAP_CR_SEC_BLOCK;
2561 result = L2CAP_CR_NO_MEM;
2563 /* Check for backlog size */
2564 if (sk_acceptq_is_full(parent)) {
2565 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2569 chan = pchan->ops->new_connection(pchan->data);
2575 write_lock_bh(&conn->chan_lock);
2577 /* Check if we already have channel with that dcid */
/* Duplicate remote scid: tear down the just-created child. */
2578 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2579 write_unlock_bh(&conn->chan_lock);
2580 sock_set_flag(sk, SOCK_ZAPPED);
2581 chan->ops->close(chan->data);
2585 hci_conn_hold(conn->hcon);
2587 bacpy(&bt_sk(sk)->src, conn->src);
2588 bacpy(&bt_sk(sk)->dst, conn->dst);
2592 bt_accept_enqueue(parent, sk);
2594 __l2cap_chan_add(conn, chan);
2598 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Save the request ident so a deferred response can reuse it. */
2600 chan->ident = cmd->ident;
2602 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2603 if (l2cap_chan_check_security(chan)) {
2604 if (bt_sk(sk)->defer_setup) {
2605 l2cap_state_change(chan, BT_CONNECT2);
2606 result = L2CAP_CR_PEND;
2607 status = L2CAP_CS_AUTHOR_PEND;
2608 parent->sk_data_ready(parent, 0);
2610 l2cap_state_change(chan, BT_CONFIG);
2611 result = L2CAP_CR_SUCCESS;
2612 status = L2CAP_CS_NO_INFO;
2615 l2cap_state_change(chan, BT_CONNECT2);
2616 result = L2CAP_CR_PEND;
2617 status = L2CAP_CS_AUTHEN_PEND;
/* Feature discovery still in flight: answer pending for now. */
2620 l2cap_state_change(chan, BT_CONNECT2);
2621 result = L2CAP_CR_PEND;
2622 status = L2CAP_CS_NO_INFO;
2625 write_unlock_bh(&conn->chan_lock);
2628 bh_unlock_sock(parent);
2631 rsp.scid = cpu_to_le16(scid);
2632 rsp.dcid = cpu_to_le16(dcid);
2633 rsp.result = cpu_to_le16(result);
2634 rsp.status = cpu_to_le16(status);
2635 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info: start feature discovery before continuing. */
2637 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2638 struct l2cap_info_req info;
2639 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2641 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2642 conn->info_ident = l2cap_get_ident(conn);
2644 mod_timer(&conn->info_timer, jiffies +
2645 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2647 l2cap_send_cmd(conn, conn->info_ident,
2648 L2CAP_INFO_REQ, sizeof(info), &info);
2651 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2652 result == L2CAP_CR_SUCCESS) {
2654 set_bit(CONF_REQ_SENT, &chan->conf_state);
2655 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2656 l2cap_build_conf_req(chan, buf), buf);
2657 chan->num_conf_req++;
/* Handle a Connect Response for an outgoing connection: on SUCCESS move
 * to BT_CONFIG and send a Configuration Request; on PEND mark the
 * pending flag; otherwise tear the channel down (deferring if the
 * socket is currently owned by user context).
 * NOTE (review): elided view — channel lookup failure returns, the
 * switch on 'result', 'req'/'sk' declarations and dcid assignment are
 * not visible.
 */
2663 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2665 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2666 u16 scid, dcid, result, status;
2667 struct l2cap_chan *chan;
2671 scid = __le16_to_cpu(rsp->scid);
2672 dcid = __le16_to_cpu(rsp->dcid);
2673 result = __le16_to_cpu(rsp->result);
2674 status = __le16_to_cpu(rsp->status);
2676 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* Response may carry our scid, or (scid==0 reject) only our ident. */
2679 chan = l2cap_get_chan_by_scid(conn, scid);
2683 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2691 case L2CAP_CR_SUCCESS:
2692 l2cap_state_change(chan, BT_CONFIG);
2695 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2697 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2700 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2701 l2cap_build_conf_req(chan, req), req);
2702 chan->num_conf_req++;
2706 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2710 /* don't delete l2cap channel if sk is owned by user */
2711 if (sock_owned_by_user(sk)) {
/* Defer deletion: mark disconnecting and rearm the channel timer. */
2712 l2cap_state_change(chan, BT_DISCONN);
2713 __clear_chan_timer(chan);
2714 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2718 l2cap_chan_del(chan, ECONNREFUSED);
/* Apply the default FCS policy: CRC16 in ERTM/streaming mode unless the
 * peer explicitly disabled FCS; no FCS in any other mode. */
2726 static inline void set_default_fcs(struct l2cap_chan *chan)
2728 /* FCS is enabled only in ERTM or streaming mode, if one or both
2731 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2732 chan->fcs = L2CAP_FCS_NONE;
2733 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2734 chan->fcs = L2CAP_FCS_CRC16;
/* Handle a Configuration Request: validate channel state, accumulate
 * (possibly multi-fragment) option data into chan->conf_req, and once
 * complete parse it, respond, and — when both directions are configured —
 * bring the channel up (initialising ERTM state if negotiated).
 * NOTE(review): elided view — 'rsp'/'buf'/'sk' declarations, several
 * goto/unlock lines and the conf_len reset are not visible.
 */
2737 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2739 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2742 struct l2cap_chan *chan;
2746 dcid = __le16_to_cpu(req->dcid);
2747 flags = __le16_to_cpu(req->flags);
2749 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2751 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only valid in BT_CONFIG/BT_CONNECT2; otherwise reject CID. */
2757 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2758 struct l2cap_cmd_rej_cid rej;
2760 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2761 rej.scid = cpu_to_le16(chan->scid);
2762 rej.dcid = cpu_to_le16(chan->dcid);
2764 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2769 /* Reject if config buffer is too small. */
2770 len = cmd_len - sizeof(*req);
2771 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2772 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2773 l2cap_build_conf_rsp(chan, rsp,
2774 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate option bytes; flags bit 0 means more fragments follow. */
2779 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2780 chan->conf_len += len;
2782 if (flags & 0x0001) {
2783 /* Incomplete config. Send empty response. */
2784 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2785 l2cap_build_conf_rsp(chan, rsp,
2786 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2790 /* Complete config. */
2791 len = l2cap_parse_conf_req(chan, rsp);
2793 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2797 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2798 chan->num_conf_rsp++;
2800 /* Reset config buffer. */
2803 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both sides configured: channel becomes operational. */
2806 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2807 set_default_fcs(chan);
2809 l2cap_state_change(chan, BT_CONNECTED);
2811 chan->next_tx_seq = 0;
2812 chan->expected_tx_seq = 0;
2813 skb_queue_head_init(&chan->tx_q);
2814 if (chan->mode == L2CAP_MODE_ERTM)
2815 l2cap_ertm_init(chan);
2817 l2cap_chan_ready(sk);
/* We had not yet sent our own Configuration Request — do it now. */
2821 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2823 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2824 l2cap_build_conf_req(chan, buf), buf);
2825 chan->num_conf_req++;
2828 /* Got Conf Rsp PENDING from remote side and asume we sent
2829 Conf Rsp PENDING in the code above */
2830 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2831 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2833 /* check compatibility */
2835 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2836 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2838 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2839 l2cap_build_conf_rsp(chan, rsp,
2840 L2CAP_CONF_SUCCESS, 0x0000), rsp);
/* Handle a Configuration Response: on SUCCESS adopt the RFC values; on
 * PENDING possibly resolve a mutual-pending state; on UNACCEPT retry with
 * an adjusted request (bounded by L2CAP_CONF_MAX_CONF_RSP); otherwise
 * disconnect. When both directions finish, bring the channel up.
 * NOTE(review): elided view — 'sk'/'buf'/'req' declarations, the switch
 * statement, several goto/unlock lines and flag-continuation check are
 * not visible.
 */
2848 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2850 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2851 u16 scid, flags, result;
2852 struct l2cap_chan *chan;
2854 int len = cmd->len - sizeof(*rsp);
2856 scid = __le16_to_cpu(rsp->scid);
2857 flags = __le16_to_cpu(rsp->flags);
2858 result = __le16_to_cpu(rsp->result);
2860 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2861 scid, flags, result);
2863 chan = l2cap_get_chan_by_scid(conn, scid);
2870 case L2CAP_CONF_SUCCESS:
2871 l2cap_conf_rfc_get(chan, rsp->data, len);
2872 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2875 case L2CAP_CONF_PENDING:
2876 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
/* Both ends pending: reparse, then confirm success to the peer. */
2878 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2881 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2884 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2888 /* check compatibility */
2890 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2891 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2893 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2894 l2cap_build_conf_rsp(chan, buf,
2895 L2CAP_CONF_SUCCESS, 0x0000), buf);
2899 case L2CAP_CONF_UNACCEPT:
2900 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard against option data larger than our request buffer. */
2903 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2904 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2908 /* throw out any old stored conf requests */
2909 result = L2CAP_CONF_SUCCESS;
2910 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2913 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2917 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2918 L2CAP_CONF_REQ, len, req);
2919 chan->num_conf_req++;
2920 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: give up and disconnect. */
2926 sk->sk_err = ECONNRESET;
2927 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2928 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2935 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2937 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2938 set_default_fcs(chan);
2940 l2cap_state_change(chan, BT_CONNECTED);
2941 chan->next_tx_seq = 0;
2942 chan->expected_tx_seq = 0;
2943 skb_queue_head_init(&chan->tx_q);
2944 if (chan->mode == L2CAP_MODE_ERTM)
2945 l2cap_ertm_init(chan);
2947 l2cap_chan_ready(sk);
/* Handle a Disconnect Request: acknowledge with a Disconnect Response,
 * shut the socket down, and delete the channel — deferred with a timer
 * when user context currently owns the socket.
 * NOTE(review): elided view — 'sk' and scid/dcid declarations, lookup
 * failure return and unlock lines are not visible.
 */
2955 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2957 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2958 struct l2cap_disconn_rsp rsp;
2960 struct l2cap_chan *chan;
2963 scid = __le16_to_cpu(req->scid);
2964 dcid = __le16_to_cpu(req->dcid);
2966 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The request's dcid is our local scid. */
2968 chan = l2cap_get_chan_by_scid(conn, dcid);
2974 rsp.dcid = cpu_to_le16(chan->scid);
2975 rsp.scid = cpu_to_le16(chan->dcid);
2976 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2978 sk->sk_shutdown = SHUTDOWN_MASK;
2980 /* don't delete l2cap channel if sk is owned by user */
2981 if (sock_owned_by_user(sk)) {
2982 l2cap_state_change(chan, BT_DISCONN);
2983 __clear_chan_timer(chan);
2984 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2989 l2cap_chan_del(chan, ECONNRESET);
2992 chan->ops->close(chan->data);
/* Handle a Disconnect Response to our own request: delete the channel
 * (error 0, clean close), deferring with a timer when user context owns
 * the socket.
 * NOTE(review): elided view — 'sk' and scid/dcid declarations, lookup
 * failure return and unlock lines are not visible.
 */
2996 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2998 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3000 struct l2cap_chan *chan;
3003 scid = __le16_to_cpu(rsp->scid);
3004 dcid = __le16_to_cpu(rsp->dcid);
3006 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3008 chan = l2cap_get_chan_by_scid(conn, scid);
3014 /* don't delete l2cap channel if sk is owned by user */
3015 if (sock_owned_by_user(sk)) {
3016 l2cap_state_change(chan, BT_DISCONN);
3017 __clear_chan_timer(chan);
3018 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
3023 l2cap_chan_del(chan, 0);
3026 chan->ops->close(chan->data);
/* Answer an L2CAP Information Request.  Supports the feature-mask and
 * fixed-channels queries; anything else gets an explicit NOTSUPP
 * response rather than silence. */
3030 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3032 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3035 type = __le16_to_cpu(req->type);
3037 BT_DBG("type 0x%4.4x", type);
3039 if (type == L2CAP_IT_FEAT_MASK) {
/* start from the static mask and add mode-dependent feature bits */
3041 u32 feat_mask = l2cap_feat_mask;
3042 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3043 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3044 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3046 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3049 feat_mask |= L2CAP_FEAT_EXT_FLOW
3050 | L2CAP_FEAT_EXT_WINDOW;
/* feature mask is little-endian on the wire; rsp->data is unaligned */
3052 put_unaligned_le32(feat_mask, rsp->data);
3053 l2cap_send_cmd(conn, cmd->ident,
3054 L2CAP_INFO_RSP, sizeof(buf), buf);
3055 } else if (type == L2CAP_IT_FIXED_CHAN) {
3057 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* NOTE(review): this mutates the module-global l2cap_fixed_chan[]
 * table to reflect A2MP availability — shared across connections */
3060 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3062 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3064 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3065 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3066 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3067 l2cap_send_cmd(conn, cmd->ident,
3068 L2CAP_INFO_RSP, sizeof(buf), buf);
/* unknown info type: reply NOTSUPP so the peer does not retry forever */
3070 struct l2cap_info_rsp rsp;
3071 rsp.type = cpu_to_le16(type);
3072 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3073 l2cap_send_cmd(conn, cmd->ident,
3074 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle the peer's Information Response.  Completes the info-request
 * state machine: after the feature mask arrives we optionally chain a
 * fixed-channels request, and once everything is known (or failed) the
 * pending channel connects are kicked via l2cap_conn_start(). */
3080 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3082 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3085 type = __le16_to_cpu(rsp->type);
3086 result = __le16_to_cpu(rsp->result);
3088 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3090 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3091 if (cmd->ident != conn->info_ident ||
3092 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
/* genuine response for our outstanding request: stop the timeout */
3095 del_timer(&conn->info_timer);
3097 if (result != L2CAP_IR_SUCCESS) {
/* peer refused: mark done anyway and proceed with defaults */
3098 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3099 conn->info_ident = 0;
3101 l2cap_conn_start(conn);
3106 if (type == L2CAP_IT_FEAT_MASK) {
3107 conn->feat_mask = get_unaligned_le32(rsp->data);
3109 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* peer supports fixed channels: ask which ones before starting */
3110 struct l2cap_info_req req;
3111 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3113 conn->info_ident = l2cap_get_ident(conn);
3115 l2cap_send_cmd(conn, conn->info_ident,
3116 L2CAP_INFO_REQ, sizeof(req), &req);
3118 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3119 conn->info_ident = 0;
3121 l2cap_conn_start(conn);
3123 } else if (type == L2CAP_IT_FIXED_CHAN) {
/* final step of the info exchange: start queued connections */
3124 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3125 conn->info_ident = 0;
3127 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  AMP channel creation is not
 * implemented yet, so every request is rejected with "no resources". */
3133 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3134 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3137 struct l2cap_create_chan_req *req = data;
3138 struct l2cap_create_chan_rsp rsp;
/* reject malformed PDUs before touching any field */
3141 if (cmd_len != sizeof(*req))
3147 psm = le16_to_cpu(req->psm);
3148 scid = le16_to_cpu(req->scid);
3150 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3152 /* Placeholder: Always reject */
3154 rsp.scid = cpu_to_le16(scid);
3155 rsp.result = L2CAP_CR_NO_MEM;
3156 rsp.status = L2CAP_CS_NO_INFO;
3158 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Handle an AMP Create Channel Response.  The PDU layout matches a
 * normal Connection Response, so simply delegate to that handler. */
3164 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3165 struct l2cap_cmd_hdr *cmd, void *data)
3167 BT_DBG("conn %p", conn);
3169 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for @icid with the given @result,
 * reusing the ident of the request being answered. */
3172 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3173 u16 icid, u16 result)
3175 struct l2cap_move_chan_rsp rsp;
3177 BT_DBG("icid %d, result %d", icid, result);
3179 rsp.icid = cpu_to_le16(icid);
3180 rsp.result = cpu_to_le16(result);
3182 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3185 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3186 struct l2cap_chan *chan, u16 icid, u16 result)
3188 struct l2cap_move_chan_cfm cfm;
3191 BT_DBG("icid %d, result %d", icid, result);
3193 ident = l2cap_get_ident(conn);
3195 chan->ident = ident;
3197 cfm.icid = cpu_to_le16(icid);
3198 cfm.result = cpu_to_le16(result);
3200 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Send a Move Channel Confirmation Response (final PDU of the AMP
 * channel-move handshake), echoing the confirmer's ident. */
3203 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3206 struct l2cap_move_chan_cfm_rsp rsp;
3208 BT_DBG("icid %d", icid);
3210 rsp.icid = cpu_to_le16(icid);
3211 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request.  Channel moves to/from an AMP
 * controller are not implemented, so every request is refused. */
3214 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3215 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3217 struct l2cap_move_chan_req *req = data;
3219 u16 result = L2CAP_MR_NOT_ALLOWED;
/* length check guards the field reads below */
3221 if (cmd_len != sizeof(*req))
3224 icid = le16_to_cpu(req->icid);
3226 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3231 /* Placeholder: Always refuse */
3232 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  No channel lookup is done yet, so
 * the confirmation is sent with chan == NULL — see
 * l2cap_send_move_chan_cfm(), which must tolerate that. */
3237 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3238 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3240 struct l2cap_move_chan_rsp *rsp = data;
3243 if (cmd_len != sizeof(*rsp))
3246 icid = le16_to_cpu(rsp->icid);
3247 result = le16_to_cpu(rsp->result);
3249 BT_DBG("icid %d, result %d", icid, result);
3251 /* Placeholder: Always unconfirmed */
3252 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirmation: simply acknowledge it with a
 * confirmation response (move logic itself is not implemented). */
3257 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3258 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3260 struct l2cap_move_chan_cfm *cfm = data;
3263 if (cmd_len != sizeof(*cfm))
3266 icid = le16_to_cpu(cfm->icid);
3267 result = le16_to_cpu(cfm->result);
3269 BT_DBG("icid %d, result %d", icid, result);
3271 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirmation Response.  Terminal PDU of the
 * move handshake; currently only validated and logged. */
3276 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3277 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3279 struct l2cap_move_chan_cfm_rsp *rsp = data;
3282 if (cmd_len != sizeof(*rsp))
3285 icid = le16_to_cpu(rsp->icid);
3287 BT_DBG("icid %d", icid);
/* Validate LE connection-parameter-update values against the ranges
 * allowed by the spec: interval 6..3200 (min <= max), supervision
 * timeout multiplier 10..3200, and the timeout must exceed the
 * effective interval (max * 8, both in the same 1.25ms/10ms scaling)
 * with enough headroom for the requested slave latency (<= 499). */
3292 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3297 if (min > max || min < 6 || max > 3200)
3300 if (to_multiplier < 10 || to_multiplier > 3200)
/* timeout must be strictly greater than one connection event cycle */
3303 if (max >= to_multiplier * 8)
/* largest latency that still lets the link survive the timeout */
3306 max_latency = (to_multiplier * 8 / max) - 1;
3307 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (slave -> master).
 * Only meaningful when we are master; parameters are range-checked and
 * either rejected or accepted and pushed down to the HCI layer. */
3313 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3314 struct l2cap_cmd_hdr *cmd, u8 *data)
3316 struct hci_conn *hcon = conn->hcon;
3317 struct l2cap_conn_param_update_req *req;
3318 struct l2cap_conn_param_update_rsp rsp;
3319 u16 min, max, latency, to_multiplier, cmd_len;
/* only the master may apply connection parameter updates */
3322 if (!(hcon->link_mode & HCI_LM_MASTER))
3325 cmd_len = __le16_to_cpu(cmd->len);
3326 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3329 req = (struct l2cap_conn_param_update_req *) data;
3330 min = __le16_to_cpu(req->min);
3331 max = __le16_to_cpu(req->max);
3332 latency = __le16_to_cpu(req->latency);
3333 to_multiplier = __le16_to_cpu(req->to_multiplier);
3335 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3336 min, max, latency, to_multiplier);
3338 memset(&rsp, 0, sizeof(rsp));
3340 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3342 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3344 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
/* always answer, accepted or not; only apply the update on success */
3346 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3350 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signalling command to its handler.  Echo
 * requests are answered inline; unknown opcodes are logged so the
 * caller can send a Command Reject. */
3355 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3356 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3360 switch (cmd->code) {
3361 case L2CAP_COMMAND_REJ:
3362 l2cap_command_rej(conn, cmd, data);
3365 case L2CAP_CONN_REQ:
3366 err = l2cap_connect_req(conn, cmd, data);
3369 case L2CAP_CONN_RSP:
3370 err = l2cap_connect_rsp(conn, cmd, data);
3373 case L2CAP_CONF_REQ:
3374 err = l2cap_config_req(conn, cmd, cmd_len, data);
3377 case L2CAP_CONF_RSP:
3378 err = l2cap_config_rsp(conn, cmd, data);
3381 case L2CAP_DISCONN_REQ:
3382 err = l2cap_disconnect_req(conn, cmd, data);
3385 case L2CAP_DISCONN_RSP:
3386 err = l2cap_disconnect_rsp(conn, cmd, data);
3389 case L2CAP_ECHO_REQ:
/* echo: bounce the payload straight back with the same ident */
3390 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3393 case L2CAP_ECHO_RSP:
3396 case L2CAP_INFO_REQ:
3397 err = l2cap_information_req(conn, cmd, data);
3400 case L2CAP_INFO_RSP:
3401 err = l2cap_information_rsp(conn, cmd, data);
3404 case L2CAP_CREATE_CHAN_REQ:
3405 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3408 case L2CAP_CREATE_CHAN_RSP:
3409 err = l2cap_create_channel_rsp(conn, cmd, data);
3412 case L2CAP_MOVE_CHAN_REQ:
3413 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3416 case L2CAP_MOVE_CHAN_RSP:
3417 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3420 case L2CAP_MOVE_CHAN_CFM:
3421 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3424 case L2CAP_MOVE_CHAN_CFM_RSP:
3425 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3429 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signalling command.  Only the connection parameter
 * update exchange is handled on LE links in this version. */
3437 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3438 struct l2cap_cmd_hdr *cmd, u8 *data)
3440 switch (cmd->code) {
3441 case L2CAP_COMMAND_REJ:
3444 case L2CAP_CONN_PARAM_UPDATE_REQ:
3445 return l2cap_conn_param_update_req(conn, cmd, data);
3447 case L2CAP_CONN_PARAM_UPDATE_RSP:
3451 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signalling channel payload: iterate over the concatenated
 * command PDUs in @skb, validate each header, and dispatch to the
 * BR/EDR or LE handler depending on link type.  A handler error is
 * answered with a Command Reject. */
3456 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3457 struct sk_buff *skb)
3459 u8 *data = skb->data;
3461 struct l2cap_cmd_hdr cmd;
/* give raw (sniffing) sockets a copy of the signalling traffic */
3464 l2cap_raw_recv(conn, skb);
3466 while (len >= L2CAP_CMD_HDR_SIZE) {
3468 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3469 data += L2CAP_CMD_HDR_SIZE;
3470 len -= L2CAP_CMD_HDR_SIZE;
3472 cmd_len = le16_to_cpu(cmd.len);
3474 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* length must fit the remaining buffer and ident 0 is reserved */
3476 if (cmd_len > len || !cmd.ident) {
3477 BT_DBG("corrupted command");
3481 if (conn->hcon->type == LE_LINK)
3482 err = l2cap_le_sig_cmd(conn, &cmd, data);
3484 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3487 struct l2cap_cmd_rej_unk rej;
3489 BT_ERR("Wrong link type (%d)", err);
3491 /* FIXME: Map err to a valid reason */
3492 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3493 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the CRC16 FCS trailer of an ERTM/streaming frame.
 * The FCS covers the L2CAP header (already pulled, hence the -hdr_size
 * rewind) plus the payload.  Returns 0 when valid or FCS is off. */
3503 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3505 u16 our_fcs, rcv_fcs;
3508 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3509 hdr_size = L2CAP_EXT_HDR_SIZE;
3511 hdr_size = L2CAP_ENH_HDR_SIZE;
3513 if (chan->fcs == L2CAP_FCS_CRC16) {
/* trim first: the received FCS then sits just past skb->len */
3514 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3515 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3516 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3518 if (our_fcs != rcv_fcs)
/* After exiting WAIT_F (F-bit received), report our receive state:
 * send RNR if locally busy, otherwise flush pending I-frames, and if
 * nothing was sent fall back to an RR so the peer gets an ack. */
3524 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3528 chan->frames_sent = 0;
3530 control |= __set_reqseq(chan, chan->buffer_seq);
3532 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3533 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3534 l2cap_send_sframe(chan, control);
3535 set_bit(CONN_RNR_SENT, &chan->conn_state);
3538 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3539 l2cap_retransmit_frames(chan);
3541 l2cap_ertm_send(chan);
/* nothing went out above: an explicit RR carries the ack instead */
3543 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3544 chan->frames_sent == 0) {
3545 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3546 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ queue, keeping the
 * queue sorted by tx_seq distance from buffer_seq (modulo arithmetic
 * via __seq_offset).  Duplicates are detected by equal tx_seq. */
3550 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3552 struct sk_buff *next_skb;
3553 int tx_seq_offset, next_tx_seq_offset;
/* stash reassembly metadata in the skb control block */
3555 bt_cb(skb)->tx_seq = tx_seq;
3556 bt_cb(skb)->sar = sar;
3558 next_skb = skb_peek(&chan->srej_q);
3560 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3563 if (bt_cb(next_skb)->tx_seq == tx_seq)
3566 next_tx_seq_offset = __seq_offset(chan,
3567 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
/* found the first queued frame that is logically later: insert here */
3569 if (next_tx_seq_offset > tx_seq_offset) {
3570 __skb_queue_before(&chan->srej_q, next_skb, skb);
3574 if (skb_queue_is_last(&chan->srej_q, next_skb))
3577 next_skb = skb_queue_next(&chan->srej_q, next_skb);
/* empty queue or largest offset so far: append at the tail */
3580 __skb_queue_tail(&chan->srej_q, skb);
/* Append @new_frag to @skb's frag_list and update the caller's tail
 * pointer, keeping skb->len / data_len / truesize consistent. */
3585 static void append_skb_frag(struct sk_buff *skb,
3586 struct sk_buff *new_frag, struct sk_buff **last_frag)
3588 /* skb->len reflects data in skb as well as all fragments
3589 * skb->data_len reflects only data in fragments
3591 if (!skb_has_frag_list(skb))
3592 skb_shinfo(skb)->frag_list = new_frag;
3594 new_frag->next = NULL;
3596 (*last_frag)->next = new_frag;
3597 *last_frag = new_frag;
/* account the fragment in the head skb's bookkeeping */
3599 skb->len += new_frag->len;
3600 skb->data_len += new_frag->len;
3601 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from its SAR-segmented I-frames and deliver it via
 * chan->ops->recv().  Start frames carry the total SDU length;
 * continue/end frames are appended via frag_list.  On size violations
 * the partial SDU is discarded. */
3604 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3608 switch (__get_ctrl_sar(chan, control)) {
3609 case L2CAP_SAR_UNSEGMENTED:
/* whole SDU in one frame: hand it up directly */
3613 err = chan->ops->recv(chan->data, skb);
3616 case L2CAP_SAR_START:
/* first segment starts with a 16-bit total SDU length */
3620 chan->sdu_len = get_unaligned_le16(skb->data);
3621 skb_pull(skb, L2CAP_SDULEN_SIZE);
3623 if (chan->sdu_len > chan->imtu) {
3628 if (skb->len >= chan->sdu_len)
3632 chan->sdu_last_frag = skb;
3638 case L2CAP_SAR_CONTINUE:
3642 append_skb_frag(chan->sdu, skb,
3643 &chan->sdu_last_frag);
/* a continue frame must not already complete the SDU */
3646 if (chan->sdu->len >= chan->sdu_len)
3656 append_skb_frag(chan->sdu, skb,
3657 &chan->sdu_last_frag);
/* end frame: total length must now match exactly */
3660 if (chan->sdu->len != chan->sdu_len)
3663 err = chan->ops->recv(chan->data, chan->sdu);
3666 /* Reassembly complete */
3668 chan->sdu_last_frag = NULL;
/* error path: drop the partial SDU and reset reassembly state */
3676 kfree_skb(chan->sdu);
3678 chan->sdu_last_frag = NULL;
/* Enter the local-busy condition: tell the peer to stop sending
 * I-frames by emitting an RNR S-frame and stop acking. */
3685 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3689 BT_DBG("chan %p, Enter local busy", chan);
3691 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3693 control = __set_reqseq(chan, chan->buffer_seq);
3694 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3695 l2cap_send_sframe(chan, control);
3697 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* no acks will be generated while busy */
3699 __clear_ack_timer(chan);
/* Leave the local-busy condition: poll the peer with RR(P=1) and wait
 * for the F-bit (WAIT_F) before resuming normal operation.  Only
 * needed if we previously advertised busy with an RNR. */
3702 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3706 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3709 control = __set_reqseq(chan, chan->buffer_seq);
3710 control |= __set_ctrl_poll(chan);
3711 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3712 l2cap_send_sframe(chan, control);
3713 chan->retry_count = 1;
/* monitor timer supervises the poll/final exchange */
3715 __clear_retrans_timer(chan);
3716 __set_monitor_timer(chan);
3718 set_bit(CONN_WAIT_F, &chan->conn_state);
3721 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3722 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3724 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook for the socket layer to toggle the local-busy state of
 * an ERTM channel (e.g. when the receive buffer fills/drains); no-op
 * for other modes. */
3727 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3729 if (chan->mode == L2CAP_MODE_ERTM) {
3731 l2cap_ertm_enter_local_busy(chan);
3733 l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ queue: deliver, in order, every queued frame whose
 * tx_seq is now consecutive starting at @tx_seq, stopping at the next
 * gap or if we become locally busy. */
3737 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3739 struct sk_buff *skb;
3742 while ((skb = skb_peek(&chan->srej_q)) &&
3743 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* queue is sorted; a mismatch means the gap is still open */
3746 if (bt_cb(skb)->tx_seq != tx_seq)
3749 skb = skb_dequeue(&chan->srej_q);
3750 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3751 err = l2cap_reassemble_sdu(chan, skb, control);
3754 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3758 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3759 tx_seq = __next_seq(chan, tx_seq);
/* Re-send SREJ S-frames for every sequence number still missing up to
 * (and excluding) @tx_seq; the matching list entry for @tx_seq itself
 * is removed, the others are re-queued at the tail. */
3763 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3765 struct srej_list *l, *tmp;
3768 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3769 if (l->tx_seq == tx_seq) {
3774 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3775 control |= __set_reqseq(chan, l->tx_seq);
3776 l2cap_send_sframe(chan, control);
/* keep the entry pending: move it to the back of the list */
3778 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing sequence number between expected_tx_seq
 * and the received @tx_seq, recording each requested seq in srej_l so
 * the retransmissions can be matched later.  GFP_ATOMIC because this
 * runs in the receive path. */
3782 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3784 struct srej_list *new;
3787 while (tx_seq != chan->expected_tx_seq) {
3788 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3789 control |= __set_reqseq(chan, chan->expected_tx_seq);
3790 l2cap_send_sframe(chan, control);
3792 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3796 new->tx_seq = chan->expected_tx_seq;
3798 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3800 list_add_tail(&new->list, &chan->srej_l);
/* account for the frame that triggered the SREJ run itself */
3803 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive state machine.  Handles: F-bit completion
 * of a poll exchange, piggybacked acks (req_seq), window validation,
 * local-busy buffering, SREJ-based recovery of out-of-order frames,
 * in-order delivery, and ack pacing (every tx_win/6+1 frames). */
3808 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3810 u16 tx_seq = __get_txseq(chan, rx_control);
3811 u16 req_seq = __get_reqseq(chan, rx_control);
3812 u8 sar = __get_ctrl_sar(chan, rx_control);
3813 int tx_seq_offset, expected_tx_seq_offset;
3814 int num_to_ack = (chan->tx_win/6) + 1;
3817 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3818 tx_seq, rx_control);
/* F-bit answers our poll: leave WAIT_F and resume supervision */
3820 if (__is_ctrl_final(chan, rx_control) &&
3821 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3822 __clear_monitor_timer(chan);
3823 if (chan->unacked_frames > 0)
3824 __set_retrans_timer(chan);
3825 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* req_seq piggybacks an ack for our transmitted frames */
3828 chan->expected_ack_seq = req_seq;
3829 l2cap_drop_acked_frames(chan);
3831 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3833 /* invalid tx_seq */
3834 if (tx_seq_offset >= chan->tx_win) {
3835 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3839 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3842 if (tx_seq == chan->expected_tx_seq)
/* out-of-order frame while a SREJ recovery is in progress */
3845 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3846 struct srej_list *first;
3848 first = list_first_entry(&chan->srej_l,
3849 struct srej_list, list);
3850 if (tx_seq == first->tx_seq) {
/* this is the oldest missing frame: queue it and try to close the gap */
3851 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3852 l2cap_check_srej_gap(chan, tx_seq);
3854 list_del(&first->list);
3857 if (list_empty(&chan->srej_l)) {
/* recovery finished: resync buffer_seq and ack everything */
3858 chan->buffer_seq = chan->buffer_seq_srej;
3859 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3860 l2cap_send_ack(chan);
3861 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3864 struct srej_list *l;
3866 /* duplicated tx_seq */
3867 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3870 list_for_each_entry(l, &chan->srej_l, list) {
3871 if (l->tx_seq == tx_seq) {
3872 l2cap_resend_srejframe(chan, tx_seq);
3877 err = l2cap_send_srejframe(chan, tx_seq);
3879 l2cap_send_disconn_req(chan->conn, chan, -err);
/* first out-of-order frame: start a new SREJ recovery */
3884 expected_tx_seq_offset = __seq_offset(chan,
3885 chan->expected_tx_seq, chan->buffer_seq);
3887 /* duplicated tx_seq */
3888 if (tx_seq_offset < expected_tx_seq_offset)
3891 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3893 BT_DBG("chan %p, Enter SREJ", chan);
3895 INIT_LIST_HEAD(&chan->srej_l);
3896 chan->buffer_seq_srej = chan->buffer_seq;
3898 __skb_queue_head_init(&chan->srej_q);
3899 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3901 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3903 err = l2cap_send_srejframe(chan, tx_seq);
3905 l2cap_send_disconn_req(chan->conn, chan, -err);
3909 __clear_ack_timer(chan);
/* expected (in-order) frame path */
3914 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3916 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* during recovery, even in-order frames are buffered, not delivered */
3917 bt_cb(skb)->tx_seq = tx_seq;
3918 bt_cb(skb)->sar = sar;
3919 __skb_queue_tail(&chan->srej_q, skb);
3923 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3924 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3927 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3931 if (__is_ctrl_final(chan, rx_control)) {
3932 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3933 l2cap_retransmit_frames(chan);
/* batch acks: send one every num_to_ack frames, else arm the timer */
3937 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3938 if (chan->num_acked == num_to_ack - 1)
3939 l2cap_send_ack(chan);
3941 __set_ack_timer(chan);
/* Handle a received RR (Receiver Ready) S-frame: process the ack,
 * answer a poll with an F-bit response, and resume transmission when
 * the remote-busy condition clears. */
3950 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3952 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3953 __get_reqseq(chan, rx_control), rx_control);
3955 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3956 l2cap_drop_acked_frames(chan);
3958 if (__is_ctrl_poll(chan, rx_control)) {
/* peer polls us: next frame we send must carry the F-bit */
3959 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3960 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3961 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3962 (chan->unacked_frames > 0))
3963 __set_retrans_timer(chan);
3965 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3966 l2cap_send_srejtail(chan);
3968 l2cap_send_i_or_rr_or_rnr(chan);
3971 } else if (__is_ctrl_final(chan, rx_control)) {
3972 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* F-bit without a pending REJ: retransmit from the acked point */
3974 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3975 l2cap_retransmit_frames(chan);
3978 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3979 (chan->unacked_frames > 0))
3980 __set_retrans_timer(chan);
3982 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3983 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3984 l2cap_send_ack(chan);
3986 l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: the reqseq both acks earlier frames
 * and requests retransmission starting at that sequence number. */
3990 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
3992 u16 tx_seq = __get_reqseq(chan, rx_control);
3994 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3996 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3998 chan->expected_ack_seq = tx_seq;
3999 l2cap_drop_acked_frames(chan);
4001 if (__is_ctrl_final(chan, rx_control)) {
/* only retransmit once per REJ/poll exchange (REJ_ACT latch) */
4002 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4003 l2cap_retransmit_frames(chan);
4005 l2cap_retransmit_frames(chan);
4007 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4008 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a received SREJ S-frame: selectively retransmit the single
 * frame named by reqseq.  P- and F-bit variants additionally manage
 * the poll exchange state (SREJ_ACT / srej_save_reqseq). */
4011 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4013 u16 tx_seq = __get_reqseq(chan, rx_control);
4015 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4017 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4019 if (__is_ctrl_poll(chan, rx_control)) {
/* SREJ with P=1 also acks up to tx_seq */
4020 chan->expected_ack_seq = tx_seq;
4021 l2cap_drop_acked_frames(chan);
4023 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4024 l2cap_retransmit_one_frame(chan, tx_seq);
4026 l2cap_ertm_send(chan);
4028 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4029 chan->srej_save_reqseq = tx_seq;
4030 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4032 } else if (__is_ctrl_final(chan, rx_control)) {
/* F-bit: skip the retransmit if this SREJ was already handled */
4033 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4034 chan->srej_save_reqseq == tx_seq)
4035 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4037 l2cap_retransmit_one_frame(chan, tx_seq);
4039 l2cap_retransmit_one_frame(chan, tx_seq);
4040 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4041 chan->srej_save_reqseq = tx_seq;
4042 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a received RNR (Receiver Not Ready) S-frame: mark the peer
 * busy, process the piggybacked ack, and answer a poll as required. */
4047 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4049 u16 tx_seq = __get_reqseq(chan, rx_control);
4051 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4053 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4054 chan->expected_ack_seq = tx_seq;
4055 l2cap_drop_acked_frames(chan);
4057 if (__is_ctrl_poll(chan, rx_control))
4058 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4060 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* peer is busy: no point retransmitting until it recovers */
4061 __clear_retrans_timer(chan);
4062 if (__is_ctrl_poll(chan, rx_control))
4063 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4067 if (__is_ctrl_poll(chan, rx_control)) {
4068 l2cap_send_srejtail(chan);
4070 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4071 l2cap_send_sframe(chan, rx_control);
/* Dispatch a received supervisory frame (RR/REJ/SREJ/RNR) to its
 * handler, first resolving a pending poll exchange if the F-bit is
 * set while we are in WAIT_F. */
4075 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4077 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4079 if (__is_ctrl_final(chan, rx_control) &&
4080 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4081 __clear_monitor_timer(chan);
4082 if (chan->unacked_frames > 0)
4083 __set_retrans_timer(chan);
4084 clear_bit(CONN_WAIT_F, &chan->conn_state);
4087 switch (__get_ctrl_super(chan, rx_control)) {
4088 case L2CAP_SUPER_RR:
4089 l2cap_data_channel_rrframe(chan, rx_control);
4092 case L2CAP_SUPER_REJ:
4093 l2cap_data_channel_rejframe(chan, rx_control);
4096 case L2CAP_SUPER_SREJ:
4097 l2cap_data_channel_srejframe(chan, rx_control);
4100 case L2CAP_SUPER_RNR:
4101 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for an ERTM frame on a channel socket: strip and
 * validate the control field and FCS, sanity-check lengths and the
 * req_seq window, then route to the I-frame or S-frame handler. */
4109 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4111 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4114 int len, next_tx_seq_offset, req_seq_offset;
4116 control = __get_control(chan, skb->data);
4117 skb_pull(skb, __ctrl_size(chan));
4121 * We can just drop the corrupted I-frame here.
4122 * Receiver will miss it and start proper recovery
4123 * procedures and ask retransmission.
4125 if (l2cap_check_fcs(chan, skb))
/* SDU-length and FCS fields do not count toward the payload size */
4128 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4129 len -= L2CAP_SDULEN_SIZE;
4131 if (chan->fcs == L2CAP_FCS_CRC16)
4132 len -= L2CAP_FCS_SIZE;
4134 if (len > chan->mps) {
4135 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4139 req_seq = __get_reqseq(chan, control);
4141 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4143 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4144 chan->expected_ack_seq);
4146 /* check for invalid req-seq */
4147 if (req_seq_offset > next_tx_seq_offset) {
4148 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4152 if (!__is_sframe(chan, control)) {
4154 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4158 l2cap_data_channel_iframe(chan, control, skb);
/* S-frames must carry no payload */
4162 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4166 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data frame to the connection-oriented channel identified
 * by @cid, branching on the channel mode: basic (direct delivery),
 * ERTM (state machine, possibly via the socket backlog), or streaming
 * (no retransmission — gaps simply discard the partial SDU). */
4176 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4178 struct l2cap_chan *chan;
4179 struct sock *sk = NULL;
4184 chan = l2cap_get_chan_by_scid(conn, cid);
4186 BT_DBG("unknown cid 0x%4.4x", cid);
4192 BT_DBG("chan %p, len %d", chan, skb->len);
4194 if (chan->state != BT_CONNECTED)
4197 switch (chan->mode) {
4198 case L2CAP_MODE_BASIC:
4199 /* If socket recv buffers overflows we drop data here
4200 * which is *bad* because L2CAP has to be reliable.
4201 * But we don't have any other choice. L2CAP doesn't
4202 * provide flow control mechanism. */
4204 if (chan->imtu < skb->len)
4207 if (!chan->ops->recv(chan->data, skb))
4211 case L2CAP_MODE_ERTM:
/* socket locked by user: defer processing via the backlog */
4212 if (!sock_owned_by_user(sk)) {
4213 l2cap_ertm_data_rcv(sk, skb);
4215 if (sk_add_backlog(sk, skb))
4221 case L2CAP_MODE_STREAMING:
4222 control = __get_control(chan, skb->data);
4223 skb_pull(skb, __ctrl_size(chan));
4226 if (l2cap_check_fcs(chan, skb))
4229 if (__is_sar_start(chan, control))
4230 len -= L2CAP_SDULEN_SIZE;
4232 if (chan->fcs == L2CAP_FCS_CRC16)
4233 len -= L2CAP_FCS_SIZE;
/* streaming carries only I-frames within the MPS */
4235 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4238 tx_seq = __get_txseq(chan, control);
4240 if (chan->expected_tx_seq != tx_seq) {
4241 /* Frame(s) missing - must discard partial SDU */
4242 kfree_skb(chan->sdu);
4244 chan->sdu_last_frag = NULL;
4247 /* TODO: Notify userland of missing data */
4250 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4252 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4253 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4258 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (CID 0x0002) frame to the socket listening
 * on the given PSM, enforcing state and MTU before delivery. */
4272 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4274 struct sock *sk = NULL;
4275 struct l2cap_chan *chan;
4277 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4285 BT_DBG("sk %p, len %d", sk, skb->len);
4287 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4290 if (chan->imtu < skb->len)
4293 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE fixed-channel) frame to the socket bound to that
 * CID; same state/MTU checks as the connectionless path. */
4305 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4307 struct sock *sk = NULL;
4308 struct l2cap_chan *chan;
4310 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4318 BT_DBG("sk %p, len %d", sk, skb->len);
4320 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4323 if (chan->imtu < skb->len)
4326 if (!chan->ops->recv(chan->data, skb))
/* Top-level demultiplexer for a complete L2CAP frame: validate the
 * basic header length, then route by CID to the signalling,
 * connectionless, ATT/SMP, or connection-oriented data paths. */
4338 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4340 struct l2cap_hdr *lh = (void *) skb->data;
4344 skb_pull(skb, L2CAP_HDR_SIZE);
4345 cid = __le16_to_cpu(lh->cid);
4346 len = __le16_to_cpu(lh->len);
/* header length must match the actual payload exactly */
4348 if (len != skb->len) {
4353 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4356 case L2CAP_CID_LE_SIGNALING:
4357 case L2CAP_CID_SIGNALING:
4358 l2cap_sig_channel(conn, skb);
4361 case L2CAP_CID_CONN_LESS:
4362 psm = get_unaligned_le16(skb->data);
4364 l2cap_conless_channel(conn, psm, skb);
4367 case L2CAP_CID_LE_DATA:
4368 l2cap_att_channel(conn, cid, skb);
/* SMP failure on the security-manager CID tears down the link */
4372 if (smp_sig_channel(conn, skb))
4373 l2cap_conn_del(conn->hcon, EACCES);
4377 l2cap_data_channel(conn, cid, skb);
4382 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being set up.  Scan the
 * global channel list for listeners and report the accept/role policy;
 * an exact local-address match takes precedence over wildcard binds. */
4384 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4386 int exact = 0, lm1 = 0, lm2 = 0;
4387 struct l2cap_chan *c;
4389 if (type != ACL_LINK)
4392 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4394 /* Find listening sockets and check their link_mode */
4395 read_lock(&chan_list_lock);
4396 list_for_each_entry(c, &chan_list, global_l) {
4397 struct sock *sk = c->sk;
4399 if (c->state != BT_LISTEN)
/* lm1: listeners bound exactly to this adapter's address */
4402 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4403 lm1 |= HCI_LM_ACCEPT;
4404 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4405 lm1 |= HCI_LM_MASTER;
/* lm2: listeners bound to the wildcard address */
4407 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4408 lm2 |= HCI_LM_ACCEPT;
4409 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4410 lm2 |= HCI_LM_MASTER;
4413 read_unlock(&chan_list_lock);
4415 return exact ? lm1 : lm2;
/* HCI callback: link establishment finished.  On success attach an
 * L2CAP connection object and start it; on failure tear down with the
 * HCI status translated to an errno. */
4418 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4420 struct l2cap_conn *conn;
4422 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4424 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4428 conn = l2cap_conn_add(hcon, status);
4430 l2cap_conn_ready(conn);
4432 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the HCI reason code to use when this link is
 * disconnected (stored earlier by the L2CAP layer). */
4437 static int l2cap_disconn_ind(struct hci_conn *hcon)
4439 struct l2cap_conn *conn = hcon->l2cap_data;
4441 BT_DBG("hcon %p", hcon);
/* no L2CAP state or wrong link type: fall back to the default reason */
4443 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4444 return HCI_ERROR_REMOTE_USER_TERM;
4446 return conn->disc_reason;
/* HCI callback: the link is gone — destroy the L2CAP connection with
 * the HCI reason translated to an errno. */
4449 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4451 BT_DBG("hcon %p reason %d", hcon, reason);
4453 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4456 l2cap_conn_del(hcon, bt_to_errno(reason));
4461 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4463 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4466 if (encrypt == 0x00) {
4467 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4468 __clear_chan_timer(chan);
4469 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4470 } else if (chan->sec_level == BT_SECURITY_HIGH)
4471 l2cap_chan_close(chan, ECONNREFUSED);
4473 if (chan->sec_level == BT_SECURITY_MEDIUM)
4474 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption completed with @status.
 * Walk every channel on the connection and advance its state machine:
 * LE data channels become ready, pending outgoing connects are sent,
 * and deferred incoming connects are answered or security-blocked. */
4478 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4480 struct l2cap_conn *conn = hcon->l2cap_data;
4481 struct l2cap_chan *chan;
4486 BT_DBG("conn %p", conn);
4488 if (hcon->type == LE_LINK) {
/* SMP pairing step done: distribute keys and stop the SMP timeout */
4489 smp_distribute_keys(conn, 0);
4490 del_timer(&conn->security_timer);
4493 read_lock(&conn->chan_lock);
4495 list_for_each_entry(chan, &conn->chan_l, list) {
4496 struct sock *sk = chan->sk;
4500 BT_DBG("chan->scid %d", chan->scid);
4502 if (chan->scid == L2CAP_CID_LE_DATA) {
4503 if (!status && encrypt) {
4504 chan->sec_level = hcon->sec_level;
4505 l2cap_chan_ready(sk);
/* channels not waiting on this security procedure are skipped */
4512 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4517 if (!status && (chan->state == BT_CONNECTED ||
4518 chan->state == BT_CONFIG)) {
4519 l2cap_check_encryption(chan, encrypt);
4524 if (chan->state == BT_CONNECT) {
/* security now satisfied: (re)send our Connection Request */
4526 struct l2cap_conn_req req;
4527 req.scid = cpu_to_le16(chan->scid);
4528 req.psm = chan->psm;
4530 chan->ident = l2cap_get_ident(conn);
4531 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4533 l2cap_send_cmd(conn, chan->ident,
4534 L2CAP_CONN_REQ, sizeof(req), &req);
4536 __clear_chan_timer(chan);
4537 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4539 } else if (chan->state == BT_CONNECT2) {
/* incoming connect was waiting on security: answer it now */
4540 struct l2cap_conn_rsp rsp;
4544 if (bt_sk(sk)->defer_setup) {
4545 struct sock *parent = bt_sk(sk)->parent;
4546 res = L2CAP_CR_PEND;
4547 stat = L2CAP_CS_AUTHOR_PEND;
/* wake the listening socket to run its accept logic */
4549 parent->sk_data_ready(parent, 0);
4551 l2cap_state_change(chan, BT_CONFIG);
4552 res = L2CAP_CR_SUCCESS;
4553 stat = L2CAP_CS_NO_INFO;
/* security procedure failed: refuse the connection */
4556 l2cap_state_change(chan, BT_DISCONN);
4557 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4558 res = L2CAP_CR_SEC_BLOCK;
4559 stat = L2CAP_CS_NO_INFO;
4562 rsp.scid = cpu_to_le16(chan->dcid);
4563 rsp.dcid = cpu_to_le16(chan->scid);
4564 rsp.result = cpu_to_le16(res);
4565 rsp.status = cpu_to_le16(stat);
4566 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4573 read_unlock(&conn->chan_lock);
/* Entry point for inbound ACL data from the HCI core.
 *
 * Reassembles fragmented ACL packets into complete L2CAP frames using
 * conn->rx_skb / conn->rx_len as the per-connection reassembly state,
 * then hands complete frames to l2cap_recv_frame(). Runs in softirq
 * context (GFP_ATOMIC allocation).
 *
 * @flags: ACL packet boundary flags; ACL_CONT marks a continuation
 *         fragment, otherwise this is a start fragment.
 *
 * Consumes @skb in every path. Always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* First data on this link: create the L2CAP connection lazily. */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A start fragment while reassembly is in progress means
		 * the previous frame was never completed: discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		chan = l2cap_get_chan_by_scid(conn, cid);

		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			/* Reject frames the receiving channel's MTU cannot
			 * hold, before allocating the reassembly buffer. */
			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
						"MTU %d)", len,
						chan->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		/* Bytes still expected from continuation fragments. */
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4689 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4691 struct l2cap_chan *c;
4693 read_lock_bh(&chan_list_lock);
4695 list_for_each_entry(c, &chan_list, global_l) {
4696 struct sock *sk = c->sk;
4698 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4699 batostr(&bt_sk(sk)->src),
4700 batostr(&bt_sk(sk)->dst),
4701 c->state, __le16_to_cpu(c->psm),
4702 c->scid, c->dcid, c->imtu, c->omtu,
4703 c->sec_level, c->mode);
4706 read_unlock_bh(&chan_list_lock);
/* debugfs open: bind the single-record seq_file iterator to the show
 * callback above. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* File operations for the "l2cap" debugfs entry (single-shot seq_file). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file, kept so l2cap_exit() can remove it. */
static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI core's protocol
 * table; the callbacks below are invoked by the HCI layer on link
 * events and inbound ACL data. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
/* Module init: register the L2CAP socket family, hook into the HCI
 * core, and create the debugfs entry (the latter is best-effort).
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * registered state is unwound. */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	if (bt_debugfs) {
		/* debugfs failure is non-fatal: log and continue. */
		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
					bt_debugfs, NULL, &l2cap_debugfs_fops);
		if (!l2cap_debugfs)
			BT_ERR("Failed to create L2CAP debug file");
	}

	return 0;

error:
	l2cap_cleanup_sockets();
	return err;
}
4765 void l2cap_exit(void)
4767 debugfs_remove(l2cap_debugfs);
4769 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4770 BT_ERR("L2CAP protocol unregistration failed");
4772 l2cap_cleanup_sockets();
/* Runtime-tunable module parameters (world-readable, root-writable
 * via /sys/module/.../parameters). The backing variables are defined
 * earlier in this file. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");