2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* ---- Module-wide L2CAP state and forward declarations ----
 * NOTE(review): the embedded original line numbers are non-contiguous;
 * several source lines are missing from this extraction. Comments here
 * describe only what is visible. */
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64 static LIST_HEAD(chan_list);
/* chan_list_lock protects the global chan_list of all registered channels */
65 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers used before their definitions */
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
75 /* ---- L2CAP channels ---- */
/* Look up a channel on @conn by destination CID (peer's CID).
 * Caller must hold conn->chan_lock.
 * NOTE(review): loop body / return lines are missing from this extraction. */
77 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
81 list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on @conn by source CID (our CID).
 * Caller must hold conn->chan_lock. */
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
92 list_for_each_entry(c, &conn->chan_l, list) {
99 /* Find channel with given SCID.
100 * Returns locked socket */
/* Locked wrapper: takes conn->chan_lock around the raw SCID lookup. */
101 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 struct l2cap_chan *c;
105 mutex_lock(&conn->chan_lock);
106 c = __l2cap_get_chan_by_scid(conn, cid);
107 mutex_unlock(&conn->chan_lock);
/* Look up a channel on @conn by the signalling identifier of its
 * pending command. Caller must hold conn->chan_lock. */
112 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
114 struct l2cap_chan *c;
116 list_for_each_entry(c, &conn->chan_l, list) {
117 if (c->ident == ident)
/* Locked wrapper around the ident lookup above. */
123 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
125 struct l2cap_chan *c;
127 mutex_lock(&conn->chan_lock);
128 c = __l2cap_get_chan_by_ident(conn, ident);
129 mutex_unlock(&conn->chan_lock);
/* Find a globally registered channel bound to (@psm, @src).
 * Caller must hold chan_list_lock.
 * NOTE(review): compares against c->sport (local source PSM), not c->psm. */
134 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &chan_list, global_l) {
139 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on @src, or auto-allocate a dynamic PSM when
 * psm == 0. Dynamic PSMs are the odd values in 0x1001..0x10ff (stepping
 * by 2 keeps the low bit set, as L2CAP requires).
 * NOTE(review): error-return paths are missing from this extraction. */
145 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
149 write_lock(&chan_list_lock);
151 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
164 for (p = 0x1001; p < 0x1100; p += 2)
165 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
166 chan->psm = cpu_to_le16(p);
167 chan->sport = cpu_to_le16(p);
174 write_unlock(&chan_list_lock);
/* Record a fixed SCID for @chan under the global list lock. */
178 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
180 write_lock(&chan_list_lock);
184 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic CID on @conn by linear scan of
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
189 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
191 u16 cid = L2CAP_CID_DYN_START;
193 for (; cid < L2CAP_CID_DYN_END; cid++) {
194 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move @chan to @state and notify the channel's owner via the
 * state_change callback. Double-underscore variant: caller already
 * holds the required lock (locking lines not visible here). */
201 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
203 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
204 state_to_string(state));
207 chan->ops->state_change(chan->data, state);
/* Locked wrapper around __l2cap_state_change().
 * NOTE(review): the socket lock/unlock lines are missing from this view. */
210 static void l2cap_state_change(struct l2cap_chan *chan, int state)
212 struct sock *sk = chan->sk;
215 __l2cap_state_change(chan, state);
/* Record an error code on the channel's socket (unlocked variant). */
219 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
221 struct sock *sk = chan->sk;
/* Locked wrapper around __l2cap_chan_set_err(). */
226 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
228 struct sock *sk = chan->sk;
231 __l2cap_chan_set_err(chan, err);
/* Delayed-work handler fired when a channel timer (chan_timer) expires.
 * Picks a close reason from the channel state, closes the channel under
 * conn->chan_lock + channel lock, then drops the timer's reference.
 * NOTE(review): the default reason branch (likely ETIMEDOUT) is missing
 * from this extraction. */
235 static void l2cap_chan_timeout(struct work_struct *work)
237 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
239 struct l2cap_conn *conn = chan->conn;
242 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
244 mutex_lock(&conn->chan_lock);
245 l2cap_chan_lock(chan);
247 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
248 reason = ECONNREFUSED;
249 else if (chan->state == BT_CONNECT &&
250 chan->sec_level != BT_SECURITY_SDP)
251 reason = ECONNREFUSED;
255 l2cap_chan_close(chan, reason);
257 l2cap_chan_unlock(chan);
/* ops->close is called outside the channel lock but inside chan_lock */
259 chan->ops->close(chan->data);
260 mutex_unlock(&conn->chan_lock);
/* balances the reference held while the timer was pending */
262 l2cap_chan_put(chan);
/* Allocate and initialise a new channel bound to socket @sk, register it
 * on the global chan_list and return it with refcount 1.
 * NOTE(review): NULL-check of kzalloc and the return statement are
 * missing from this extraction — assume they exist in the full source. */
265 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
267 struct l2cap_chan *chan;
269 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
273 mutex_init(&chan->lock);
277 write_lock(&chan_list_lock);
278 list_add(&chan->global_l, &chan_list);
279 write_unlock(&chan_list_lock);
281 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
283 chan->state = BT_OPEN;
285 atomic_set(&chan->refcnt, 1);
287 BT_DBG("sk %p chan %p", sk, chan);
/* Unregister @chan from the global list and drop the creation reference
 * taken in l2cap_chan_create(). */
292 void l2cap_chan_destroy(struct l2cap_chan *chan)
294 write_lock(&chan_list_lock);
295 list_del(&chan->global_l);
296 write_unlock(&chan_list_lock);
298 l2cap_chan_put(chan);
/* Attach @chan to connection @conn: assign CIDs/MTU by channel type,
 * set default extended flow-spec parameters, take a channel reference
 * and link the channel onto conn->chan_l.
 * Caller must hold conn->chan_lock (see l2cap_chan_add below). */
301 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
303 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
304 chan->psm, chan->dcid);
/* default disconnect reason until the remote tells us otherwise */
306 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
310 switch (chan->chan_type) {
311 case L2CAP_CHAN_CONN_ORIENTED:
312 if (conn->hcon->type == LE_LINK) {
/* LE links use the fixed LE data CID in both directions */
314 chan->omtu = L2CAP_LE_DEFAULT_MTU;
315 chan->scid = L2CAP_CID_LE_DATA;
316 chan->dcid = L2CAP_CID_LE_DATA;
318 /* Alloc CID for connection-oriented socket */
319 chan->scid = l2cap_alloc_cid(conn);
320 chan->omtu = L2CAP_DEFAULT_MTU;
324 case L2CAP_CHAN_CONN_LESS:
325 /* Connectionless socket */
326 chan->scid = L2CAP_CID_CONN_LESS;
327 chan->dcid = L2CAP_CID_CONN_LESS;
328 chan->omtu = L2CAP_DEFAULT_MTU;
332 /* Raw socket can send/recv signalling messages only */
333 chan->scid = L2CAP_CID_SIGNALING;
334 chan->dcid = L2CAP_CID_SIGNALING;
335 chan->omtu = L2CAP_DEFAULT_MTU;
/* best-effort defaults for the (optional) extended flow specification */
338 chan->local_id = L2CAP_BESTEFFORT_ID;
339 chan->local_stype = L2CAP_SERV_BESTEFFORT;
340 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
341 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
342 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
343 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* reference owned by conn->chan_l; dropped in l2cap_chan_del() */
345 l2cap_chan_hold(chan);
347 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper: adds @chan to @conn under conn->chan_lock. */
350 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
352 mutex_lock(&conn->chan_lock);
353 __l2cap_chan_add(conn, chan);
354 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection and tear down its state: stop the
 * channel timer, unlink from conn->chan_l, drop the list reference and
 * the hci_conn reference, mark the socket closed/zapped, wake any
 * accepting parent, and purge ERTM queues and timers.
 * NOTE(review): several guard/brace lines are missing from this
 * extraction; the visible flow matches the upstream function. */
357 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
359 struct sock *sk = chan->sk;
360 struct l2cap_conn *conn = chan->conn;
361 struct sock *parent = bt_sk(sk)->parent;
363 __clear_chan_timer(chan);
365 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
368 /* Delete from channel list */
369 list_del(&chan->list);
/* drop the reference taken by __l2cap_chan_add() */
371 l2cap_chan_put(chan);
374 hci_conn_put(conn->hcon);
379 __l2cap_state_change(chan, BT_CLOSED);
380 sock_set_flag(sk, SOCK_ZAPPED);
383 __l2cap_chan_set_err(chan, err);
/* if still on a listening socket's accept queue, unlink and notify */
386 bt_accept_unlink(sk);
387 parent->sk_data_ready(parent, 0);
389 sk->sk_state_change(sk);
/* skip queue teardown unless configuration completed in both directions */
393 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
394 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
397 skb_queue_purge(&chan->tx_q);
399 if (chan->mode == L2CAP_MODE_ERTM) {
400 struct srej_list *l, *tmp;
402 __clear_retrans_timer(chan);
403 __clear_monitor_timer(chan);
404 __clear_ack_timer(chan);
406 skb_queue_purge(&chan->srej_q);
/* free every pending SREJ entry */
408 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel of listening socket
 * @parent, dequeuing each from the accept queue. */
415 static void l2cap_chan_cleanup_listen(struct sock *parent)
419 BT_DBG("parent %p", parent);
421 /* Close not yet accepted channels */
422 while ((sk = bt_accept_dequeue(parent, NULL))) {
423 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
425 l2cap_chan_lock(chan);
426 __clear_chan_timer(chan);
427 l2cap_chan_close(chan, ECONNRESET);
428 l2cap_chan_unlock(chan);
430 chan->ops->close(chan->data);
/* Close @chan with error @reason, state-dependent:
 *  - BT_LISTEN: also close queued (unaccepted) children, then zap;
 *  - connected/config on ACL: send a Disconnect request and arm the
 *    channel timer to bound the wait for the response;
 *  - BT_CONNECT2 on ACL: answer the pending Connect request with a
 *    rejection before deleting the channel;
 *  - otherwise just zap the socket.
 * Caller holds the channel lock (see callers in this file).
 * NOTE(review): case labels and some braces are missing from this
 * extraction; the visible structure mirrors the upstream switch. */
434 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
436 struct l2cap_conn *conn = chan->conn;
437 struct sock *sk = chan->sk;
439 BT_DBG("chan %p state %s sk %p", chan,
440 state_to_string(chan->state), sk);
442 switch (chan->state) {
445 l2cap_chan_cleanup_listen(sk);
447 __l2cap_state_change(chan, BT_CLOSED);
448 sock_set_flag(sk, SOCK_ZAPPED);
454 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
455 conn->hcon->type == ACL_LINK) {
456 __clear_chan_timer(chan);
457 __set_chan_timer(chan, sk->sk_sndtimeo);
458 l2cap_send_disconn_req(conn, chan, reason);
460 l2cap_chan_del(chan, reason);
464 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
465 conn->hcon->type == ACL_LINK) {
466 struct l2cap_conn_rsp rsp;
/* choose the rejection code the remote will see */
469 if (bt_sk(sk)->defer_setup)
470 result = L2CAP_CR_SEC_BLOCK;
472 result = L2CAP_CR_BAD_PSM;
473 l2cap_state_change(chan, BT_DISCONN);
475 rsp.scid = cpu_to_le16(chan->dcid);
476 rsp.dcid = cpu_to_le16(chan->scid);
477 rsp.result = cpu_to_le16(result);
478 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
479 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
483 l2cap_chan_del(chan, reason);
488 l2cap_chan_del(chan, reason);
493 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel's security level to an HCI authentication type.
 * Raw channels request dedicated bonding; PSM 0x0001 (SDP) never bonds
 * (and a LOW security request is upgraded to the SDP pseudo-level);
 * everything else uses general bonding. */
499 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
501 if (chan->chan_type == L2CAP_CHAN_RAW) {
502 switch (chan->sec_level) {
503 case BT_SECURITY_HIGH:
504 return HCI_AT_DEDICATED_BONDING_MITM;
505 case BT_SECURITY_MEDIUM:
506 return HCI_AT_DEDICATED_BONDING;
508 return HCI_AT_NO_BONDING;
510 } else if (chan->psm == cpu_to_le16(0x0001)) {
511 if (chan->sec_level == BT_SECURITY_LOW)
512 chan->sec_level = BT_SECURITY_SDP;
514 if (chan->sec_level == BT_SECURITY_HIGH)
515 return HCI_AT_NO_BONDING_MITM;
517 return HCI_AT_NO_BONDING;
519 switch (chan->sec_level) {
520 case BT_SECURITY_HIGH:
521 return HCI_AT_GENERAL_BONDING_MITM;
522 case BT_SECURITY_MEDIUM:
523 return HCI_AT_GENERAL_BONDING;
525 return HCI_AT_NO_BONDING;
530 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the
 * underlying ACL/LE link. */
531 int l2cap_chan_check_security(struct l2cap_chan *chan)
533 struct l2cap_conn *conn = chan->conn;
536 auth_type = l2cap_get_auth_type(chan);
538 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling identifier for @conn under conn->lock.
 * Identifiers wrap within 1..128 (the kernel's reserved range). */
541 static u8 l2cap_get_ident(struct l2cap_conn *conn)
545 /* Get next available identificator.
546 * 1 - 128 are used by kernel.
547 * 129 - 199 are reserved.
548 * 200 - 254 are used by utilities like l2ping, etc.
551 spin_lock(&conn->lock);
553 if (++conn->tx_ident > 128)
558 spin_unlock(&conn->lock);
/* Build and transmit one signalling command on the signalling channel.
 * Commands are high priority and flagged force-active so the controller
 * leaves sniff mode to deliver them. */
563 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
565 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
568 BT_DBG("code 0x%2.2x", code);
/* prefer non-flushable ACL packets when the controller supports them */
573 if (lmp_no_flush_capable(conn->hcon->hdev))
574 flags = ACL_START_NO_FLUSH;
578 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
579 skb->priority = HCI_PRIO_MAX;
581 hci_send_acl(conn->hchan, skb, flags);
/* Transmit one data skb on @chan, honouring the channel's flushable
 * and force-active flags. */
584 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
586 struct hci_conn *hcon = chan->conn->hcon;
589 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
592 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
593 lmp_no_flush_capable(hcon->hdev))
594 flags = ACL_START_NO_FLUSH;
598 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
599 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send one ERTM S-frame carrying @control. Header size depends
 * on whether extended control fields are negotiated; an FCS trailer is
 * appended when CRC16 is in use. Pending final/poll bits are folded in
 * and cleared atomically. Only valid on a connected channel. */
602 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
605 struct l2cap_hdr *lh;
606 struct l2cap_conn *conn = chan->conn;
609 if (chan->state != BT_CONNECTED)
612 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
613 hlen = L2CAP_EXT_HDR_SIZE;
615 hlen = L2CAP_ENH_HDR_SIZE;
617 if (chan->fcs == L2CAP_FCS_CRC16)
618 hlen += L2CAP_FCS_SIZE;
620 BT_DBG("chan %p, control 0x%8.8x", chan, control);
/* S-frames carry no payload; cap at the link MTU anyway */
622 count = min_t(unsigned int, conn->mtu, hlen);
624 control |= __set_sframe(chan);
626 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
627 control |= __set_ctrl_final(chan);
629 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
630 control |= __set_ctrl_poll(chan);
632 skb = bt_skb_alloc(count, GFP_ATOMIC);
636 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
637 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
638 lh->cid = cpu_to_le16(chan->dcid);
640 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
642 if (chan->fcs == L2CAP_FCS_CRC16) {
/* FCS covers everything before the trailer, including the header */
643 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
644 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
647 skb->priority = HCI_PRIO_MAX;
648 l2cap_do_send(chan, skb);
/* Send RR, or RNR when we are locally busy (also latching RNR_SENT so a
 * later RR clears the remote's busy view). ReqSeq acknowledges up to
 * buffer_seq. */
651 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
653 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
654 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
655 set_bit(CONN_RNR_SENT, &chan->conn_state);
657 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
659 control |= __set_reqseq(chan, chan->buffer_seq);
661 l2cap_send_sframe(chan, control);
/* True when no Connect request is outstanding for this channel. */
664 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
666 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send an L2CAP Connect request for @chan, allocating a fresh ident and
 * marking the request pending. */
669 static void l2cap_send_conn_req(struct l2cap_chan *chan)
671 struct l2cap_conn *conn = chan->conn;
672 struct l2cap_conn_req req;
674 req.scid = cpu_to_le16(chan->scid);
677 chan->ident = l2cap_get_ident(conn);
679 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
681 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Kick off connection setup for @chan. If the feature-mask exchange has
 * completed, send the Connect request (once security passes and none is
 * pending); otherwise first issue an Information request and arm the
 * info timer. */
684 static void l2cap_do_start(struct l2cap_chan *chan)
686 struct l2cap_conn *conn = chan->conn;
688 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
689 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
692 if (l2cap_chan_check_security(chan) &&
693 __l2cap_no_conn_pending(chan))
694 l2cap_send_conn_req(chan);
696 struct l2cap_info_req req;
697 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
699 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
700 conn->info_ident = l2cap_get_ident(conn);
702 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
704 l2cap_send_cmd(conn, conn->info_ident,
705 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check that @mode (ERTM/streaming) is supported by both the remote
 * feature mask and our local one. */
709 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
711 u32 local_feat_mask = l2cap_feat_mask;
713 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
716 case L2CAP_MODE_ERTM:
717 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
718 case L2CAP_MODE_STREAMING:
719 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect request for @chan, first silencing all ERTM timers,
 * then move the channel to BT_DISCONN with error @err. */
725 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
727 struct sock *sk = chan->sk;
728 struct l2cap_disconn_req req;
733 if (chan->mode == L2CAP_MODE_ERTM) {
734 __clear_retrans_timer(chan);
735 __clear_monitor_timer(chan);
736 __clear_ack_timer(chan);
739 req.dcid = cpu_to_le16(chan->dcid);
740 req.scid = cpu_to_le16(chan->scid);
741 l2cap_send_cmd(conn, l2cap_get_ident(conn),
742 L2CAP_DISCONN_REQ, sizeof(req), &req);
745 __l2cap_state_change(chan, BT_DISCONN);
746 __l2cap_chan_set_err(chan, err);
750 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on @conn and advance its setup:
 * BT_CONNECT channels get a Connect request (or are closed when their
 * mode is unsupported in state-2 device mode); BT_CONNECT2 channels get
 * a Connect response whose result depends on security and defer_setup,
 * followed by the first Configure request when appropriate.
 * Runs under conn->chan_lock with each channel individually locked. */
751 static void l2cap_conn_start(struct l2cap_conn *conn)
753 struct l2cap_chan *chan, *tmp;
755 BT_DBG("conn %p", conn);
757 mutex_lock(&conn->chan_lock);
759 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
760 struct sock *sk = chan->sk;
762 l2cap_chan_lock(chan);
764 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
765 l2cap_chan_unlock(chan);
769 if (chan->state == BT_CONNECT) {
770 if (!l2cap_chan_check_security(chan) ||
771 !__l2cap_no_conn_pending(chan)) {
772 l2cap_chan_unlock(chan);
/* mode unsupported and device cannot renegotiate: give up */
776 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
777 && test_bit(CONF_STATE2_DEVICE,
778 &chan->conf_state)) {
779 l2cap_chan_close(chan, ECONNRESET);
780 l2cap_chan_unlock(chan);
784 l2cap_send_conn_req(chan);
786 } else if (chan->state == BT_CONNECT2) {
787 struct l2cap_conn_rsp rsp;
/* note: scid/dcid are swapped relative to our view on purpose */
789 rsp.scid = cpu_to_le16(chan->dcid);
790 rsp.dcid = cpu_to_le16(chan->scid);
792 if (l2cap_chan_check_security(chan)) {
794 if (bt_sk(sk)->defer_setup) {
795 struct sock *parent = bt_sk(sk)->parent;
796 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
797 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
/* wake the listener so userspace can authorize */
799 parent->sk_data_ready(parent, 0);
802 __l2cap_state_change(chan, BT_CONFIG);
803 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
804 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
808 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
809 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
812 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* only start configuration once, and only after CR_SUCCESS */
815 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
816 rsp.result != L2CAP_CR_SUCCESS) {
817 l2cap_chan_unlock(chan);
821 set_bit(CONF_REQ_SENT, &chan->conf_state);
822 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
823 l2cap_build_conf_req(chan, buf), buf);
824 chan->num_conf_req++;
827 l2cap_chan_unlock(chan);
830 mutex_unlock(&conn->chan_lock);
833 /* Find socket with cid and source bdaddr.
834 * Returns closest match, locked.
/* An exact source-address match returns immediately; a BDADDR_ANY
 * listener is remembered as the fallback "closest match". */
836 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
838 struct l2cap_chan *c, *c1 = NULL;
840 read_lock(&chan_list_lock);
842 list_for_each_entry(c, &chan_list, global_l) {
843 struct sock *sk = c->sk;
845 if (state && c->state != state)
848 if (c->scid == cid) {
850 if (!bacmp(&bt_sk(sk)->src, src)) {
851 read_unlock(&chan_list_lock);
856 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
861 read_unlock(&chan_list_lock);
/* Incoming LE connection: find a listener on the LE data CID, spawn a
 * child channel, enqueue it on the parent's accept queue and mark it
 * connected. NOTE(review): parent locking and error-exit lines are
 * missing from this extraction. */
866 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
868 struct sock *parent, *sk;
869 struct l2cap_chan *chan, *pchan;
873 /* Check if we have socket listening on cid */
874 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
883 /* Check for backlog size */
884 if (sk_acceptq_is_full(parent)) {
885 BT_DBG("backlog full %d", parent->sk_ack_backlog);
889 chan = pchan->ops->new_connection(pchan->data);
/* the child channel pins the hci_conn; released in l2cap_chan_del() */
895 hci_conn_hold(conn->hcon);
897 bacpy(&bt_sk(sk)->src, conn->src);
898 bacpy(&bt_sk(sk)->dst, conn->dst);
900 bt_accept_enqueue(parent, sk);
902 l2cap_chan_add(conn, chan);
904 __set_chan_timer(chan, sk->sk_sndtimeo);
906 __l2cap_state_change(chan, BT_CONNECTED);
907 parent->sk_data_ready(parent, 0);
910 release_sock(parent);
/* Finish channel setup: clear configuration state and the channel timer,
 * move to BT_CONNECTED and wake the socket (and its accepting parent,
 * if any). */
913 static void l2cap_chan_ready(struct l2cap_chan *chan)
915 struct sock *sk = chan->sk;
920 parent = bt_sk(sk)->parent;
922 BT_DBG("sk %p, parent %p", sk, parent);
924 chan->conf_state = 0;
925 __clear_chan_timer(chan);
927 __l2cap_state_change(chan, BT_CONNECTED);
928 sk->sk_state_change(sk);
931 parent->sk_data_ready(parent, 0);
/* The underlying HCI link came up: handle LE incoming/outgoing security,
 * then walk all channels — LE channels become ready once SMP security
 * passes, non-connection-oriented channels are marked connected
 * immediately, and BT_CONNECT channels continue setup. */
936 static void l2cap_conn_ready(struct l2cap_conn *conn)
938 struct l2cap_chan *chan;
940 BT_DBG("conn %p", conn);
942 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
943 l2cap_le_conn_ready(conn);
945 if (conn->hcon->out && conn->hcon->type == LE_LINK)
946 smp_conn_security(conn, conn->hcon->pending_sec_level);
948 mutex_lock(&conn->chan_lock);
950 list_for_each_entry(chan, &conn->chan_l, list) {
952 l2cap_chan_lock(chan);
954 if (conn->hcon->type == LE_LINK) {
955 if (smp_conn_security(conn, chan->sec_level))
956 l2cap_chan_ready(chan);
958 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
959 struct sock *sk = chan->sk;
960 __clear_chan_timer(chan);
962 __l2cap_state_change(chan, BT_CONNECTED);
963 sk->sk_state_change(sk);
966 } else if (chan->state == BT_CONNECT)
967 l2cap_do_start(chan);
969 l2cap_chan_unlock(chan);
972 mutex_unlock(&conn->chan_lock);
975 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate @err to every channel that demanded reliable delivery. */
976 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
978 struct l2cap_chan *chan;
980 BT_DBG("conn %p", conn);
982 mutex_lock(&conn->chan_lock);
984 list_for_each_entry(chan, &conn->chan_l, list) {
985 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
986 __l2cap_chan_set_err(chan, err);
989 mutex_unlock(&conn->chan_lock);
/* Info-request timer expired: give up on the feature exchange, mark it
 * done and let pending channels proceed with whatever we know. */
992 static void l2cap_info_timeout(struct work_struct *work)
994 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
997 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
998 conn->info_ident = 0;
1000 l2cap_conn_start(conn);
/* Tear down the whole L2CAP connection on @hcon: delete every channel
 * with error @err, release the HCI channel, cancel outstanding timers
 * (info timer / SMP security timer) and free the conn.
 * NOTE(review): the kfree(conn) and early-return lines are missing from
 * this extraction. */
1003 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1005 struct l2cap_conn *conn = hcon->l2cap_data;
1006 struct l2cap_chan *chan, *l;
1011 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* drop any partially reassembled inbound frame */
1013 kfree_skb(conn->rx_skb);
1015 mutex_lock(&conn->chan_lock);
1018 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1019 l2cap_chan_lock(chan);
1021 l2cap_chan_del(chan, err);
1023 l2cap_chan_unlock(chan);
1025 chan->ops->close(chan->data);
1028 mutex_unlock(&conn->chan_lock);
1030 hci_chan_del(conn->hchan);
1032 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1033 cancel_delayed_work_sync(&conn->info_timer);
1035 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1036 cancel_delayed_work_sync(&conn->security_timer);
1037 smp_chan_destroy(conn);
1040 hcon->l2cap_data = NULL;
/* SMP security procedure timed out: drop the whole connection. */
1044 static void security_timeout(struct work_struct *work)
1046 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1047 security_timer.work);
1049 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon: allocate an HCI
 * channel, size the MTU from the link type, initialise locks, the
 * channel list and the appropriate delayed work (SMP security timer for
 * LE, info timer for ACL).
 * NOTE(review): the early "return conn if already set" and failure
 * paths are missing from this extraction. */
1052 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1054 struct l2cap_conn *conn = hcon->l2cap_data;
1055 struct hci_chan *hchan;
1060 hchan = hci_chan_create(hcon);
1064 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
/* allocation failed: release the hci_chan created above */
1066 hci_chan_del(hchan);
1070 hcon->l2cap_data = conn;
1072 conn->hchan = hchan;
1074 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1076 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1077 conn->mtu = hcon->hdev->le_mtu;
1079 conn->mtu = hcon->hdev->acl_mtu;
1081 conn->src = &hcon->hdev->bdaddr;
1082 conn->dst = &hcon->dst;
1084 conn->feat_mask = 0;
1086 spin_lock_init(&conn->lock);
1087 mutex_init(&conn->chan_lock);
1089 INIT_LIST_HEAD(&conn->chan_l);
1091 if (hcon->type == LE_LINK)
1092 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1094 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1096 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1101 /* ---- Socket interface ---- */
1103 /* Find socket with psm and source bdaddr.
1104 * Returns closest match.
/* Same closest-match strategy as l2cap_global_chan_by_scid(): an exact
 * source-address hit wins, a BDADDR_ANY listener is the fallback. */
1106 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1108 struct l2cap_chan *c, *c1 = NULL;
1110 read_lock(&chan_list_lock);
1112 list_for_each_entry(c, &chan_list, global_l) {
1113 struct sock *sk = c->sk;
1115 if (state && c->state != state)
1118 if (c->psm == psm) {
1120 if (!bacmp(&bt_sk(sk)->src, src)) {
1121 read_unlock(&chan_list_lock);
1126 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1131 read_unlock(&chan_list_lock);
/* Connect @chan to @dst on (@psm | @cid): validate the PSM/mode, route
 * to a local adapter, create the ACL or LE link, attach the channel to
 * the resulting l2cap_conn and either complete immediately (link already
 * up) or wait in BT_CONNECT with the channel timer armed.
 * Returns 0 or a negative errno.
 * NOTE(review): several validation/exit lines ("goto done", hci_dev_lock,
 * state checks) are missing from this extraction. */
1136 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1138 struct sock *sk = chan->sk;
1139 bdaddr_t *src = &bt_sk(sk)->src;
1140 struct l2cap_conn *conn;
1141 struct hci_conn *hcon;
1142 struct hci_dev *hdev;
1146 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1149 hdev = hci_get_route(dst, src);
1151 return -EHOSTUNREACH;
1155 l2cap_chan_lock(chan);
1157 /* PSM must be odd and lsb of upper byte must be 0 */
1158 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1159 chan->chan_type != L2CAP_CHAN_RAW) {
/* connection-oriented channels need either a PSM or a fixed CID */
1164 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1169 switch (chan->mode) {
1170 case L2CAP_MODE_BASIC:
1172 case L2CAP_MODE_ERTM:
1173 case L2CAP_MODE_STREAMING:
1184 switch (sk->sk_state) {
1188 /* Already connecting */
1194 /* Already connected */
1210 /* Set destination address and psm */
1211 bacpy(&bt_sk(sk)->dst, dst);
1218 auth_type = l2cap_get_auth_type(chan);
1220 if (chan->dcid == L2CAP_CID_LE_DATA)
1221 hcon = hci_connect(hdev, LE_LINK, dst,
1222 chan->sec_level, auth_type);
1224 hcon = hci_connect(hdev, ACL_LINK, dst,
1225 chan->sec_level, auth_type);
1228 err = PTR_ERR(hcon);
1232 conn = l2cap_conn_add(hcon, 0);
1239 /* Update source addr of the socket */
1240 bacpy(src, conn->src);
/* l2cap_chan_add takes conn->chan_lock, so drop the channel lock
 * around it to preserve the chan_lock -> chan lock ordering */
1242 l2cap_chan_unlock(chan);
1243 l2cap_chan_add(conn, chan);
1244 l2cap_chan_lock(chan);
1246 l2cap_state_change(chan, BT_CONNECT);
1247 __set_chan_timer(chan, sk->sk_sndtimeo);
1249 if (hcon->state == BT_CONNECTED) {
1250 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1251 __clear_chan_timer(chan);
1252 if (l2cap_chan_check_security(chan))
1253 l2cap_state_change(chan, BT_CONNECTED);
1255 l2cap_do_start(chan);
1261 l2cap_chan_unlock(chan);
1262 hci_dev_unlock(hdev);
/* Sleep (interruptibly) until every outstanding ERTM frame on the
 * socket's channel has been acknowledged, a signal arrives, or a socket
 * error is raised. Classic wait-queue loop around schedule_timeout().
 * NOTE(review): the timeo initialisation and break statements are
 * missing from this extraction. */
1267 int __l2cap_wait_ack(struct sock *sk)
1269 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1270 DECLARE_WAITQUEUE(wait, current);
1274 add_wait_queue(sk_sleep(sk), &wait);
1275 set_current_state(TASK_INTERRUPTIBLE);
1276 while (chan->unacked_frames > 0 && chan->conn) {
1280 if (signal_pending(current)) {
1281 err = sock_intr_errno(timeo);
1286 timeo = schedule_timeout(timeo);
1288 set_current_state(TASK_INTERRUPTIBLE);
1290 err = sock_error(sk);
1294 set_current_state(TASK_RUNNING);
1295 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer fired: if the remote stopped responding past
 * remote_max_tx retries, disconnect; otherwise poll again (RR/RNR with
 * the P bit) and re-arm. The timer holds a channel reference, dropped
 * on every exit path. */
1299 static void l2cap_monitor_timeout(struct work_struct *work)
1301 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1302 monitor_timer.work);
1304 BT_DBG("chan %p", chan);
1306 l2cap_chan_lock(chan);
1308 if (chan->retry_count >= chan->remote_max_tx) {
1309 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1310 l2cap_chan_unlock(chan);
1311 l2cap_chan_put(chan);
1315 chan->retry_count++;
1316 __set_monitor_timer(chan);
1318 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1319 l2cap_chan_unlock(chan);
1320 l2cap_chan_put(chan);
/* ERTM retransmission timer fired: switch to the monitor/WAIT_F phase
 * and poll the remote with the P bit set. */
1323 static void l2cap_retrans_timeout(struct work_struct *work)
1325 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1326 retrans_timer.work);
1328 BT_DBG("chan %p", chan);
1330 l2cap_chan_lock(chan);
1332 chan->retry_count = 1;
1333 __set_monitor_timer(chan);
1335 set_bit(CONN_WAIT_F, &chan->conn_state);
1337 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1339 l2cap_chan_unlock(chan);
1340 l2cap_chan_put(chan);
/* Drop frames from the head of tx_q that the peer has acknowledged,
 * stopping at expected_ack_seq; clear the retransmission timer once
 * nothing remains unacked.
 * NOTE(review): the kfree_skb and expected_ack_seq update lines are
 * missing from this extraction. */
1343 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1345 struct sk_buff *skb;
1347 while ((skb = skb_peek(&chan->tx_q)) &&
1348 chan->unacked_frames) {
1349 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1352 skb = skb_dequeue(&chan->tx_q);
1355 chan->unacked_frames--;
1358 if (!chan->unacked_frames)
1359 __clear_retrans_timer(chan);
/* Streaming mode: drain tx_q, stamping each frame's control field with
 * the next TxSeq, recomputing the FCS over the final bytes when CRC16
 * is enabled, and transmitting. No retransmission state is kept. */
1362 static void l2cap_streaming_send(struct l2cap_chan *chan)
1364 struct sk_buff *skb;
1368 while ((skb = skb_dequeue(&chan->tx_q))) {
1369 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1370 control |= __set_txseq(chan, chan->next_tx_seq);
1371 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1373 if (chan->fcs == L2CAP_FCS_CRC16) {
/* FCS is recomputed because the control field just changed */
1374 fcs = crc16(0, (u8 *)skb->data,
1375 skb->len - L2CAP_FCS_SIZE);
1376 put_unaligned_le16(fcs,
1377 skb->data + skb->len - L2CAP_FCS_SIZE);
1380 l2cap_do_send(chan, skb);
1382 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single I-frame with sequence number @tx_seq: locate it
 * in tx_q, give up (disconnect) once remote_max_tx retries are reached,
 * otherwise clone it, refresh ReqSeq/TxSeq/F-bit in the control field,
 * fix up the FCS and resend. The original skb stays queued. */
1386 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1388 struct sk_buff *skb, *tx_skb;
1392 skb = skb_peek(&chan->tx_q);
1396 while (bt_cb(skb)->tx_seq != tx_seq) {
1397 if (skb_queue_is_last(&chan->tx_q, skb))
1400 skb = skb_queue_next(&chan->tx_q, skb);
1403 if (chan->remote_max_tx &&
1404 bt_cb(skb)->retries == chan->remote_max_tx) {
1405 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* clone so the queued original survives for further retransmits */
1409 tx_skb = skb_clone(skb, GFP_ATOMIC);
1410 bt_cb(skb)->retries++;
1412 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
/* keep only the SAR bits; sequence/ack fields are rebuilt below */
1413 control &= __get_sar_mask(chan);
1415 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1416 control |= __set_ctrl_final(chan);
1418 control |= __set_reqseq(chan, chan->buffer_seq);
1419 control |= __set_txseq(chan, tx_seq);
1421 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1423 if (chan->fcs == L2CAP_FCS_CRC16) {
1424 fcs = crc16(0, (u8 *)tx_skb->data,
1425 tx_skb->len - L2CAP_FCS_SIZE);
1426 put_unaligned_le16(fcs,
1427 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1430 l2cap_do_send(chan, tx_skb);
/* Transmit queued I-frames from tx_send_head while the tx window has
 * room: clone each frame, stamp control/FCS, start the retransmission
 * timer, account unacked_frames and advance tx_send_head.
 * NOTE(review): upstream computes the FCS over tx_skb->data; the
 * visible lines at 1467/1469 use skb->data — as clones share data this
 * is equivalent, but confirm against the full source. */
1433 static int l2cap_ertm_send(struct l2cap_chan *chan)
1435 struct sk_buff *skb, *tx_skb;
1440 if (chan->state != BT_CONNECTED)
1443 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1445 if (chan->remote_max_tx &&
1446 bt_cb(skb)->retries == chan->remote_max_tx) {
1447 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1451 tx_skb = skb_clone(skb, GFP_ATOMIC);
1453 bt_cb(skb)->retries++;
1455 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1456 control &= __get_sar_mask(chan);
1458 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1459 control |= __set_ctrl_final(chan);
1461 control |= __set_reqseq(chan, chan->buffer_seq);
1462 control |= __set_txseq(chan, chan->next_tx_seq);
1464 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1466 if (chan->fcs == L2CAP_FCS_CRC16) {
1467 fcs = crc16(0, (u8 *)skb->data,
1468 tx_skb->len - L2CAP_FCS_SIZE);
1469 put_unaligned_le16(fcs, skb->data +
1470 tx_skb->len - L2CAP_FCS_SIZE);
1473 l2cap_do_send(chan, tx_skb);
1475 __set_retrans_timer(chan);
1477 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1479 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* first transmission of this frame: it becomes unacked */
1481 if (bt_cb(skb)->retries == 1) {
1482 chan->unacked_frames++;
1485 __clear_ack_timer(chan);
1488 chan->frames_sent++;
1490 if (skb_queue_is_last(&chan->tx_q, skb))
1491 chan->tx_send_head = NULL;
1493 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the oldest unacked frame and retransmit
 * the whole window via l2cap_ertm_send(). */
1499 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1503 if (!skb_queue_empty(&chan->tx_q))
1504 chan->tx_send_head = chan->tx_q.next;
1506 chan->next_tx_seq = chan->expected_ack_seq;
1507 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR when locally busy; otherwise
 * try to piggyback the ack on pending I-frames, falling back to an
 * explicit RR S-frame when nothing was sent. */
1511 static void __l2cap_send_ack(struct l2cap_chan *chan)
1515 control |= __set_reqseq(chan, chan->buffer_seq);
1517 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1518 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1519 set_bit(CONN_RNR_SENT, &chan->conn_state);
1520 l2cap_send_sframe(chan, control);
/* data frames carried the ack; no explicit S-frame needed */
1524 if (l2cap_ertm_send(chan) > 0)
1527 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1528 l2cap_send_sframe(chan, control);
/* Ack immediately, cancelling any delayed-ack timer first. */
1531 static void l2cap_send_ack(struct l2cap_chan *chan)
1533 __clear_ack_timer(chan);
1534 __l2cap_send_ack(chan);
/* Send an SREJ S-frame (with F bit) for the newest entry on the SREJ
 * list — i.e. the tail of chan->srej_l. */
1537 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1539 struct srej_list *tail;
1542 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1543 control |= __set_ctrl_final(chan);
1545 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1546 control |= __set_reqseq(chan, tail->tx_seq);
1548 l2cap_send_sframe(chan, control);
/* Copy `len` bytes of user iovec data into `skb`: the first `count` bytes go
 * into skb itself, the remainder is split into HCI-MTU-sized fragment skbs
 * chained on skb's frag_list.
 * NOTE(review): error paths and the fragment loop construct look truncated
 * here (no visible returns/loop) — verify against upstream l2cap_core.c. */
1551 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1552 struct msghdr *msg, int len,
1553 int count, struct sk_buff *skb)
1555 struct l2cap_conn *conn = chan->conn;
1556 struct sk_buff **frag;
1559 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1565 /* Continuation fragments (no L2CAP header) */
1566 frag = &skb_shinfo(skb)->frag_list;
/* Each fragment is capped at the connection (ACL) MTU. */
1568 count = min_t(unsigned int, conn->mtu, len);
1570 *frag = chan->ops->alloc_skb(chan, count,
1571 msg->msg_flags & MSG_DONTWAIT,
1576 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
/* Fragments inherit the head skb's queueing priority. */
1579 (*frag)->priority = skb->priority;
1584 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM followed
 * by the user payload pulled from `msg`. Returns the skb or ERR_PTR(-err). */
1590 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1591 struct msghdr *msg, size_t len,
1594 struct l2cap_conn *conn = chan->conn;
1595 struct sk_buff *skb;
/* Header = basic L2CAP header plus the PSM field unique to G-frames. */
1596 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1597 struct l2cap_hdr *lh;
1599 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
/* First skb carries header + as much payload as fits in one ACL MTU. */
1601 count = min_t(unsigned int, (conn->mtu - hlen), len);
1603 skb = chan->ops->alloc_skb(chan, count + hlen,
1604 msg->msg_flags & MSG_DONTWAIT, &err);
1607 return ERR_PTR(err);
1609 skb->priority = priority;
1611 /* Create L2CAP header */
1612 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1613 lh->cid = cpu_to_le16(chan->dcid);
/* Length covers payload plus the PSM, not the basic header itself. */
1614 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1615 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1617 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1618 if (unlikely(err < 0)) {
1620 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the user
 * payload from `msg`. Returns the skb or ERR_PTR(-err). */
1625 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1626 struct msghdr *msg, size_t len,
1629 struct l2cap_conn *conn = chan->conn;
1630 struct sk_buff *skb;
1631 int err, count, hlen = L2CAP_HDR_SIZE;
1632 struct l2cap_hdr *lh;
1634 BT_DBG("chan %p len %d", chan, (int)len);
/* First skb carries header + as much payload as fits in one ACL MTU. */
1636 count = min_t(unsigned int, (conn->mtu - hlen), len);
1638 skb = chan->ops->alloc_skb(chan, count + hlen,
1639 msg->msg_flags & MSG_DONTWAIT, &err);
1642 return ERR_PTR(err);
1644 skb->priority = priority;
1646 /* Create L2CAP header */
1647 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1648 lh->cid = cpu_to_le16(chan->dcid);
1649 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1651 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1652 if (unlikely(err < 0)) {
1654 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, enhanced or extended
 * control field, optional SDU-length field (for SAR start frames), payload,
 * and a zeroed FCS placeholder when CRC16 is configured.
 * Returns the skb or ERR_PTR(-err). */
1659 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1660 struct msghdr *msg, size_t len,
1661 u32 control, u16 sdulen)
1663 struct l2cap_conn *conn = chan->conn;
1664 struct sk_buff *skb;
1665 int err, count, hlen;
1666 struct l2cap_hdr *lh;
1668 BT_DBG("chan %p len %d", chan, (int)len);
1671 return ERR_PTR(-ENOTCONN);
/* Extended control (4-byte) vs enhanced control (2-byte) header. */
1673 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1674 hlen = L2CAP_EXT_HDR_SIZE;
1676 hlen = L2CAP_ENH_HDR_SIZE;
/* SDU length field is only present on SAR "start" frames (sdulen != 0,
 * presumably — condition line not visible in this chunk). */
1679 hlen += L2CAP_SDULEN_SIZE;
1681 if (chan->fcs == L2CAP_FCS_CRC16)
1682 hlen += L2CAP_FCS_SIZE;
1684 count = min_t(unsigned int, (conn->mtu - hlen), len);
1686 skb = chan->ops->alloc_skb(chan, count + hlen,
1687 msg->msg_flags & MSG_DONTWAIT, &err);
1690 return ERR_PTR(err);
1692 /* Create L2CAP header */
1693 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1694 lh->cid = cpu_to_le16(chan->dcid);
1695 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1697 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1700 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1702 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1703 if (unlikely(err < 0)) {
1705 return ERR_PTR(err);
/* FCS placeholder; the real CRC is filled in at transmit time. */
1708 if (chan->fcs == L2CAP_FCS_CRC16)
1709 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1711 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START frame (carrying the
 * total SDU length) followed by CONTINUE frames and a final END frame, built
 * on a local queue and spliced onto the channel tx queue only on success. */
1715 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1717 struct sk_buff *skb;
1718 struct sk_buff_head sar_queue;
1722 skb_queue_head_init(&sar_queue);
/* START frame carries the full SDU length as sdulen. */
1723 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1724 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1726 return PTR_ERR(skb);
1728 __skb_queue_tail(&sar_queue, skb);
1729 len -= chan->remote_mps;
1730 size += chan->remote_mps;
/* Middle vs last segment: anything still larger than one MPS continues. */
1735 if (len > chan->remote_mps) {
1736 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1737 buflen = chan->remote_mps;
1739 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1743 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure drop every segment built so far — all-or-nothing. */
1745 skb_queue_purge(&sar_queue);
1746 return PTR_ERR(skb);
1749 __skb_queue_tail(&sar_queue, skb);
1753 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1754 if (chan->tx_send_head == NULL)
1755 chan->tx_send_head = sar_queue.next;
/* Top-level channel send entry point: dispatch on channel type and mode.
 * Connectionless channels send a single G-frame; basic mode sends one
 * B-frame (bounded by outgoing MTU); ERTM/streaming queue one I-frame or a
 * SAR-segmented set and then kick the respective transmit engine. */
1760 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1763 struct sk_buff *skb;
1767 /* Connectionless channel */
1768 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1769 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1771 return PTR_ERR(skb);
1773 l2cap_do_send(chan, skb);
1777 switch (chan->mode) {
1778 case L2CAP_MODE_BASIC:
1779 /* Check outgoing MTU */
1780 if (len > chan->omtu)
1783 /* Create a basic PDU */
1784 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1786 return PTR_ERR(skb);
1788 l2cap_do_send(chan, skb);
1792 case L2CAP_MODE_ERTM:
1793 case L2CAP_MODE_STREAMING:
1794 /* Entire SDU fits into one PDU */
1795 if (len <= chan->remote_mps) {
1796 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1797 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1800 return PTR_ERR(skb);
1802 __skb_queue_tail(&chan->tx_q, skb);
1804 if (chan->tx_send_head == NULL)
1805 chan->tx_send_head = skb;
1808 /* Segment SDU into multiples PDUs */
1809 err = l2cap_sar_segment_sdu(chan, msg, len);
1814 if (chan->mode == L2CAP_MODE_STREAMING) {
1815 l2cap_streaming_send(chan);
/* Hold off ERTM transmission while remote is busy or while a
 * final-bit response is still outstanding. */
1820 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1821 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1826 err = l2cap_ertm_send(chan);
1833 BT_DBG("bad state %1.1x", chan->mode);
1840 /* Copy frame to all raw sockets on that connection */
1841 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1843 struct sk_buff *nskb;
1844 struct l2cap_chan *chan;
1846 BT_DBG("conn %p", conn);
1848 mutex_lock(&conn->chan_lock);
1850 list_for_each_entry(chan, &conn->chan_l, list) {
1851 struct sock *sk = chan->sk;
1852 if (chan->chan_type != L2CAP_CHAN_RAW)
1855 /* Don't send frame to the socket it came from */
1858 nskb = skb_clone(skb, GFP_ATOMIC);
1862 if (chan->ops->recv(chan->data, nskb))
1866 mutex_unlock(&conn->chan_lock);
1869 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling PDU: L2CAP header on the signalling CID
 * (LE or BR/EDR), command header (code/ident/len), then `dlen` bytes of
 * `data`, fragmented across frag_list skbs when larger than the ACL MTU. */
1870 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1871 u8 code, u8 ident, u16 dlen, void *data)
1873 struct sk_buff *skb, **frag;
1874 struct l2cap_cmd_hdr *cmd;
1875 struct l2cap_hdr *lh;
1878 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1879 conn, code, ident, dlen);
1881 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1882 count = min_t(unsigned int, conn->mtu, len);
1884 skb = bt_skb_alloc(count, GFP_ATOMIC);
1888 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1889 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling channel CID differs between LE and BR/EDR links. */
1891 if (conn->hcon->type == LE_LINK)
1892 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1894 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1896 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1899 cmd->len = cpu_to_le16(dlen);
/* Payload that fits in the first skb. */
1902 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1903 memcpy(skb_put(skb, count), data, count);
1909 /* Continuation fragments (no L2CAP header) */
1910 frag = &skb_shinfo(skb)->frag_list;
1912 count = min_t(unsigned int, conn->mtu, len);
1914 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1918 memcpy(skb_put(*frag, count), data, count);
1923 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: return its type, length and value
 * (1/2/4-byte values decoded inline, larger ones returned as a pointer cast
 * into *val). Returns the total option size so the caller can advance. */
1933 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1935 struct l2cap_conf_opt *opt = *ptr;
1938 len = L2CAP_CONF_OPT_SIZE + opt->len;
1946 *val = *((u8 *) opt->val);
1950 *val = get_unaligned_le16(opt->val);
1954 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer, not a decoded value. */
1958 *val = (unsigned long) opt->val;
1962 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance *ptr
 * past it. `val` is a small integer for len 1/2/4, otherwise a pointer to
 * `len` bytes to memcpy. Caller guarantees buffer space. */
1966 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1968 struct l2cap_conf_opt *opt = *ptr;
1970 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1977 *((u8 *) opt->val) = val;
1981 put_unaligned_le16(val, opt->val);
1985 put_unaligned_le32(val, opt->val);
/* Larger options: val is actually a pointer to the option payload. */
1989 memcpy(opt->val, (void *) val, len);
1993 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters. ERTM advertises the full local spec;
 * streaming mode advertises best-effort with default latency/flush values
 * (the defaulted fields are on lines not visible in this chunk). */
1996 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1998 struct l2cap_conf_efs efs;
2000 switch (chan->mode) {
2001 case L2CAP_MODE_ERTM:
2002 efs.id = chan->local_id;
2003 efs.stype = chan->local_stype;
2004 efs.msdu = cpu_to_le16(chan->local_msdu);
2005 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2006 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2007 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2010 case L2CAP_MODE_STREAMING:
2012 efs.stype = L2CAP_SERV_BESTEFFORT;
2013 efs.msdu = cpu_to_le16(chan->local_msdu);
2014 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2023 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2024 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: send the pending ack under
 * the channel lock, then drop the reference taken when the timer was armed. */
2027 static void l2cap_ack_timeout(struct work_struct *work)
2029 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2032 BT_DBG("chan %p", chan);
2034 l2cap_chan_lock(chan);
2036 __l2cap_send_ack(chan);
2038 l2cap_chan_unlock(chan);
/* Balances the hold taken when the ack timer was scheduled. */
2040 l2cap_chan_put(chan);
/* Reset ERTM per-channel state (sequence counters, timers, SREJ queues)
 * when the channel enters connected state in ERTM mode. */
2043 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2045 chan->expected_ack_seq = 0;
2046 chan->unacked_frames = 0;
2047 chan->buffer_seq = 0;
2048 chan->num_acked = 0;
2049 chan->frames_sent = 0;
2051 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2052 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2053 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
/* Out-of-order frames parked here until the gap is filled. */
2055 skb_queue_head_init(&chan->srej_q);
2057 INIT_LIST_HEAD(&chan->srej_l);
/* Choose the channel mode: keep ERTM/streaming only if the remote feature
 * mask supports it, otherwise fall back to basic mode. (The `return mode;`
 * for the supported case is on a line not visible in this chunk.) */
2060 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2063 case L2CAP_MODE_STREAMING:
2064 case L2CAP_MODE_ERTM:
2065 if (l2cap_mode_supported(mode, remote_feat_mask))
2069 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled and
 * the remote advertising the feature. */
2073 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2075 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed support enabled
 * and the remote advertising the feature. */
2078 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2080 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Pick the TX window limits: switch to the 4-byte extended control field
 * when a window larger than the default is requested and EWS is supported,
 * otherwise clamp to the standard default window. */
2083 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2085 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2086 __l2cap_ews_supported(chan)) {
2087 /* use extended control field */
2088 set_bit(FLAG_EXT_CTRL, &chan->flags);
2089 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2091 chan->tx_win = min_t(u16, chan->tx_win,
2092 L2CAP_DEFAULT_TX_WINDOW);
2093 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request into `data`: MTU option when it
 * differs from the default, then the mode-specific RFC option plus EFS,
 * FCS and EWS options as applicable. Returns the request length
 * (computed on a line not visible in this chunk). */
2097 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2099 struct l2cap_conf_req *req = data;
2100 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2101 void *ptr = req->data;
2104 BT_DBG("chan %p", chan);
/* Only negotiate the mode down on the first request/response round. */
2106 if (chan->num_conf_req || chan->num_conf_rsp)
2109 switch (chan->mode) {
2110 case L2CAP_MODE_STREAMING:
2111 case L2CAP_MODE_ERTM:
2112 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2115 if (__l2cap_efs_supported(chan))
2116 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2120 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2125 if (chan->imtu != L2CAP_DEFAULT_MTU)
2126 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2128 switch (chan->mode) {
2129 case L2CAP_MODE_BASIC:
/* Basic mode needs no RFC option unless the peer knows ERTM or
 * streaming and might otherwise pick them. */
2130 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2131 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2134 rfc.mode = L2CAP_MODE_BASIC;
2136 rfc.max_transmit = 0;
2137 rfc.retrans_timeout = 0;
2138 rfc.monitor_timeout = 0;
2139 rfc.max_pdu_size = 0;
2141 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2142 (unsigned long) &rfc);
2145 case L2CAP_MODE_ERTM:
2146 rfc.mode = L2CAP_MODE_ERTM;
2147 rfc.max_transmit = chan->max_tx;
/* Timeouts are filled in by the acceptor side, not the initiator. */
2148 rfc.retrans_timeout = 0;
2149 rfc.monitor_timeout = 0;
2151 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2152 L2CAP_EXT_HDR_SIZE -
2155 rfc.max_pdu_size = cpu_to_le16(size);
2157 l2cap_txwin_setup(chan);
2159 rfc.txwin_size = min_t(u16, chan->tx_win,
2160 L2CAP_DEFAULT_TX_WINDOW);
2162 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2163 (unsigned long) &rfc);
2165 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2166 l2cap_add_opt_efs(&ptr, chan);
2168 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2171 if (chan->fcs == L2CAP_FCS_NONE ||
2172 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2173 chan->fcs = L2CAP_FCS_NONE;
2174 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
/* Extended window option only when the extended control field is on. */
2177 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2178 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2182 case L2CAP_MODE_STREAMING:
2183 rfc.mode = L2CAP_MODE_STREAMING;
2185 rfc.max_transmit = 0;
2186 rfc.retrans_timeout = 0;
2187 rfc.monitor_timeout = 0;
2189 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2190 L2CAP_EXT_HDR_SIZE -
2193 rfc.max_pdu_size = cpu_to_le16(size);
2195 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2196 (unsigned long) &rfc);
2198 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2199 l2cap_add_opt_efs(&ptr, chan);
2201 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2204 if (chan->fcs == L2CAP_FCS_NONE ||
2205 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2206 chan->fcs = L2CAP_FCS_NONE;
2207 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2212 req->dcid = cpu_to_le16(chan->dcid);
2213 req->flags = cpu_to_le16(0);
/* Parse the accumulated Configure Request (chan->conf_req) and build the
 * Configure Response into `data`: walk every option, record what the peer
 * asked for, then accept/adjust/reject and echo back our settings.
 * Returns the response length (computed on a line not visible here) or a
 * negative errno to trigger disconnect. */
2218 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2220 struct l2cap_conf_rsp *rsp = data;
2221 void *ptr = rsp->data;
2222 void *req = chan->conf_req;
2223 int len = chan->conf_len;
2224 int type, hint, olen;
2226 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2227 struct l2cap_conf_efs efs;
2229 u16 mtu = L2CAP_DEFAULT_MTU;
2230 u16 result = L2CAP_CONF_SUCCESS;
2233 BT_DBG("chan %p", chan);
/* ---- Pass 1: decode every option in the request ---- */
2235 while (len >= L2CAP_CONF_OPT_SIZE) {
2236 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; non-hints must be understood. */
2238 hint = type & L2CAP_CONF_HINT;
2239 type &= L2CAP_CONF_MASK;
2242 case L2CAP_CONF_MTU:
2246 case L2CAP_CONF_FLUSH_TO:
2247 chan->flush_to = val;
2250 case L2CAP_CONF_QOS:
2253 case L2CAP_CONF_RFC:
2254 if (olen == sizeof(rfc))
2255 memcpy(&rfc, (void *) val, olen);
2258 case L2CAP_CONF_FCS:
2259 if (val == L2CAP_FCS_NONE)
2260 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2263 case L2CAP_CONF_EFS:
2265 if (olen == sizeof(efs))
2266 memcpy(&efs, (void *) val, olen);
2269 case L2CAP_CONF_EWS:
/* EWS without high-speed support is a hard refusal. */
2271 return -ECONNREFUSED;
2273 set_bit(FLAG_EXT_CTRL, &chan->flags);
2274 set_bit(CONF_EWS_RECV, &chan->conf_state);
2275 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2276 chan->remote_tx_win = val;
/* Unknown non-hint option: list it in the response. */
2283 result = L2CAP_CONF_UNKNOWN;
2284 *((u8 *) ptr++) = type;
2289 if (chan->num_conf_rsp || chan->num_conf_req > 1)
/* ---- Mode reconciliation (first negotiation round only) ---- */
2292 switch (chan->mode) {
2293 case L2CAP_MODE_STREAMING:
2294 case L2CAP_MODE_ERTM:
2295 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2296 chan->mode = l2cap_select_mode(rfc.mode,
2297 chan->conn->feat_mask);
2302 if (__l2cap_efs_supported(chan))
2303 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2305 return -ECONNREFUSED;
2308 if (chan->mode != rfc.mode)
2309 return -ECONNREFUSED;
2315 if (chan->mode != rfc.mode) {
2316 result = L2CAP_CONF_UNACCEPT;
2317 rfc.mode = chan->mode;
/* After one response the peer should have converged — give up. */
2319 if (chan->num_conf_rsp == 1)
2320 return -ECONNREFUSED;
2322 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2323 sizeof(rfc), (unsigned long) &rfc);
2326 if (result == L2CAP_CONF_SUCCESS) {
2327 /* Configure output options and let the other side know
2328 * which ones we don't like. */
2330 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2331 result = L2CAP_CONF_UNACCEPT;
2334 set_bit(CONF_MTU_DONE, &chan->conf_state);
2336 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must be compatible with our local type
 * (NOTRAFIC is always acceptable). */
2339 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2340 efs.stype != L2CAP_SERV_NOTRAFIC &&
2341 efs.stype != chan->local_stype) {
2343 result = L2CAP_CONF_UNACCEPT;
2345 if (chan->num_conf_req >= 1)
2346 return -ECONNREFUSED;
2348 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2350 (unsigned long) &efs);
2352 /* Send PENDING Conf Rsp */
2353 result = L2CAP_CONF_PENDING;
2354 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
/* ---- Per-mode acceptance and echo of negotiated values ---- */
2359 case L2CAP_MODE_BASIC:
2360 chan->fcs = L2CAP_FCS_NONE;
2361 set_bit(CONF_MODE_DONE, &chan->conf_state);
2364 case L2CAP_MODE_ERTM:
2365 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2366 chan->remote_tx_win = rfc.txwin_size;
2368 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2370 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's MPS to what fits in our ACL MTU. */
2372 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2374 L2CAP_EXT_HDR_SIZE -
2377 rfc.max_pdu_size = cpu_to_le16(size);
2378 chan->remote_mps = size;
/* NOTE(review): le16_to_cpu on host constants looks inverted —
 * upstream uses cpu_to_le16 here; verify against l2cap_core.c. */
2380 rfc.retrans_timeout =
2381 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2382 rfc.monitor_timeout =
2383 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2385 set_bit(CONF_MODE_DONE, &chan->conf_state);
2387 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2388 sizeof(rfc), (unsigned long) &rfc);
2390 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2391 chan->remote_id = efs.id;
2392 chan->remote_stype = efs.stype;
2393 chan->remote_msdu = le16_to_cpu(efs.msdu);
2394 chan->remote_flush_to =
2395 le32_to_cpu(efs.flush_to);
2396 chan->remote_acc_lat =
2397 le32_to_cpu(efs.acc_lat);
2398 chan->remote_sdu_itime =
2399 le32_to_cpu(efs.sdu_itime);
2400 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2401 sizeof(efs), (unsigned long) &efs);
2405 case L2CAP_MODE_STREAMING:
2406 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2408 L2CAP_EXT_HDR_SIZE -
2411 rfc.max_pdu_size = cpu_to_le16(size);
2412 chan->remote_mps = size;
2414 set_bit(CONF_MODE_DONE, &chan->conf_state);
2416 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2417 sizeof(rfc), (unsigned long) &rfc);
2422 result = L2CAP_CONF_UNACCEPT;
2424 memset(&rfc, 0, sizeof(rfc));
2425 rfc.mode = chan->mode;
2428 if (result == L2CAP_CONF_SUCCESS)
2429 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2431 rsp->scid = cpu_to_le16(chan->dcid);
2432 rsp->result = cpu_to_le16(result);
2433 rsp->flags = cpu_to_le16(0x0000);
/* Parse a Configure Response from the peer and build a follow-up Configure
 * Request into `data`, adopting whatever adjusted values the peer returned.
 * Returns the new request length (computed on a line not visible here) or a
 * negative errno to refuse the connection. */
2438 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2440 struct l2cap_conf_req *req = data;
2441 void *ptr = req->data;
2444 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2445 struct l2cap_conf_efs efs;
2447 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2449 while (len >= L2CAP_CONF_OPT_SIZE) {
2450 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2453 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: reject it but
 * re-offer the minimum so negotiation can continue. */
2454 if (val < L2CAP_DEFAULT_MIN_MTU) {
2455 *result = L2CAP_CONF_UNACCEPT;
2456 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2459 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2462 case L2CAP_CONF_FLUSH_TO:
2463 chan->flush_to = val;
2464 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2468 case L2CAP_CONF_RFC:
2469 if (olen == sizeof(rfc))
2470 memcpy(&rfc, (void *)val, olen);
/* A mode-locked (state-2) device cannot switch modes. */
2472 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2473 rfc.mode != chan->mode)
2474 return -ECONNREFUSED;
2478 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2479 sizeof(rfc), (unsigned long) &rfc);
2482 case L2CAP_CONF_EWS:
2483 chan->tx_win = min_t(u16, val,
2484 L2CAP_DEFAULT_EXT_WINDOW);
2485 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2489 case L2CAP_CONF_EFS:
2490 if (olen == sizeof(efs))
2491 memcpy(&efs, (void *)val, olen);
2493 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2494 efs.stype != L2CAP_SERV_NOTRAFIC &&
2495 efs.stype != chan->local_stype)
2496 return -ECONNREFUSED;
2498 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2499 sizeof(efs), (unsigned long) &efs);
/* Basic mode cannot be renegotiated into another mode. */
2504 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2505 return -ECONNREFUSED;
2507 chan->mode = rfc.mode;
2509 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2511 case L2CAP_MODE_ERTM:
/* Adopt the peer's (possibly adjusted) ERTM/EFS parameters. */
2512 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2513 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2514 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2516 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2517 chan->local_msdu = le16_to_cpu(efs.msdu);
2518 chan->local_sdu_itime =
2519 le32_to_cpu(efs.sdu_itime);
2520 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2521 chan->local_flush_to =
2522 le32_to_cpu(efs.flush_to);
2526 case L2CAP_MODE_STREAMING:
2527 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2531 req->dcid = cpu_to_le16(chan->dcid);
2532 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal Configure Response header (scid/result/flags) into `data`.
 * Returns the response length (computed on a line not visible here). */
2537 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2539 struct l2cap_conf_rsp *rsp = data;
2540 void *ptr = rsp->data;
2542 BT_DBG("chan %p", chan);
2544 rsp->scid = cpu_to_le16(chan->dcid);
2545 rsp->result = cpu_to_le16(result);
2546 rsp->flags = cpu_to_le16(flags);
/* Complete a deferred connection accept: send the success Connect Response
 * that was held back (e.g. for security/authorization), then kick off
 * configuration if we have not already sent a Configure Request. */
2551 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2553 struct l2cap_conn_rsp rsp;
2554 struct l2cap_conn *conn = chan->conn;
2557 rsp.scid = cpu_to_le16(chan->dcid);
2558 rsp.dcid = cpu_to_le16(chan->scid);
2559 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2560 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2561 l2cap_send_cmd(conn, chan->ident,
2562 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only the first caller gets to send the Configure Request. */
2564 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2567 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2568 l2cap_build_conf_req(chan, buf), buf);
2569 chan->num_conf_req++;
/* Extract the RFC option from a successful Configure Response and apply its
 * timeouts/MPS to the channel; fall back to sane defaults if the peer did
 * not include an RFC option. Only relevant in ERTM/streaming mode. */
2572 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2576 struct l2cap_conf_rfc rfc;
2578 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2580 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2583 while (len >= L2CAP_CONF_OPT_SIZE) {
2584 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2587 case L2CAP_CONF_RFC:
2588 if (olen == sizeof(rfc))
2589 memcpy(&rfc, (void *)val, olen);
2594 /* Use sane default values in case a misbehaving remote device
2595 * did not send an RFC option.
2597 rfc.mode = chan->mode;
2598 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2599 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2600 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2602 BT_ERR("Expected RFC option was not found, using defaults");
2606 case L2CAP_MODE_ERTM:
2607 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2608 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2609 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2611 case L2CAP_MODE_STREAMING:
2612 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it rejects our outstanding
 * Information Request (feature-mask probe), treat the probe as done and
 * proceed with starting the queued connections anyway. */
2616 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2618 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2620 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2623 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2624 cmd->ident == conn->info_ident) {
2625 cancel_delayed_work(&conn->info_timer);
2627 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2628 conn->info_ident = 0;
2630 l2cap_conn_start(conn);
/* Handle an incoming Connect Request: find a listening channel for the PSM,
 * run security/backlog/duplicate-CID checks, create the child channel, and
 * answer with success/pending/refusal. On success (or pending with info
 * still outstanding) also start the feature-mask probe or configuration. */
2636 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2638 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2639 struct l2cap_conn_rsp rsp;
2640 struct l2cap_chan *chan = NULL, *pchan;
2641 struct sock *parent, *sk = NULL;
2642 int result, status = L2CAP_CS_NO_INFO;
2644 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2645 __le16 psm = req->psm;
2647 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2649 /* Check if we have socket listening on psm */
2650 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2652 result = L2CAP_CR_BAD_PSM;
2658 mutex_lock(&conn->chan_lock);
2661 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2662 if (psm != cpu_to_le16(0x0001) &&
2663 !hci_conn_check_link_mode(conn->hcon)) {
2664 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2665 result = L2CAP_CR_SEC_BLOCK;
2669 result = L2CAP_CR_NO_MEM;
2671 /* Check for backlog size */
2672 if (sk_acceptq_is_full(parent)) {
2673 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2677 chan = pchan->ops->new_connection(pchan->data);
2683 /* Check if we already have channel with that dcid */
2684 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2685 sock_set_flag(sk, SOCK_ZAPPED);
2686 chan->ops->close(chan->data);
2690 hci_conn_hold(conn->hcon);
2692 bacpy(&bt_sk(sk)->src, conn->src);
2693 bacpy(&bt_sk(sk)->dst, conn->dst);
2697 bt_accept_enqueue(parent, sk);
2699 __l2cap_chan_add(conn, chan);
2703 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident so a deferred response can reuse it. */
2705 chan->ident = cmd->ident;
2707 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2708 if (l2cap_chan_check_security(chan)) {
2709 if (bt_sk(sk)->defer_setup) {
/* Userspace must accept() first: report pending. */
2710 __l2cap_state_change(chan, BT_CONNECT2);
2711 result = L2CAP_CR_PEND;
2712 status = L2CAP_CS_AUTHOR_PEND;
2713 parent->sk_data_ready(parent, 0);
2715 __l2cap_state_change(chan, BT_CONFIG);
2716 result = L2CAP_CR_SUCCESS;
2717 status = L2CAP_CS_NO_INFO;
2720 __l2cap_state_change(chan, BT_CONNECT2);
2721 result = L2CAP_CR_PEND;
2722 status = L2CAP_CS_AUTHEN_PEND;
2725 __l2cap_state_change(chan, BT_CONNECT2);
2726 result = L2CAP_CR_PEND;
2727 status = L2CAP_CS_NO_INFO;
2731 release_sock(parent);
2732 mutex_unlock(&conn->chan_lock);
2735 rsp.scid = cpu_to_le16(scid);
2736 rsp.dcid = cpu_to_le16(dcid);
2737 rsp.result = cpu_to_le16(result);
2738 rsp.status = cpu_to_le16(status);
2739 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask still unknown: probe it before configuring. */
2741 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2742 struct l2cap_info_req info;
2743 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2745 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2746 conn->info_ident = l2cap_get_ident(conn);
2748 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
2750 l2cap_send_cmd(conn, conn->info_ident,
2751 L2CAP_INFO_REQ, sizeof(info), &info);
2754 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2755 result == L2CAP_CR_SUCCESS) {
2757 set_bit(CONF_REQ_SENT, &chan->conf_state);
2758 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2759 l2cap_build_conf_req(chan, buf), buf);
2760 chan->num_conf_req++;
/* Handle an incoming Connect Response: locate the channel by scid (or by
 * command ident for pending responses), then either move to BT_CONFIG and
 * send our Configure Request, stay pending, or tear the channel down. */
2766 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2768 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2769 u16 scid, dcid, result, status;
2770 struct l2cap_chan *chan;
2774 scid = __le16_to_cpu(rsp->scid);
2775 dcid = __le16_to_cpu(rsp->dcid);
2776 result = __le16_to_cpu(rsp->result);
2777 status = __le16_to_cpu(rsp->status);
2779 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
2780 dcid, scid, result, status);
2782 mutex_lock(&conn->chan_lock);
2785 chan = __l2cap_get_chan_by_scid(conn, scid);
/* No scid yet (pending accept): fall back to matching the ident. */
2791 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
2800 l2cap_chan_lock(chan);
2803 case L2CAP_CR_SUCCESS:
2804 l2cap_state_change(chan, BT_CONFIG);
2807 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2809 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2812 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2813 l2cap_build_conf_req(chan, req), req);
2814 chan->num_conf_req++;
/* Pending case — wait for the final response. */
2818 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result refuses the connection. */
2822 l2cap_chan_del(chan, ECONNREFUSED);
2826 l2cap_chan_unlock(chan);
2829 mutex_unlock(&conn->chan_lock);
2834 static inline void set_default_fcs(struct l2cap_chan *chan)
2836 /* FCS is enabled only in ERTM or streaming mode, if one or both
/* (continuation of the original comment is on a line not visible here) */
2839 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2840 chan->fcs = L2CAP_FCS_NONE;
/* Default to CRC16 unless the peer explicitly asked for no FCS. */
2841 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2842 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request: accumulate (possibly multi-fragment)
 * option data in chan->conf_req, and once the final fragment arrives parse
 * it, respond, and — when both directions are configured — bring the
 * channel up (initializing ERTM state if needed). */
2845 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2847 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2850 struct l2cap_chan *chan;
2853 dcid = __le16_to_cpu(req->dcid);
2854 flags = __le16_to_cpu(req->flags);
2856 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2858 chan = l2cap_get_chan_by_scid(conn, dcid);
2862 l2cap_chan_lock(chan);
/* Config only makes sense while connecting/configuring; otherwise
 * reject with an invalid-CID command reject. */
2864 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2865 struct l2cap_cmd_rej_cid rej;
2867 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2868 rej.scid = cpu_to_le16(chan->scid);
2869 rej.dcid = cpu_to_le16(chan->dcid);
2871 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2876 /* Reject if config buffer is too small. */
2877 len = cmd_len - sizeof(*req);
2878 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2879 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2880 l2cap_build_conf_rsp(chan, rsp,
2881 L2CAP_CONF_REJECT, flags), rsp);
2886 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2887 chan->conf_len += len;
/* Continuation flag set: more option fragments follow. */
2889 if (flags & 0x0001) {
2890 /* Incomplete config. Send empty response. */
2891 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2892 l2cap_build_conf_rsp(chan, rsp,
2893 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2897 /* Complete config. */
2898 len = l2cap_parse_conf_req(chan, rsp);
2900 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2904 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2905 chan->num_conf_rsp++;
2907 /* Reset config buffer. */
2910 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: channel is ready for data. */
2913 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2914 set_default_fcs(chan);
2916 l2cap_state_change(chan, BT_CONNECTED);
2918 chan->next_tx_seq = 0;
2919 chan->expected_tx_seq = 0;
2920 skb_queue_head_init(&chan->tx_q);
2921 if (chan->mode == L2CAP_MODE_ERTM)
2922 l2cap_ertm_init(chan);
2924 l2cap_chan_ready(chan);
2928 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2930 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2931 l2cap_build_conf_req(chan, buf), buf);
2932 chan->num_conf_req++;
2935 /* Got Conf Rsp PENDING from remote side and asume we sent
2936 Conf Rsp PENDING in the code above */
2937 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2938 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2940 /* check compatibility */
2942 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2943 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2945 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2946 l2cap_build_conf_rsp(chan, rsp,
2947 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2951 l2cap_chan_unlock(chan);
/* Handle an incoming Configure Response: on success apply the RFC values;
 * on pending resolve our own pending state; on unaccept retry with a fresh
 * request (bounded by L2CAP_CONF_MAX_CONF_RSP); otherwise disconnect. When
 * both directions are done, bring the channel up. */
2955 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2957 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2958 u16 scid, flags, result;
2959 struct l2cap_chan *chan;
2960 int len = cmd->len - sizeof(*rsp);
2962 scid = __le16_to_cpu(rsp->scid);
2963 flags = __le16_to_cpu(rsp->flags);
2964 result = __le16_to_cpu(rsp->result);
2966 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2967 scid, flags, result);
2969 chan = l2cap_get_chan_by_scid(conn, scid);
2973 l2cap_chan_lock(chan);
2976 case L2CAP_CONF_SUCCESS:
2977 l2cap_conf_rfc_get(chan, rsp->data, len);
2978 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2981 case L2CAP_CONF_PENDING:
2982 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
/* Both sides pending: parse the peer's values and send our own
 * final (non-pending) response. */
2984 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2987 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2990 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2994 /* check compatibility */
2996 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2997 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2999 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3000 l2cap_build_conf_rsp(chan, buf,
3001 L2CAP_CONF_SUCCESS, 0x0000), buf);
3005 case L2CAP_CONF_UNACCEPT:
3006 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Returned option data must fit in our request buffer. */
3009 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3010 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3014 /* throw out any old stored conf requests */
3015 result = L2CAP_CONF_SUCCESS;
3016 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3019 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3023 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3024 L2CAP_CONF_REQ, len, req);
3025 chan->num_conf_req++;
3026 if (result != L2CAP_CONF_SUCCESS)
/* Fallthrough/default: unrecoverable — tear the channel down. */
3032 l2cap_chan_set_err(chan, ECONNRESET);
3034 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3035 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3042 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3044 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3045 set_default_fcs(chan);
3047 l2cap_state_change(chan, BT_CONNECTED);
3048 chan->next_tx_seq = 0;
3049 chan->expected_tx_seq = 0;
3050 skb_queue_head_init(&chan->tx_q);
3051 if (chan->mode == L2CAP_MODE_ERTM)
3052 l2cap_ertm_init(chan);
3054 l2cap_chan_ready(chan);
3058 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Disconnection Request: acknowledge it with a
 * Disconnection Response, shut the socket down, delete the channel and
 * notify the channel owner via ops->close().
 */
3062 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3064 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3065 struct l2cap_disconn_rsp rsp;
3067 struct l2cap_chan *chan;
3070 scid = __le16_to_cpu(req->scid);
3071 dcid = __le16_to_cpu(req->dcid);
3073 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3075 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid; look up under the connection lock. */
3077 chan = __l2cap_get_chan_by_scid(conn, dcid);
3079 mutex_unlock(&conn->chan_lock);
3083 l2cap_chan_lock(chan);
/* Echo the CID pair back, swapped into the responder's point of view. */
3087 rsp.dcid = cpu_to_le16(chan->scid);
3088 rsp.scid = cpu_to_le16(chan->dcid);
3089 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
/* Stop further reads and writes on the owning socket. */
3092 sk->sk_shutdown = SHUTDOWN_MASK;
3095 l2cap_chan_del(chan, ECONNRESET);
3097 l2cap_chan_unlock(chan);
/* close() is called after dropping the channel lock to avoid deadlock. */
3099 chan->ops->close(chan->data);
3101 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Disconnection Response: the peer confirmed our
 * disconnect, so delete the channel (error 0 = clean close) and notify the
 * owner via ops->close().
 */
3106 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3108 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3110 struct l2cap_chan *chan;
3112 scid = __le16_to_cpu(rsp->scid);
3113 dcid = __le16_to_cpu(rsp->dcid);
3115 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3117 mutex_lock(&conn->chan_lock);
3119 chan = __l2cap_get_chan_by_scid(conn, scid);
3121 mutex_unlock(&conn->chan_lock);
3125 l2cap_chan_lock(chan);
/* 0 = orderly shutdown; no error is reported to the socket. */
3127 l2cap_chan_del(chan, 0);
3129 l2cap_chan_unlock(chan);
/* close() after unlocking the channel, mirroring l2cap_disconnect_req(). */
3131 chan->ops->close(chan->data);
3133 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Information Request.  Supports two query types:
 * the extended feature mask and the fixed channel map; anything else is
 * answered with L2CAP_IR_NOTSUPP.
 */
3138 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3140 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3143 type = __le16_to_cpu(req->type);
3145 BT_DBG("type 0x%4.4x", type);
3147 if (type == L2CAP_IT_FEAT_MASK) {
3149 u32 feat_mask = l2cap_feat_mask;
3150 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3151 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3152 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming (and, conditionally, extended flow spec and
 * extended window) on top of the base feature mask. */
3154 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3157 feat_mask |= L2CAP_FEAT_EXT_FLOW
3158 | L2CAP_FEAT_EXT_WINDOW;
/* Feature mask field is not naturally aligned inside buf. */
3160 put_unaligned_le32(feat_mask, rsp->data);
3161 l2cap_send_cmd(conn, cmd->ident,
3162 L2CAP_INFO_RSP, sizeof(buf), buf);
3163 } else if (type == L2CAP_IT_FIXED_CHAN) {
3165 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when available (condition is in
 * elided lines). */
3168 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3170 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3172 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3173 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3174 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3175 l2cap_send_cmd(conn, cmd->ident,
3176 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown information type: reply "not supported". */
3178 struct l2cap_info_rsp rsp;
3179 rsp.type = cpu_to_le16(type);
3180 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3181 l2cap_send_cmd(conn, cmd->ident,
3182 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming L2CAP Information Response.  Records the peer's
 * feature mask / fixed channel map, chains a FIXED_CHAN query after a
 * FEAT_MASK answer when supported, and finally kicks off pending channel
 * connections via l2cap_conn_start().
 */
3188 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3190 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3193 type = __le16_to_cpu(rsp->type);
3194 result = __le16_to_cpu(rsp->result);
3196 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3198 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
/* Drop responses that do not match our outstanding request ident, or
 * that arrive after the feature exchange already completed. */
3199 if (cmd->ident != conn->info_ident ||
3200 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3203 cancel_delayed_work(&conn->info_timer);
/* Peer refused the query: mark the exchange done and proceed anyway. */
3205 if (result != L2CAP_IR_SUCCESS) {
3206 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3207 conn->info_ident = 0;
3209 l2cap_conn_start(conn);
3215 case L2CAP_IT_FEAT_MASK:
3216 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, ask for its fixed channel map
 * before starting connections; otherwise finish the exchange here. */
3218 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3219 struct l2cap_info_req req;
3220 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3222 conn->info_ident = l2cap_get_ident(conn);
3224 l2cap_send_cmd(conn, conn->info_ident,
3225 L2CAP_INFO_REQ, sizeof(req), &req);
3227 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3228 conn->info_ident = 0;
3230 l2cap_conn_start(conn);
3234 case L2CAP_IT_FIXED_CHAN:
3235 conn->fixed_chan_mask = rsp->data[0];
3236 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3237 conn->info_ident = 0;
3239 l2cap_conn_start(conn);
/* Handle an incoming AMP Create Channel Request.  AMP channel creation is
 * not implemented yet, so after validating the command length every
 * request is rejected with L2CAP_CR_NO_MEM.
 */
3246 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3247 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3250 struct l2cap_create_chan_req *req = data;
3251 struct l2cap_create_chan_rsp rsp;
/* Malformed length: reject (error path is in elided lines). */
3254 if (cmd_len != sizeof(*req))
3260 psm = le16_to_cpu(req->psm);
3261 scid = le16_to_cpu(req->scid);
3263 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3265 /* Placeholder: Always reject */
3267 rsp.scid = cpu_to_le16(scid);
/* NOTE(review): result/status are sent as-is, without cpu_to_le16(), unlike
 * the other fields — harmless on LE hosts, confirm intent for BE. */
3268 rsp.result = L2CAP_CR_NO_MEM;
3269 rsp.status = L2CAP_CS_NO_INFO;
3271 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Handle an AMP Create Channel Response.  The response layout matches a
 * plain Connection Response, so delegate to l2cap_connect_rsp().
 */
3277 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3278 struct l2cap_cmd_hdr *cmd, void *data)
3280 BT_DBG("conn %p", conn);
3282 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for the given initiator CID with the given
 * result code, reusing the request's ident.
 */
3285 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3286 u16 icid, u16 result)
3288 struct l2cap_move_chan_rsp rsp;
3290 BT_DBG("icid %d, result %d", icid, result);
3292 rsp.icid = cpu_to_le16(icid);
3293 rsp.result = cpu_to_le16(result);
3295 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm with a freshly allocated ident; the ident is
 * also stored on the channel so the matching Confirm Response can be
 * correlated later.
 */
3298 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3299 struct l2cap_chan *chan, u16 icid, u16 result)
3301 struct l2cap_move_chan_cfm cfm;
3304 BT_DBG("icid %d, result %d", icid, result);
3306 ident = l2cap_get_ident(conn);
/* NOTE(review): l2cap_move_channel_rsp() passes chan == NULL here, which
 * would fault on this store — presumably an elided NULL check exists, or
 * this is a latent bug in the placeholder AMP code; verify. */
3308 chan->ident = ident;
3310 cfm.icid = cpu_to_le16(icid);
3311 cfm.result = cpu_to_le16(result);
3313 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Send a Move Channel Confirm Response for the given initiator CID,
 * echoing the Confirm's ident.
 */
3316 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3319 struct l2cap_move_chan_cfm_rsp rsp;
3321 BT_DBG("icid %d", icid);
3323 rsp.icid = cpu_to_le16(icid);
3324 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Move Channel Request.  Channel moves (AMP) are not
 * implemented, so after a length check every request is refused with
 * L2CAP_MR_NOT_ALLOWED.
 */
3327 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3328 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3330 struct l2cap_move_chan_req *req = data;
3332 u16 result = L2CAP_MR_NOT_ALLOWED;
/* Malformed length: reject (error path is in elided lines). */
3334 if (cmd_len != sizeof(*req))
3337 icid = le16_to_cpu(req->icid);
3339 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3344 /* Placeholder: Always refuse */
3345 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Response.  As moves are unsupported,
 * always answer with an UNCONFIRMED Move Channel Confirm.
 */
3350 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3351 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3353 struct l2cap_move_chan_rsp *rsp = data;
3356 if (cmd_len != sizeof(*rsp))
3359 icid = le16_to_cpu(rsp->icid);
3360 result = le16_to_cpu(rsp->result);
3362 BT_DBG("icid %d, result %d", icid, result);
3364 /* Placeholder: Always unconfirmed */
/* NOTE(review): chan is passed as NULL although l2cap_send_move_chan_cfm()
 * dereferences it (chan->ident) — confirm against upstream history. */
3365 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle an incoming Move Channel Confirm: validate its length and
 * acknowledge it with a Confirm Response.  No actual move bookkeeping is
 * performed (AMP placeholder).
 */
3370 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3371 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3373 struct l2cap_move_chan_cfm *cfm = data;
3376 if (cmd_len != sizeof(*cfm))
3379 icid = le16_to_cpu(cfm->icid);
3380 result = le16_to_cpu(cfm->result);
3382 BT_DBG("icid %d, result %d", icid, result);
3384 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle an incoming Move Channel Confirm Response.  Only validates the
 * length and logs the icid; nothing else to do in the placeholder AMP
 * implementation.
 */
3389 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3390 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3392 struct l2cap_move_chan_cfm_rsp *rsp = data;
3395 if (cmd_len != sizeof(*rsp))
3398 icid = le16_to_cpu(rsp->icid);
3400 BT_DBG("icid %d", icid);
/* Validate LE connection parameters against the ranges allowed by the
 * Bluetooth Core Specification: interval 6..3200 with min <= max,
 * supervision timeout 10..3200, timeout strictly greater than 8*max
 * interval, and slave latency capped both at 499 and at the value that
 * still lets the supervision timeout be met.
 */
3405 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3410 if (min > max || min < 6 || max > 3200)
3413 if (to_multiplier < 10 || to_multiplier > 3200)
3416 if (max >= to_multiplier * 8)
/* Largest latency that keeps (latency+1)*max below the timeout. */
3419 max_latency = (to_multiplier * 8 / max) - 1;
3420 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request.  Only valid when we
 * are master; validates the proposed parameters, replies accepted or
 * rejected, and on acceptance asks the controller to update the link.
 */
3426 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3427 struct l2cap_cmd_hdr *cmd, u8 *data)
3429 struct hci_conn *hcon = conn->hcon;
3430 struct l2cap_conn_param_update_req *req;
3431 struct l2cap_conn_param_update_rsp rsp;
3432 u16 min, max, latency, to_multiplier, cmd_len;
/* Slaves must not process this request; reject path is elided. */
3435 if (!(hcon->link_mode & HCI_LM_MASTER))
3438 cmd_len = __le16_to_cpu(cmd->len);
3439 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3442 req = (struct l2cap_conn_param_update_req *) data;
3443 min = __le16_to_cpu(req->min);
3444 max = __le16_to_cpu(req->max);
3445 latency = __le16_to_cpu(req->latency);
3446 to_multiplier = __le16_to_cpu(req->to_multiplier);
3448 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3449 min, max, latency, to_multiplier);
3451 memset(&rsp, 0, sizeof(rsp));
3453 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3455 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3457 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3459 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters were acceptable: push them down to the HCI layer. */
3463 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signalling command to its handler based on the
 * command code.  Echo requests are answered inline; unknown codes are
 * logged (the reject is generated by the caller from the returned error).
 */
3468 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3469 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3473 switch (cmd->code) {
3474 case L2CAP_COMMAND_REJ:
3475 l2cap_command_rej(conn, cmd, data);
3478 case L2CAP_CONN_REQ:
3479 err = l2cap_connect_req(conn, cmd, data);
3482 case L2CAP_CONN_RSP:
3483 err = l2cap_connect_rsp(conn, cmd, data);
3486 case L2CAP_CONF_REQ:
3487 err = l2cap_config_req(conn, cmd, cmd_len, data);
3490 case L2CAP_CONF_RSP:
3491 err = l2cap_config_rsp(conn, cmd, data);
3494 case L2CAP_DISCONN_REQ:
3495 err = l2cap_disconnect_req(conn, cmd, data);
3498 case L2CAP_DISCONN_RSP:
3499 err = l2cap_disconnect_rsp(conn, cmd, data);
/* Echo: bounce the payload straight back with the same ident. */
3502 case L2CAP_ECHO_REQ:
3503 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3506 case L2CAP_ECHO_RSP:
3509 case L2CAP_INFO_REQ:
3510 err = l2cap_information_req(conn, cmd, data);
3513 case L2CAP_INFO_RSP:
3514 err = l2cap_information_rsp(conn, cmd, data);
/* AMP-related commands (placeholder implementations). */
3517 case L2CAP_CREATE_CHAN_REQ:
3518 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3521 case L2CAP_CREATE_CHAN_RSP:
3522 err = l2cap_create_channel_rsp(conn, cmd, data);
3525 case L2CAP_MOVE_CHAN_REQ:
3526 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3529 case L2CAP_MOVE_CHAN_RSP:
3530 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3533 case L2CAP_MOVE_CHAN_CFM:
3534 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3537 case L2CAP_MOVE_CHAN_CFM_RSP:
3538 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3542 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signalling command.  Only the Connection Parameter
 * Update Request is actively handled; Command Reject and Update Response
 * are silently accepted, and anything else is logged as unknown.
 */
3550 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3551 struct l2cap_cmd_hdr *cmd, u8 *data)
3553 switch (cmd->code) {
3554 case L2CAP_COMMAND_REJ:
3557 case L2CAP_CONN_PARAM_UPDATE_REQ:
3558 return l2cap_conn_param_update_req(conn, cmd, data);
3560 case L2CAP_CONN_PARAM_UPDATE_RSP:
3564 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signalling channel payload of one ACL frame: iterate over the
 * packed command headers, dispatch each to the LE or BR/EDR handler, and
 * answer failures with a Command Reject.
 */
3569 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3570 struct sk_buff *skb)
3572 u8 *data = skb->data;
3574 struct l2cap_cmd_hdr cmd;
/* Give raw (sniffer) sockets a copy of the signalling traffic first. */
3577 l2cap_raw_recv(conn, skb);
3579 while (len >= L2CAP_CMD_HDR_SIZE) {
3581 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3582 data += L2CAP_CMD_HDR_SIZE;
3583 len -= L2CAP_CMD_HDR_SIZE;
3585 cmd_len = le16_to_cpu(cmd.len);
3587 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command may not claim more payload than remains, and ident 0 is
 * reserved/invalid per the spec. */
3589 if (cmd_len > len || !cmd.ident) {
3590 BT_DBG("corrupted command");
3594 if (conn->hcon->type == LE_LINK)
3595 err = l2cap_le_sig_cmd(conn, &cmd, data);
3597 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3600 struct l2cap_cmd_rej_unk rej;
3602 BT_ERR("Wrong link type (%d)", err);
3604 /* FIXME: Map err to a valid reason */
3605 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3606 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the trailing CRC16 FCS of an ERTM/streaming frame.
 * The CRC covers the L2CAP header (which has already been pulled off the
 * skb, hence the "data - hdr_size") plus the remaining payload.
 */
3616 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3618 u16 our_fcs, rcv_fcs;
/* Header size depends on whether extended (32-bit) control is in use. */
3621 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3622 hdr_size = L2CAP_EXT_HDR_SIZE;
3624 hdr_size = L2CAP_ENH_HDR_SIZE;
3626 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off, then read it from just past the new tail. */
3627 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3628 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3629 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3631 if (our_fcs != rcv_fcs)
/* Reply to a poll (P=1) from the peer: send RNR if we are locally busy,
 * otherwise (re)transmit pending I-frames, and fall back to an RR if
 * nothing at all was sent so the peer still gets its F-bit answer.
 */
3637 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3641 chan->frames_sent = 0;
3643 control |= __set_reqseq(chan, chan->buffer_seq);
3645 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3646 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3647 l2cap_send_sframe(chan, control);
3648 set_bit(CONN_RNR_SENT, &chan->conn_state);
3651 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3652 l2cap_retransmit_frames(chan);
3654 l2cap_ertm_send(chan);
/* Nothing went out and we are not busy: answer the poll with an RR. */
3656 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3657 chan->frames_sent == 0) {
3658 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3659 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ reassembly queue,
 * keeping the queue ordered by tx_seq offset relative to buffer_seq.
 * Duplicate tx_seq values are detected via the skb control block.
 */
3663 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3665 struct sk_buff *next_skb;
3666 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequence number and SAR bits in the skb control block for later
 * reassembly in l2cap_check_srej_gap(). */
3668 bt_cb(skb)->tx_seq = tx_seq;
3669 bt_cb(skb)->sar = sar;
3671 next_skb = skb_peek(&chan->srej_q);
3673 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
/* Duplicate frame already queued. */
3676 if (bt_cb(next_skb)->tx_seq == tx_seq)
3679 next_tx_seq_offset = __seq_offset(chan,
3680 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
/* Found the first queued frame that sorts after us: insert before it. */
3682 if (next_tx_seq_offset > tx_seq_offset) {
3683 __skb_queue_before(&chan->srej_q, next_skb, skb);
3687 if (skb_queue_is_last(&chan->srej_q, next_skb))
3690 next_skb = skb_queue_next(&chan->srej_q, next_skb);
/* Ran off the end of the queue: append. */
3693 __skb_queue_tail(&chan->srej_q, skb);
/* Append new_frag to skb's frag_list, tracking the current tail fragment
 * in *last_frag to avoid walking the list, and keep skb's length/size
 * accounting consistent.
 */
3698 static void append_skb_frag(struct sk_buff *skb,
3699 struct sk_buff *new_frag, struct sk_buff **last_frag)
3701 /* skb->len reflects data in skb as well as all fragments
3702 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list; later ones chain off *last_frag. */
3704 if (!skb_has_frag_list(skb))
3705 skb_shinfo(skb)->frag_list = new_frag;
3707 new_frag->next = NULL;
3709 (*last_frag)->next = new_frag;
3710 *last_frag = new_frag;
3712 skb->len += new_frag->len;
3713 skb->data_len += new_frag->len;
3714 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from SAR-tagged I-frames.  Unsegmented frames are
 * delivered immediately; START frames open a partial SDU (after stripping
 * the SDU length field and checking it against the MTU); CONTINUE/END
 * frames append to it, with END also delivering the completed SDU.  On
 * error the partial SDU is freed.
 */
3717 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3721 switch (__get_ctrl_sar(chan, control)) {
3722 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame: hand it up directly. */
3726 err = chan->ops->recv(chan->data, skb);
3729 case L2CAP_SAR_START:
/* First two bytes of a START frame carry the total SDU length. */
3733 chan->sdu_len = get_unaligned_le16(skb->data);
3734 skb_pull(skb, L2CAP_SDULEN_SIZE);
3736 if (chan->sdu_len > chan->imtu) {
3741 if (skb->len >= chan->sdu_len)
3745 chan->sdu_last_frag = skb;
3751 case L2CAP_SAR_CONTINUE:
3755 append_skb_frag(chan->sdu, skb,
3756 &chan->sdu_last_frag);
/* A CONTINUE frame must not complete (or overflow) the SDU. */
3759 if (chan->sdu->len >= chan->sdu_len)
3769 append_skb_frag(chan->sdu, skb,
3770 &chan->sdu_last_frag);
/* END frame: total length must match the announced SDU length exactly. */
3773 if (chan->sdu->len != chan->sdu_len)
3776 err = chan->ops->recv(chan->data, chan->sdu);
3779 /* Reassembly complete */
3781 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and reset reassembly state. */
3789 kfree_skb(chan->sdu);
3791 chan->sdu_last_frag = NULL;
/* Mark the channel locally busy (receiver cannot accept more I-frames)
 * and arm the ack timer so an RNR gets sent to the peer.
 */
3798 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3800 BT_DBG("chan %p, Enter local busy", chan);
3802 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3804 __set_ack_timer(chan);
/* Leave the local-busy state.  If an RNR was previously sent, poll the
 * peer with an RR (P=1) and wait for the F-bit answer before resuming;
 * then clear the busy/RNR flags.
 */
3807 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3811 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
/* We told the peer we were busy: send RR with the poll bit set. */
3814 control = __set_reqseq(chan, chan->buffer_seq);
3815 control |= __set_ctrl_poll(chan);
3816 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3817 l2cap_send_sframe(chan, control);
3818 chan->retry_count = 1;
/* Switch from retransmission timer to monitor timer while waiting for
 * the peer's final (F=1) response. */
3820 __clear_retrans_timer(chan);
3821 __set_monitor_timer(chan);
3823 set_bit(CONN_WAIT_F, &chan->conn_state);
3826 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3827 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3829 BT_DBG("chan %p, Exit local busy", chan);
/* Public entry point for the socket layer to toggle receive-side busy
 * state; only meaningful in ERTM mode.
 */
3832 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3834 if (chan->mode == L2CAP_MODE_ERTM) {
3836 l2cap_ertm_enter_local_busy(chan);
3838 l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ queue: deliver consecutively-sequenced frames starting
 * at tx_seq, stopping at the first gap or when we become locally busy.
 * A reassembly error tears the connection down.
 */
3842 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3844 struct sk_buff *skb;
3847 while ((skb = skb_peek(&chan->srej_q)) &&
3848 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Queue head is not the next expected frame: gap remains, stop. */
3851 if (bt_cb(skb)->tx_seq != tx_seq)
3854 skb = skb_dequeue(&chan->srej_q);
3855 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3856 err = l2cap_reassemble_sdu(chan, skb, control);
3859 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3863 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3864 tx_seq = __next_seq(chan, tx_seq);
/* Re-send SREJ frames for every entry in the srej list up to (and
 * including) tx_seq; matched entries are dropped, the rest are re-queued
 * at the tail so the list stays in request order.
 */
3868 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3870 struct srej_list *l, *tmp;
3873 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Reached the requested sequence number: done (cleanup elided). */
3874 if (l->tx_seq == tx_seq) {
3879 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3880 control |= __set_reqseq(chan, l->tx_seq);
3881 l2cap_send_sframe(chan, control);
/* Move the still-outstanding entry to the list tail. */
3883 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing sequence number between expected_tx_seq and
 * tx_seq, recording each in the srej list so the retransmissions can be
 * matched later.  Allocation failure aborts (error path elided).
 */
3887 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3889 struct srej_list *new;
3892 while (tx_seq != chan->expected_tx_seq) {
3893 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3894 control |= __set_reqseq(chan, chan->expected_tx_seq);
3895 l2cap_send_sframe(chan, control);
/* GFP_ATOMIC: may be called from the receive path. */
3897 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3901 new->tx_seq = chan->expected_tx_seq;
3903 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3905 list_add_tail(&new->list, &chan->srej_l);
/* Skip past tx_seq itself: that frame was just received. */
3908 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive handler.  Validates the tx_seq window,
 * handles the local-busy and SREJ-recovery states, queues or reassembles
 * in-sequence frames, and acknowledges roughly every tx_win/6 frames.
 */
3913 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3915 u16 tx_seq = __get_txseq(chan, rx_control);
3916 u16 req_seq = __get_reqseq(chan, rx_control);
3917 u8 sar = __get_ctrl_sar(chan, rx_control);
3918 int tx_seq_offset, expected_tx_seq_offset;
/* Ack threshold: about one ack per sixth of the transmit window. */
3919 int num_to_ack = (chan->tx_win/6) + 1;
3922 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3923 tx_seq, rx_control);
/* F-bit answers our earlier poll: leave the WAIT_F state. */
3925 if (__is_ctrl_final(chan, rx_control) &&
3926 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3927 __clear_monitor_timer(chan);
3928 if (chan->unacked_frames > 0)
3929 __set_retrans_timer(chan);
3930 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* The piggybacked req_seq acknowledges our transmitted frames. */
3933 chan->expected_ack_seq = req_seq;
3934 l2cap_drop_acked_frames(chan);
3936 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3938 /* invalid tx_seq */
3939 if (tx_seq_offset >= chan->tx_win) {
3940 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Locally busy: do not accept the frame, just keep acking. */
3944 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3945 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3946 l2cap_send_ack(chan);
3950 if (tx_seq == chan->expected_tx_seq)
/* SREJ recovery in progress: frame may fill a requested gap. */
3953 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3954 struct srej_list *first;
3956 first = list_first_entry(&chan->srej_l,
3957 struct srej_list, list);
3958 if (tx_seq == first->tx_seq) {
3959 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3960 l2cap_check_srej_gap(chan, tx_seq);
3962 list_del(&first->list);
/* All requested retransmissions arrived: leave SREJ_SENT. */
3965 if (list_empty(&chan->srej_l)) {
3966 chan->buffer_seq = chan->buffer_seq_srej;
3967 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3968 l2cap_send_ack(chan);
3969 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3972 struct srej_list *l;
3974 /* duplicated tx_seq */
3975 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3978 list_for_each_entry(l, &chan->srej_l, list) {
3979 if (l->tx_seq == tx_seq) {
3980 l2cap_resend_srejframe(chan, tx_seq);
3985 err = l2cap_send_srejframe(chan, tx_seq);
3987 l2cap_send_disconn_req(chan->conn, chan, -err);
/* Not yet in SREJ recovery: decide whether this frame starts one. */
3992 expected_tx_seq_offset = __seq_offset(chan,
3993 chan->expected_tx_seq, chan->buffer_seq);
3995 /* duplicated tx_seq */
3996 if (tx_seq_offset < expected_tx_seq_offset)
3999 set_bit(CONN_SREJ_SENT, &chan->conn_state);
4001 BT_DBG("chan %p, Enter SREJ", chan);
4003 INIT_LIST_HEAD(&chan->srej_l);
4004 chan->buffer_seq_srej = chan->buffer_seq;
4006 __skb_queue_head_init(&chan->srej_q);
4007 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4009 /* Set P-bit only if there are some I-frames to ack. */
4010 if (__clear_ack_timer(chan))
4011 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4013 err = l2cap_send_srejframe(chan, tx_seq);
4015 l2cap_send_disconn_req(chan->conn, chan, -err);
/* In-sequence path: advance the expected sequence number. */
4022 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4024 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4025 bt_cb(skb)->tx_seq = tx_seq;
4026 bt_cb(skb)->sar = sar;
4027 __skb_queue_tail(&chan->srej_q, skb);
4031 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4032 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4035 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4039 if (__is_ctrl_final(chan, rx_control)) {
4040 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4041 l2cap_retransmit_frames(chan);
/* Batch acks: send one every num_to_ack frames, else arm the timer. */
4045 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4046 if (chan->num_acked == num_to_ack - 1)
4047 l2cap_send_ack(chan);
4049 __set_ack_timer(chan);
/* Handle a Receiver Ready (RR) S-frame: apply its acknowledgement, then
 * react to the P/F bits — a poll gets an immediate answer, a final bit
 * may trigger retransmission, and a plain RR resumes transmission.
 */
4058 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4060 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4061 __get_reqseq(chan, rx_control), rx_control);
4063 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4064 l2cap_drop_acked_frames(chan);
/* Poll (P=1): we must answer with the F-bit set. */
4066 if (__is_ctrl_poll(chan, rx_control)) {
4067 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4068 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4069 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4070 (chan->unacked_frames > 0))
4071 __set_retrans_timer(chan);
4073 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4074 l2cap_send_srejtail(chan);
4076 l2cap_send_i_or_rr_or_rnr(chan);
/* Final (F=1): answer to our poll; retransmit unless REJ already did. */
4079 } else if (__is_ctrl_final(chan, rx_control)) {
4080 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4082 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4083 l2cap_retransmit_frames(chan);
/* Plain RR: peer can receive again, resume sending. */
4086 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4087 (chan->unacked_frames > 0))
4088 __set_retrans_timer(chan);
4090 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4091 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4092 l2cap_send_ack(chan);
4094 l2cap_ertm_send(chan);
/* Handle a Reject (REJ) S-frame: the peer asks for retransmission from
 * req_seq.  Acked frames are dropped first, then frames are resent; with
 * F=1 the retransmit is skipped if a REJ was already acted on.
 */
4098 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4100 u16 tx_seq = __get_reqseq(chan, rx_control);
4102 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4104 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4106 chan->expected_ack_seq = tx_seq;
4107 l2cap_drop_acked_frames(chan);
4109 if (__is_ctrl_final(chan, rx_control)) {
4110 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4111 l2cap_retransmit_frames(chan);
4113 l2cap_retransmit_frames(chan);
/* Remember we acted on a REJ while a poll answer is still pending. */
4115 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4116 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the frame
 * with sequence number req_seq.  P/F bit handling mirrors the REJ case,
 * tracked via CONN_SREJ_ACT / srej_save_reqseq.
 */
4119 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4121 u16 tx_seq = __get_reqseq(chan, rx_control);
4123 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4125 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* SREJ with P=1: ack everything up to tx_seq, resend it, and answer the
 * poll with the F-bit before resuming normal transmission. */
4127 if (__is_ctrl_poll(chan, rx_control)) {
4128 chan->expected_ack_seq = tx_seq;
4129 l2cap_drop_acked_frames(chan);
4131 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4132 l2cap_retransmit_one_frame(chan, tx_seq);
4134 l2cap_ertm_send(chan);
4136 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4137 chan->srej_save_reqseq = tx_seq;
4138 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* SREJ with F=1: skip the resend if this same seq was already handled. */
4140 } else if (__is_ctrl_final(chan, rx_control)) {
4141 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4142 chan->srej_save_reqseq == tx_seq)
4143 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4145 l2cap_retransmit_one_frame(chan, tx_seq);
/* Plain SREJ: just resend the one frame. */
4147 l2cap_retransmit_one_frame(chan, tx_seq);
4148 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4149 chan->srej_save_reqseq = tx_seq;
4150 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a Receiver Not Ready (RNR) S-frame: the peer cannot accept more
 * I-frames.  Record remote-busy, apply the acknowledgement, and answer a
 * poll appropriately depending on whether SREJ recovery is active.
 */
4155 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4157 u16 tx_seq = __get_reqseq(chan, rx_control);
4159 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4161 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4162 chan->expected_ack_seq = tx_seq;
4163 l2cap_drop_acked_frames(chan);
4165 if (__is_ctrl_poll(chan, rx_control))
4166 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* No SREJ recovery in progress: stop retransmitting and, on a poll,
 * answer immediately with RR/RNR carrying the final bit. */
4168 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4169 __clear_retrans_timer(chan);
4170 if (__is_ctrl_poll(chan, rx_control))
4171 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4175 if (__is_ctrl_poll(chan, rx_control)) {
4176 l2cap_send_srejtail(chan);
4178 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4179 l2cap_send_sframe(chan, rx_control);
/* Dispatch an ERTM supervisory frame (RR/REJ/SREJ/RNR) to its handler.
 * A final bit answering our poll first clears the WAIT_F state and
 * restarts the retransmission timer if frames are still unacked.
 */
4183 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4185 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4187 if (__is_ctrl_final(chan, rx_control) &&
4188 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4189 __clear_monitor_timer(chan);
4190 if (chan->unacked_frames > 0)
4191 __set_retrans_timer(chan);
4192 clear_bit(CONN_WAIT_F, &chan->conn_state);
4195 switch (__get_ctrl_super(chan, rx_control)) {
4196 case L2CAP_SUPER_RR:
4197 l2cap_data_channel_rrframe(chan, rx_control);
4200 case L2CAP_SUPER_REJ:
4201 l2cap_data_channel_rejframe(chan, rx_control);
4204 case L2CAP_SUPER_SREJ:
4205 l2cap_data_channel_srejframe(chan, rx_control);
4208 case L2CAP_SUPER_RNR:
4209 l2cap_data_channel_rnrframe(chan, rx_control);
/* ERTM receive entry point for one frame: strip and validate the control
 * field and FCS, sanity-check payload length and req_seq, then route the
 * frame to the I-frame or S-frame handler.
 */
4217 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4221 int len, next_tx_seq_offset, req_seq_offset;
4223 control = __get_control(chan, skb->data);
4224 skb_pull(skb, __ctrl_size(chan));
4228 * We can just drop the corrupted I-frame here.
4229 * Receiver will miss it and start proper recovery
4230 * procedures and ask retransmission.
4232 if (l2cap_check_fcs(chan, skb))
/* Payload accounting: subtract SDU-length field (SAR start I-frames)
 * and the FCS before comparing against the negotiated MPS. */
4235 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4236 len -= L2CAP_SDULEN_SIZE;
4238 if (chan->fcs == L2CAP_FCS_CRC16)
4239 len -= L2CAP_FCS_SIZE;
4241 if (len > chan->mps) {
4242 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4246 req_seq = __get_reqseq(chan, control);
4248 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4250 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4251 chan->expected_ack_seq);
4253 /* check for invalid req-seq */
4254 if (req_seq_offset > next_tx_seq_offset) {
4255 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* I-frame: payload must be non-empty checks are in elided lines. */
4259 if (!__is_sframe(chan, control)) {
4261 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4265 l2cap_data_channel_iframe(chan, control, skb);
/* S-frame with payload is a protocol violation. */
4269 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4273 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver one data frame to the channel identified by cid, handling each
 * channel mode: basic (direct delivery), ERTM (full state machine), and
 * streaming (sequence check, no retransmission — missing frames just
 * discard the partial SDU).
 */
4283 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4285 struct l2cap_chan *chan;
4290 chan = l2cap_get_chan_by_scid(conn, cid);
4292 BT_DBG("unknown cid 0x%4.4x", cid);
4293 /* Drop packet and return */
4298 l2cap_chan_lock(chan);
4300 BT_DBG("chan %p, len %d", chan, skb->len);
4302 if (chan->state != BT_CONNECTED)
4305 switch (chan->mode) {
4306 case L2CAP_MODE_BASIC:
4307 /* If socket recv buffers overflows we drop data here
4308 * which is *bad* because L2CAP has to be reliable.
4309 * But we don't have any other choice. L2CAP doesn't
4310 * provide flow control mechanism. */
4312 if (chan->imtu < skb->len)
4315 if (!chan->ops->recv(chan->data, skb))
4319 case L2CAP_MODE_ERTM:
4320 l2cap_ertm_data_rcv(chan, skb);
4324 case L2CAP_MODE_STREAMING:
/* Streaming mode: strip control field and FCS, validate length. */
4325 control = __get_control(chan, skb->data);
4326 skb_pull(skb, __ctrl_size(chan));
4329 if (l2cap_check_fcs(chan, skb))
4332 if (__is_sar_start(chan, control))
4333 len -= L2CAP_SDULEN_SIZE;
4335 if (chan->fcs == L2CAP_FCS_CRC16)
4336 len -= L2CAP_FCS_SIZE;
4338 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4341 tx_seq = __get_txseq(chan, control);
4343 if (chan->expected_tx_seq != tx_seq) {
4344 /* Frame(s) missing - must discard partial SDU */
4345 kfree_skb(chan->sdu);
4347 chan->sdu_last_frag = NULL;
4350 /* TODO: Notify userland of missing data */
4353 chan->expected_tx_seq = __next_seq(chan, tx_seq);
/* Oversized reassembled SDU is the only fatal error in streaming. */
4355 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4356 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4361 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4369 l2cap_chan_unlock(chan);
/* Deliver a connectionless (G-frame) payload to a channel bound to the
 * given PSM on our local address, subject to state and MTU checks.
 */
4374 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4376 struct l2cap_chan *chan;
4378 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4382 BT_DBG("chan %p, len %d", chan, skb->len);
4384 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4387 if (chan->imtu < skb->len)
4390 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE fixed channel) payload to the channel bound to the
 * given CID on our local address; mirrors l2cap_conless_channel() but
 * looks up by source CID instead of PSM.
 */
4399 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4401 struct l2cap_chan *chan;
4403 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4407 BT_DBG("chan %p, len %d", chan, skb->len);
4409 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4412 if (chan->imtu < skb->len)
4415 if (!chan->ops->recv(chan->data, skb))
/* Top-level L2CAP receive demultiplexer: parse the basic header, verify
 * the advertised length against the skb, and route by destination CID to
 * the signalling, connectionless, ATT, SMP, or data channel path.
 */
4424 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4426 struct l2cap_hdr *lh = (void *) skb->data;
4430 skb_pull(skb, L2CAP_HDR_SIZE);
4431 cid = __le16_to_cpu(lh->cid);
4432 len = __le16_to_cpu(lh->len);
/* Header length must match the remaining payload exactly. */
4434 if (len != skb->len) {
4439 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4442 case L2CAP_CID_LE_SIGNALING:
4443 case L2CAP_CID_SIGNALING:
4444 l2cap_sig_channel(conn, skb);
4447 case L2CAP_CID_CONN_LESS:
4448 psm = get_unaligned_le16(skb->data);
4450 l2cap_conless_channel(conn, psm, skb);
4453 case L2CAP_CID_LE_DATA:
4454 l2cap_att_channel(conn, cid, skb);
/* SMP fixed channel: a failure there kills the whole connection. */
4458 if (smp_sig_channel(conn, skb))
4459 l2cap_conn_del(conn->hcon, EACCES);
/* Everything else is a dynamically allocated data channel. */
4463 l2cap_data_channel(conn, cid, skb);
4468 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from bdaddr?
 * Scans listening channels; an exact local-address match (lm1) takes
 * precedence over wildcard BDADDR_ANY listeners (lm2).  Returns the
 * HCI_LM_* accept/role flags.
 */
4470 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4472 int exact = 0, lm1 = 0, lm2 = 0;
4473 struct l2cap_chan *c;
4475 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4477 /* Find listening sockets and check their link_mode */
4478 read_lock(&chan_list_lock);
4479 list_for_each_entry(c, &chan_list, global_l) {
4480 struct sock *sk = c->sk;
4482 if (c->state != BT_LISTEN)
/* Listener bound specifically to this adapter's address. */
4485 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4486 lm1 |= HCI_LM_ACCEPT;
4487 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4488 lm1 |= HCI_LM_MASTER;
/* Wildcard listener: used only if no exact match exists. */
4490 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4491 lm2 |= HCI_LM_ACCEPT;
4492 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4493 lm2 |= HCI_LM_MASTER;
4496 read_unlock(&chan_list_lock);
4498 return exact ? lm1 : lm2;
/* HCI callback: an outgoing/incoming ACL connection completed.  On
 * success attach an l2cap_conn and mark it ready; on failure tear it
 * down with the mapped errno.
 */
4501 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4503 struct l2cap_conn *conn;
4505 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4508 conn = l2cap_conn_add(hcon, status);
4510 l2cap_conn_ready(conn);
4512 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the HCI reason code to use when this connection
 * is torn down; defaults to "remote user terminated" when no l2cap_conn
 * is attached.
 */
4517 int l2cap_disconn_ind(struct hci_conn *hcon)
4519 struct l2cap_conn *conn = hcon->l2cap_data;
4521 BT_DBG("hcon %p", hcon);
4524 return HCI_ERROR_REMOTE_USER_TERM;
4525 return conn->disc_reason;
/* HCI callback: the ACL link went down — delete the L2CAP connection,
 * converting the HCI reason into an errno for the channels.
 */
4528 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4530 BT_DBG("hcon %p reason %d", hcon, reason);
4532 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel: losing
 * encryption gives MEDIUM-security channels a grace timer and closes
 * HIGH-security ones outright; gaining encryption cancels the timer.
 */
4536 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4538 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4541 if (encrypt == 0x00) {
/* Medium security tolerates a temporary loss (e.g. role switch). */
4542 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4543 __clear_chan_timer(chan);
4544 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4545 } else if (chan->sec_level == BT_SECURITY_HIGH)
4546 l2cap_chan_close(chan, ECONNREFUSED);
4548 if (chan->sec_level == BT_SECURITY_MEDIUM)
4549 __clear_chan_timer(chan);
/* HCI -> L2CAP callback: an authentication/encryption procedure on
 * @hcon finished with @status (0 = success) and new encryption state
 * @encrypt.  Walks every channel on the connection under chan_lock and
 * advances each one according to its state:
 *
 *  - LE data channel: on success with encryption, adopt the link's
 *    security level and mark the channel ready.
 *  - Channels with a connect still pending at the HCI level are skipped.
 *  - Already-connected/configuring channels: on success, un-suspend the
 *    socket; in all cases re-evaluate via l2cap_check_encryption().
 *  - BT_CONNECT: security now satisfied — send the L2CAP Connect
 *    Request (or, on failure, arm the disconnect timer instead; the
 *    status branch is elided at original lines 4603/4605).
 *  - BT_CONNECT2 (incoming, waiting on security): build and send the
 *    deferred Connect Response — pending/authorization-pending when
 *    setup is deferred to userspace, success otherwise, or
 *    security-block on failure (branch structure partially elided,
 *    original lines 4612-4636 — confirm upstream; 'res'/'stat' are
 *    declared in an elided line).
 *
 * NOTE(review): several 'continue'/'goto' and closing-brace lines are
 * elided throughout; the per-channel lock/unlock pairing visible here
 * matches the usual pattern but cannot be fully verified from this view.
 */
4553 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4555 struct l2cap_conn *conn = hcon->l2cap_data;
4556 struct l2cap_chan *chan;
4561 BT_DBG("conn %p", conn);
/* LE links are handled by SMP: push key distribution and stop the
 * security timer. */
4563 if (hcon->type == LE_LINK) {
4564 smp_distribute_keys(conn, 0);
4565 cancel_delayed_work(&conn->security_timer);
4568 mutex_lock(&conn->chan_lock);
4570 list_for_each_entry(chan, &conn->chan_l, list) {
4571 l2cap_chan_lock(chan);
4573 BT_DBG("chan->scid %d", chan->scid);
4575 if (chan->scid == L2CAP_CID_LE_DATA) {
4576 if (!status && encrypt) {
4577 chan->sec_level = hcon->sec_level;
4578 l2cap_chan_ready(chan);
4581 l2cap_chan_unlock(chan);
/* Connect still pending at HCI level — nothing to do for this
 * channel yet. */
4585 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4586 l2cap_chan_unlock(chan);
4590 if (!status && (chan->state == BT_CONNECTED ||
4591 chan->state == BT_CONFIG)) {
4592 struct sock *sk = chan->sk;
/* Security succeeded on an established channel: resume the socket. */
4594 bt_sk(sk)->suspended = false;
4595 sk->sk_state_change(sk);
4597 l2cap_check_encryption(chan, encrypt);
4598 l2cap_chan_unlock(chan);
/* Outgoing connect that was waiting on security. */
4602 if (chan->state == BT_CONNECT) {
4604 l2cap_send_conn_req(chan);
/* Failure path: give up after the disconnect timeout. */
4606 __clear_chan_timer(chan);
4607 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Incoming connect whose response was deferred pending security. */
4609 } else if (chan->state == BT_CONNECT2) {
4610 struct sock *sk = chan->sk;
4611 struct l2cap_conn_rsp rsp;
4617 if (bt_sk(sk)->defer_setup) {
4618 struct sock *parent = bt_sk(sk)->parent;
/* Userspace must authorize: answer "pending" and wake the
 * listening parent socket. */
4619 res = L2CAP_CR_PEND;
4620 stat = L2CAP_CS_AUTHOR_PEND;
4622 parent->sk_data_ready(parent, 0);
/* Security passed, no deferral: accept and move to CONFIG. */
4624 __l2cap_state_change(chan, BT_CONFIG);
4625 res = L2CAP_CR_SUCCESS;
4626 stat = L2CAP_CS_NO_INFO;
/* Security failed: reject with "security block" and arm the
 * disconnect timer. */
4629 __l2cap_state_change(chan, BT_DISCONN);
4630 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4631 res = L2CAP_CR_SEC_BLOCK;
4632 stat = L2CAP_CS_NO_INFO;
/* Send the (previously deferred) Connect Response.  scid/dcid are
 * from the remote's point of view, hence the swap. */
4637 rsp.scid = cpu_to_le16(chan->dcid);
4638 rsp.dcid = cpu_to_le16(chan->scid);
4639 rsp.result = cpu_to_le16(res);
4640 rsp.status = cpu_to_le16(stat);
4641 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4645 l2cap_chan_unlock(chan);
4648 mutex_unlock(&conn->chan_lock);
/* HCI -> L2CAP callback: one ACL data packet arrived on @hcon.
 * Reassembles L2CAP frames that span multiple ACL packets:
 *
 *  - Start packet (!(flags & ACL_CONT)): parse the Basic L2CAP header
 *    for total length and CID.  If the frame fits in this packet,
 *    dispatch it immediately; otherwise validate the length against
 *    the target channel's receive MTU, allocate conn->rx_skb for the
 *    full frame, and copy this first fragment in.
 *  - Continuation packet: append to rx_skb, tracking the outstanding
 *    byte count in conn->rx_len; when it reaches zero, dispatch the
 *    completed frame.
 *
 * Protocol violations (short/overlong frames, unexpected start or
 * continuation) mark the connection unreliable via
 * l2cap_conn_unreliable(ECOMM) and drop the partial frame.
 *
 * NOTE(review): many error-path 'goto drop' lines, the 'else' that
 * separates the start/continuation arms, and several condition lines
 * (e.g. the "unexpected start while rx_skb pending" check at original
 * 4669-4671, the rx_skb allocation-failure check at 4724-4726) are
 * elided from this view — confirm upstream.
 */
4653 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4655 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the L2CAP connection on first data (guard elided). */
4658 conn = l2cap_conn_add(hcon, 0);
4663 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4665 if (!(flags & ACL_CONT)) {
4666 struct l2cap_hdr *hdr;
4667 struct l2cap_chan *chan;
/* A start packet while reassembly is in progress: discard the
 * partial frame. */
4672 BT_ERR("Unexpected start frame (len %d)", skb->len);
4673 kfree_skb(conn->rx_skb);
4674 conn->rx_skb = NULL;
4676 l2cap_conn_unreliable(conn, ECOMM);
4679 /* Start fragment always begin with Basic L2CAP header */
4680 if (skb->len < L2CAP_HDR_SIZE) {
4681 BT_ERR("Frame is too short (len %d)", skb->len);
4682 l2cap_conn_unreliable(conn, ECOMM);
4686 hdr = (struct l2cap_hdr *) skb->data;
4687 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4688 cid = __le16_to_cpu(hdr->cid);
4690 if (len == skb->len) {
4691 /* Complete frame received */
4692 l2cap_recv_frame(conn, skb);
4696 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4698 if (skb->len > len) {
4699 BT_ERR("Frame is too long (len %d, expected len %d)",
4701 l2cap_conn_unreliable(conn, ECOMM);
4705 chan = l2cap_get_chan_by_scid(conn, cid);
/* Reject frames that would exceed the channel's receive MTU before
 * allocating reassembly storage. */
4707 if (chan && chan->sk) {
4708 struct sock *sk = chan->sk;
4711 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4712 BT_ERR("Frame exceeding recv MTU (len %d, "
4716 l2cap_conn_unreliable(conn, ECOMM);
4722 /* Allocate skb for the complete frame (with header) */
4723 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4727 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4729 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4731 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4733 if (!conn->rx_len) {
4734 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4735 l2cap_conn_unreliable(conn, ECOMM);
4739 if (skb->len > conn->rx_len) {
4740 BT_ERR("Fragment is too long (len %d, expected %d)",
4741 skb->len, conn->rx_len);
4742 kfree_skb(conn->rx_skb);
4743 conn->rx_skb = NULL;
4745 l2cap_conn_unreliable(conn, ECOMM);
4749 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4751 conn->rx_len -= skb->len;
4753 if (!conn->rx_len) {
4754 /* Complete frame received */
4755 l2cap_recv_frame(conn, conn->rx_skb);
4756 conn->rx_skb = NULL;
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap.
 * Dumps one line per channel in the global list: source and destination
 * addresses, state, PSM, source/destination CIDs, incoming/outgoing
 * MTUs, security level and channel mode.  Takes chan_list_lock for
 * reading so the list cannot change while iterating.
 */
4765 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4767 struct l2cap_chan *c;
4769 read_lock(&chan_list_lock);
4771 list_for_each_entry(c, &chan_list, global_l) {
4772 struct sock *sk = c->sk;
4774 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4775 batostr(&bt_sk(sk)->src),
4776 batostr(&bt_sk(sk)->dst),
4777 c->state, __le16_to_cpu(c->psm),
4778 c->scid, c->dcid, c->imtu, c->omtu,
4779 c->sec_level, c->mode);
4782 read_unlock(&chan_list_lock);
/* debugfs open callback: bind the single-record seq_file show routine. */
4787 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4789 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry, using the standard
 * single_open/seq_file helpers.  NOTE(review): the '.read = seq_read'
 * initializer (original line 4794) is elided from this view.
 */
4792 static const struct file_operations l2cap_debugfs_fops = {
4793 .open = l2cap_debugfs_open,
4795 .llseek = seq_lseek,
4796 .release = single_release,
/* Handle for the debugfs file created in l2cap_init(), removed in
 * l2cap_exit(). */
4799 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer, then create the
 * read-only debugfs dump file under the bluetooth debugfs root.
 * debugfs creation failure is logged but non-fatal (the error-return
 * handling for l2cap_init_sockets() is elided at original lines
 * 4806-4807 — confirm upstream).
 */
4801 int __init l2cap_init(void)
4805 err = l2cap_init_sockets();
4810 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4811 bt_debugfs, NULL, &l2cap_debugfs_fops);
4813 BT_ERR("Failed to create L2CAP debug file");
/* Module exit: tear down in reverse order of l2cap_init() — remove the
 * debugfs entry, then unregister the socket layer. */
4819 void l2cap_exit(void)
4821 debugfs_remove(l2cap_debugfs);
4822 l2cap_cleanup_sockets();
/* Module parameter (writable at runtime via sysfs, mode 0644) letting
 * administrators disable Enhanced Retransmission Mode globally. */
4825 module_param(disable_ertm, bool, 0644);
4826 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");