2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
/* Module parameter: set to true to disable Enhanced Retransmission Mode.
 * The default must be false so that ERTM and streaming mode are offered
 * during feature-mask negotiation (see l2cap_mode_supported()).
 */
bool disable_ertm;
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
77 struct sk_buff_head *skbs, u8 event);
79 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
85 list_for_each_entry(c, &conn->chan_l, list) {
92 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
96 list_for_each_entry(c, &conn->chan_l, list) {
103 /* Find channel with given SCID.
104 * Returns locked channel. */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 struct l2cap_chan *c;
109 mutex_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
113 mutex_unlock(&conn->chan_lock);
118 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
120 struct l2cap_chan *c;
122 list_for_each_entry(c, &conn->chan_l, list) {
123 if (c->ident == ident)
129 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
131 struct l2cap_chan *c;
133 list_for_each_entry(c, &chan_list, global_l) {
134 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
140 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
144 write_lock(&chan_list_lock);
146 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
159 for (p = 0x1001; p < 0x1100; p += 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
161 chan->psm = cpu_to_le16(p);
162 chan->sport = cpu_to_le16(p);
169 write_unlock(&chan_list_lock);
173 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
175 write_lock(&chan_list_lock);
179 write_unlock(&chan_list_lock);
184 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
186 u16 cid = L2CAP_CID_DYN_START;
188 for (; cid < L2CAP_CID_DYN_END; cid++) {
189 if (!__l2cap_get_chan_by_scid(conn, cid))
196 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
198 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
199 state_to_string(state));
202 chan->ops->state_change(chan->data, state);
205 static void l2cap_state_change(struct l2cap_chan *chan, int state)
207 struct sock *sk = chan->sk;
210 __l2cap_state_change(chan, state);
214 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
216 struct sock *sk = chan->sk;
221 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
223 struct sock *sk = chan->sk;
226 __l2cap_chan_set_err(chan, err);
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocation.
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
256 size_t alloc_size, i;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size = roundup_pow_of_two(size);
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
279 kfree(seq_list->list);
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
285 /* Constant-time check for list membership */
286 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
289 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
291 u16 mask = seq_list->mask;
293 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR;
296 } else if (seq_list->head == seq) {
297 /* Head can be removed in constant time */
298 seq_list->head = seq_list->list[seq & mask];
299 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
301 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
302 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
303 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 /* Walk the list to find the sequence number */
307 u16 prev = seq_list->head;
308 while (seq_list->list[prev & mask] != seq) {
309 prev = seq_list->list[prev & mask];
310 if (prev == L2CAP_SEQ_LIST_TAIL)
311 return L2CAP_SEQ_LIST_CLEAR;
314 /* Unlink the number from the list and clear it */
315 seq_list->list[prev & mask] = seq_list->list[seq & mask];
316 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
317 if (seq_list->tail == seq)
318 seq_list->tail = prev;
323 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list, seq_list->head);
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
343 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
345 u16 mask = seq_list->mask;
347 /* All appends happen in constant time */
349 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
352 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
353 seq_list->head = seq;
355 seq_list->list[seq_list->tail & mask] = seq;
357 seq_list->tail = seq;
358 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
361 static void l2cap_chan_timeout(struct work_struct *work)
363 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
365 struct l2cap_conn *conn = chan->conn;
368 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
370 mutex_lock(&conn->chan_lock);
371 l2cap_chan_lock(chan);
373 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
374 reason = ECONNREFUSED;
375 else if (chan->state == BT_CONNECT &&
376 chan->sec_level != BT_SECURITY_SDP)
377 reason = ECONNREFUSED;
381 l2cap_chan_close(chan, reason);
383 l2cap_chan_unlock(chan);
385 chan->ops->close(chan->data);
386 mutex_unlock(&conn->chan_lock);
388 l2cap_chan_put(chan);
391 struct l2cap_chan *l2cap_chan_create(void)
393 struct l2cap_chan *chan;
395 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
399 mutex_init(&chan->lock);
401 write_lock(&chan_list_lock);
402 list_add(&chan->global_l, &chan_list);
403 write_unlock(&chan_list_lock);
405 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
407 chan->state = BT_OPEN;
409 atomic_set(&chan->refcnt, 1);
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
414 BT_DBG("chan %p", chan);
419 void l2cap_chan_destroy(struct l2cap_chan *chan)
421 write_lock(&chan_list_lock);
422 list_del(&chan->global_l);
423 write_unlock(&chan_list_lock);
425 l2cap_chan_put(chan);
428 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
430 chan->fcs = L2CAP_FCS_CRC16;
431 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
432 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
433 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
434 chan->sec_level = BT_SECURITY_LOW;
436 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
439 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
441 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
442 __le16_to_cpu(chan->psm), chan->dcid);
444 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
448 switch (chan->chan_type) {
449 case L2CAP_CHAN_CONN_ORIENTED:
450 if (conn->hcon->type == LE_LINK) {
452 chan->omtu = L2CAP_LE_DEFAULT_MTU;
453 chan->scid = L2CAP_CID_LE_DATA;
454 chan->dcid = L2CAP_CID_LE_DATA;
456 /* Alloc CID for connection-oriented socket */
457 chan->scid = l2cap_alloc_cid(conn);
458 chan->omtu = L2CAP_DEFAULT_MTU;
462 case L2CAP_CHAN_CONN_LESS:
463 /* Connectionless socket */
464 chan->scid = L2CAP_CID_CONN_LESS;
465 chan->dcid = L2CAP_CID_CONN_LESS;
466 chan->omtu = L2CAP_DEFAULT_MTU;
470 /* Raw socket can send/recv signalling messages only */
471 chan->scid = L2CAP_CID_SIGNALING;
472 chan->dcid = L2CAP_CID_SIGNALING;
473 chan->omtu = L2CAP_DEFAULT_MTU;
476 chan->local_id = L2CAP_BESTEFFORT_ID;
477 chan->local_stype = L2CAP_SERV_BESTEFFORT;
478 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
479 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
480 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
481 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
483 l2cap_chan_hold(chan);
485 list_add(&chan->list, &conn->chan_l);
488 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
490 mutex_lock(&conn->chan_lock);
491 __l2cap_chan_add(conn, chan);
492 mutex_unlock(&conn->chan_lock);
495 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
497 struct sock *sk = chan->sk;
498 struct l2cap_conn *conn = chan->conn;
499 struct sock *parent = bt_sk(sk)->parent;
501 __clear_chan_timer(chan);
503 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
506 /* Delete from channel list */
507 list_del(&chan->list);
509 l2cap_chan_put(chan);
512 hci_conn_put(conn->hcon);
517 __l2cap_state_change(chan, BT_CLOSED);
518 sock_set_flag(sk, SOCK_ZAPPED);
521 __l2cap_chan_set_err(chan, err);
524 bt_accept_unlink(sk);
525 parent->sk_data_ready(parent, 0);
527 sk->sk_state_change(sk);
531 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
534 skb_queue_purge(&chan->tx_q);
536 if (chan->mode == L2CAP_MODE_ERTM) {
537 __clear_retrans_timer(chan);
538 __clear_monitor_timer(chan);
539 __clear_ack_timer(chan);
541 skb_queue_purge(&chan->srej_q);
543 l2cap_seq_list_free(&chan->srej_list);
544 l2cap_seq_list_free(&chan->retrans_list);
548 static void l2cap_chan_cleanup_listen(struct sock *parent)
552 BT_DBG("parent %p", parent);
554 /* Close not yet accepted channels */
555 while ((sk = bt_accept_dequeue(parent, NULL))) {
556 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
558 l2cap_chan_lock(chan);
559 __clear_chan_timer(chan);
560 l2cap_chan_close(chan, ECONNRESET);
561 l2cap_chan_unlock(chan);
563 chan->ops->close(chan->data);
567 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
569 struct l2cap_conn *conn = chan->conn;
570 struct sock *sk = chan->sk;
572 BT_DBG("chan %p state %s sk %p", chan,
573 state_to_string(chan->state), sk);
575 switch (chan->state) {
578 l2cap_chan_cleanup_listen(sk);
580 __l2cap_state_change(chan, BT_CLOSED);
581 sock_set_flag(sk, SOCK_ZAPPED);
587 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
588 conn->hcon->type == ACL_LINK) {
589 __set_chan_timer(chan, sk->sk_sndtimeo);
590 l2cap_send_disconn_req(conn, chan, reason);
592 l2cap_chan_del(chan, reason);
596 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
597 conn->hcon->type == ACL_LINK) {
598 struct l2cap_conn_rsp rsp;
601 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
602 result = L2CAP_CR_SEC_BLOCK;
604 result = L2CAP_CR_BAD_PSM;
605 l2cap_state_change(chan, BT_DISCONN);
607 rsp.scid = cpu_to_le16(chan->dcid);
608 rsp.dcid = cpu_to_le16(chan->scid);
609 rsp.result = cpu_to_le16(result);
610 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
611 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
615 l2cap_chan_del(chan, reason);
620 l2cap_chan_del(chan, reason);
625 sock_set_flag(sk, SOCK_ZAPPED);
631 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
633 if (chan->chan_type == L2CAP_CHAN_RAW) {
634 switch (chan->sec_level) {
635 case BT_SECURITY_HIGH:
636 return HCI_AT_DEDICATED_BONDING_MITM;
637 case BT_SECURITY_MEDIUM:
638 return HCI_AT_DEDICATED_BONDING;
640 return HCI_AT_NO_BONDING;
642 } else if (chan->psm == cpu_to_le16(0x0001)) {
643 if (chan->sec_level == BT_SECURITY_LOW)
644 chan->sec_level = BT_SECURITY_SDP;
646 if (chan->sec_level == BT_SECURITY_HIGH)
647 return HCI_AT_NO_BONDING_MITM;
649 return HCI_AT_NO_BONDING;
651 switch (chan->sec_level) {
652 case BT_SECURITY_HIGH:
653 return HCI_AT_GENERAL_BONDING_MITM;
654 case BT_SECURITY_MEDIUM:
655 return HCI_AT_GENERAL_BONDING;
657 return HCI_AT_NO_BONDING;
662 /* Service level security */
663 int l2cap_chan_check_security(struct l2cap_chan *chan)
665 struct l2cap_conn *conn = chan->conn;
668 auth_type = l2cap_get_auth_type(chan);
670 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
673 static u8 l2cap_get_ident(struct l2cap_conn *conn)
677 /* Get next available identificator.
678 * 1 - 128 are used by kernel.
679 * 129 - 199 are reserved.
680 * 200 - 254 are used by utilities like l2ping, etc.
683 spin_lock(&conn->lock);
685 if (++conn->tx_ident > 128)
690 spin_unlock(&conn->lock);
695 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
697 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
700 BT_DBG("code 0x%2.2x", code);
705 if (lmp_no_flush_capable(conn->hcon->hdev))
706 flags = ACL_START_NO_FLUSH;
710 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
711 skb->priority = HCI_PRIO_MAX;
713 hci_send_acl(conn->hchan, skb, flags);
716 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
718 struct hci_conn *hcon = chan->conn->hcon;
721 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
724 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
725 lmp_no_flush_capable(hcon->hdev))
726 flags = ACL_START_NO_FLUSH;
730 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
731 hci_send_acl(chan->conn->hchan, skb, flags);
734 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
736 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
737 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
739 if (enh & L2CAP_CTRL_FRAME_TYPE) {
742 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
743 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
750 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
751 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
758 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
760 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
761 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
763 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
766 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
767 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
774 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
775 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
782 static inline void __unpack_control(struct l2cap_chan *chan,
785 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
786 __unpack_extended_control(get_unaligned_le32(skb->data),
787 &bt_cb(skb)->control);
788 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
790 __unpack_enhanced_control(get_unaligned_le16(skb->data),
791 &bt_cb(skb)->control);
792 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
796 static u32 __pack_extended_control(struct l2cap_ctrl *control)
800 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
801 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
803 if (control->sframe) {
804 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
805 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
806 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
808 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
809 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
815 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
819 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
820 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
822 if (control->sframe) {
823 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
824 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
825 packed |= L2CAP_CTRL_FRAME_TYPE;
827 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
828 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
834 static inline void __pack_control(struct l2cap_chan *chan,
835 struct l2cap_ctrl *control,
838 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
839 put_unaligned_le32(__pack_extended_control(control),
840 skb->data + L2CAP_HDR_SIZE);
842 put_unaligned_le16(__pack_enhanced_control(control),
843 skb->data + L2CAP_HDR_SIZE);
847 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
851 struct l2cap_hdr *lh;
854 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
855 hlen = L2CAP_EXT_HDR_SIZE;
857 hlen = L2CAP_ENH_HDR_SIZE;
859 if (chan->fcs == L2CAP_FCS_CRC16)
860 hlen += L2CAP_FCS_SIZE;
862 skb = bt_skb_alloc(hlen, GFP_KERNEL);
865 return ERR_PTR(-ENOMEM);
867 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
868 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
869 lh->cid = cpu_to_le16(chan->dcid);
871 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
872 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
874 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
876 if (chan->fcs == L2CAP_FCS_CRC16) {
877 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
878 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
881 skb->priority = HCI_PRIO_MAX;
885 static void l2cap_send_sframe(struct l2cap_chan *chan,
886 struct l2cap_ctrl *control)
891 BT_DBG("chan %p, control %p", chan, control);
893 if (!control->sframe)
896 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
900 if (control->super == L2CAP_SUPER_RR)
901 clear_bit(CONN_RNR_SENT, &chan->conn_state);
902 else if (control->super == L2CAP_SUPER_RNR)
903 set_bit(CONN_RNR_SENT, &chan->conn_state);
905 if (control->super != L2CAP_SUPER_SREJ) {
906 chan->last_acked_seq = control->reqseq;
907 __clear_ack_timer(chan);
910 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
911 control->final, control->poll, control->super);
913 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
914 control_field = __pack_extended_control(control);
916 control_field = __pack_enhanced_control(control);
918 skb = l2cap_create_sframe_pdu(chan, control_field);
920 l2cap_do_send(chan, skb);
923 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
925 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
926 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
927 set_bit(CONN_RNR_SENT, &chan->conn_state);
929 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
931 control |= __set_reqseq(chan, chan->buffer_seq);
934 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
936 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
939 static void l2cap_send_conn_req(struct l2cap_chan *chan)
941 struct l2cap_conn *conn = chan->conn;
942 struct l2cap_conn_req req;
944 req.scid = cpu_to_le16(chan->scid);
947 chan->ident = l2cap_get_ident(conn);
949 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
951 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
954 static void l2cap_chan_ready(struct l2cap_chan *chan)
956 struct sock *sk = chan->sk;
961 parent = bt_sk(sk)->parent;
963 BT_DBG("sk %p, parent %p", sk, parent);
965 /* This clears all conf flags, including CONF_NOT_COMPLETE */
966 chan->conf_state = 0;
967 __clear_chan_timer(chan);
969 __l2cap_state_change(chan, BT_CONNECTED);
970 sk->sk_state_change(sk);
973 parent->sk_data_ready(parent, 0);
978 static void l2cap_do_start(struct l2cap_chan *chan)
980 struct l2cap_conn *conn = chan->conn;
982 if (conn->hcon->type == LE_LINK) {
983 l2cap_chan_ready(chan);
987 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
988 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
991 if (l2cap_chan_check_security(chan) &&
992 __l2cap_no_conn_pending(chan))
993 l2cap_send_conn_req(chan);
995 struct l2cap_info_req req;
996 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
998 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
999 conn->info_ident = l2cap_get_ident(conn);
1001 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1003 l2cap_send_cmd(conn, conn->info_ident,
1004 L2CAP_INFO_REQ, sizeof(req), &req);
1008 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1010 u32 local_feat_mask = l2cap_feat_mask;
1012 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1015 case L2CAP_MODE_ERTM:
1016 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1017 case L2CAP_MODE_STREAMING:
1018 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1024 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1026 struct sock *sk = chan->sk;
1027 struct l2cap_disconn_req req;
1032 if (chan->mode == L2CAP_MODE_ERTM) {
1033 __clear_retrans_timer(chan);
1034 __clear_monitor_timer(chan);
1035 __clear_ack_timer(chan);
1038 req.dcid = cpu_to_le16(chan->dcid);
1039 req.scid = cpu_to_le16(chan->scid);
1040 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1041 L2CAP_DISCONN_REQ, sizeof(req), &req);
1044 __l2cap_state_change(chan, BT_DISCONN);
1045 __l2cap_chan_set_err(chan, err);
1049 /* ---- L2CAP connections ---- */
1050 static void l2cap_conn_start(struct l2cap_conn *conn)
1052 struct l2cap_chan *chan, *tmp;
1054 BT_DBG("conn %p", conn);
1056 mutex_lock(&conn->chan_lock);
1058 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1059 struct sock *sk = chan->sk;
1061 l2cap_chan_lock(chan);
1063 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1064 l2cap_chan_unlock(chan);
1068 if (chan->state == BT_CONNECT) {
1069 if (!l2cap_chan_check_security(chan) ||
1070 !__l2cap_no_conn_pending(chan)) {
1071 l2cap_chan_unlock(chan);
1075 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1076 && test_bit(CONF_STATE2_DEVICE,
1077 &chan->conf_state)) {
1078 l2cap_chan_close(chan, ECONNRESET);
1079 l2cap_chan_unlock(chan);
1083 l2cap_send_conn_req(chan);
1085 } else if (chan->state == BT_CONNECT2) {
1086 struct l2cap_conn_rsp rsp;
1088 rsp.scid = cpu_to_le16(chan->dcid);
1089 rsp.dcid = cpu_to_le16(chan->scid);
1091 if (l2cap_chan_check_security(chan)) {
1093 if (test_bit(BT_SK_DEFER_SETUP,
1094 &bt_sk(sk)->flags)) {
1095 struct sock *parent = bt_sk(sk)->parent;
1096 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1097 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1099 parent->sk_data_ready(parent, 0);
1102 __l2cap_state_change(chan, BT_CONFIG);
1103 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1104 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1108 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1109 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1112 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1115 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1116 rsp.result != L2CAP_CR_SUCCESS) {
1117 l2cap_chan_unlock(chan);
1121 set_bit(CONF_REQ_SENT, &chan->conf_state);
1122 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1123 l2cap_build_conf_req(chan, buf), buf);
1124 chan->num_conf_req++;
1127 l2cap_chan_unlock(chan);
1130 mutex_unlock(&conn->chan_lock);
1133 /* Find socket with cid and source/destination bdaddr.
1134 * Returns closest match, locked.
1136 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1140 struct l2cap_chan *c, *c1 = NULL;
1142 read_lock(&chan_list_lock);
1144 list_for_each_entry(c, &chan_list, global_l) {
1145 struct sock *sk = c->sk;
1147 if (state && c->state != state)
1150 if (c->scid == cid) {
1151 int src_match, dst_match;
1152 int src_any, dst_any;
1155 src_match = !bacmp(&bt_sk(sk)->src, src);
1156 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1157 if (src_match && dst_match) {
1158 read_unlock(&chan_list_lock);
1163 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1164 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1165 if ((src_match && dst_any) || (src_any && dst_match) ||
1166 (src_any && dst_any))
1171 read_unlock(&chan_list_lock);
1176 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1178 struct sock *parent, *sk;
1179 struct l2cap_chan *chan, *pchan;
1183 /* Check if we have socket listening on cid */
1184 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1185 conn->src, conn->dst);
1193 /* Check for backlog size */
1194 if (sk_acceptq_is_full(parent)) {
1195 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1199 chan = pchan->ops->new_connection(pchan->data);
1205 hci_conn_hold(conn->hcon);
1207 bacpy(&bt_sk(sk)->src, conn->src);
1208 bacpy(&bt_sk(sk)->dst, conn->dst);
1210 bt_accept_enqueue(parent, sk);
1212 l2cap_chan_add(conn, chan);
1214 __set_chan_timer(chan, sk->sk_sndtimeo);
1216 __l2cap_state_change(chan, BT_CONNECTED);
1217 parent->sk_data_ready(parent, 0);
1220 release_sock(parent);
1223 static void l2cap_conn_ready(struct l2cap_conn *conn)
1225 struct l2cap_chan *chan;
1227 BT_DBG("conn %p", conn);
1229 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1230 l2cap_le_conn_ready(conn);
1232 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1233 smp_conn_security(conn, conn->hcon->pending_sec_level);
1235 mutex_lock(&conn->chan_lock);
1237 list_for_each_entry(chan, &conn->chan_l, list) {
1239 l2cap_chan_lock(chan);
1241 if (conn->hcon->type == LE_LINK) {
1242 if (smp_conn_security(conn, chan->sec_level))
1243 l2cap_chan_ready(chan);
1245 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1246 struct sock *sk = chan->sk;
1247 __clear_chan_timer(chan);
1249 __l2cap_state_change(chan, BT_CONNECTED);
1250 sk->sk_state_change(sk);
1253 } else if (chan->state == BT_CONNECT)
1254 l2cap_do_start(chan);
1256 l2cap_chan_unlock(chan);
1259 mutex_unlock(&conn->chan_lock);
1262 /* Notify sockets that we cannot guaranty reliability anymore */
1263 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1265 struct l2cap_chan *chan;
1267 BT_DBG("conn %p", conn);
1269 mutex_lock(&conn->chan_lock);
1271 list_for_each_entry(chan, &conn->chan_l, list) {
1272 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1273 __l2cap_chan_set_err(chan, err);
1276 mutex_unlock(&conn->chan_lock);
1279 static void l2cap_info_timeout(struct work_struct *work)
1281 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1284 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1285 conn->info_ident = 0;
1287 l2cap_conn_start(conn);
1290 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1292 struct l2cap_conn *conn = hcon->l2cap_data;
1293 struct l2cap_chan *chan, *l;
1298 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1300 kfree_skb(conn->rx_skb);
1302 mutex_lock(&conn->chan_lock);
1305 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1306 l2cap_chan_hold(chan);
1307 l2cap_chan_lock(chan);
1309 l2cap_chan_del(chan, err);
1311 l2cap_chan_unlock(chan);
1313 chan->ops->close(chan->data);
1314 l2cap_chan_put(chan);
1317 mutex_unlock(&conn->chan_lock);
1319 hci_chan_del(conn->hchan);
1321 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1322 cancel_delayed_work_sync(&conn->info_timer);
1324 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1325 cancel_delayed_work_sync(&conn->security_timer);
1326 smp_chan_destroy(conn);
1329 hcon->l2cap_data = NULL;
1333 static void security_timeout(struct work_struct *work)
1335 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1336 security_timer.work);
1338 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1341 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1343 struct l2cap_conn *conn = hcon->l2cap_data;
1344 struct hci_chan *hchan;
1349 hchan = hci_chan_create(hcon);
1353 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1355 hci_chan_del(hchan);
1359 hcon->l2cap_data = conn;
1361 conn->hchan = hchan;
1363 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1365 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1366 conn->mtu = hcon->hdev->le_mtu;
1368 conn->mtu = hcon->hdev->acl_mtu;
1370 conn->src = &hcon->hdev->bdaddr;
1371 conn->dst = &hcon->dst;
1373 conn->feat_mask = 0;
1375 spin_lock_init(&conn->lock);
1376 mutex_init(&conn->chan_lock);
1378 INIT_LIST_HEAD(&conn->chan_l);
1380 if (hcon->type == LE_LINK)
1381 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1383 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1385 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1390 /* ---- Socket interface ---- */
1392 /* Find socket with psm and source / destination bdaddr.
1393 * Returns closest match.
1395 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1399 struct l2cap_chan *c, *c1 = NULL;
1401 read_lock(&chan_list_lock);
1403 list_for_each_entry(c, &chan_list, global_l) {
1404 struct sock *sk = c->sk;
1406 if (state && c->state != state)
1409 if (c->psm == psm) {
1410 int src_match, dst_match;
1411 int src_any, dst_any;
1414 src_match = !bacmp(&bt_sk(sk)->src, src);
1415 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1416 if (src_match && dst_match) {
1417 read_unlock(&chan_list_lock);
1422 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1423 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1424 if ((src_match && dst_any) || (src_any && dst_match) ||
1425 (src_any && dst_any))
1430 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP channel connection to @dst.
 * Resolves the local HCI device via hci_get_route(), validates the PSM,
 * creates (or reuses) the underlying LE/ACL link with hci_connect(),
 * attaches the channel to the connection and either starts the channel
 * immediately (link already up) or arms the channel timer.
 * Returns 0 on success or a negative errno (error paths partly elided).
 */
1435 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1436 bdaddr_t *dst, u8 dst_type)
1438 struct sock *sk = chan->sk;
1439 bdaddr_t *src = &bt_sk(sk)->src;
1440 struct l2cap_conn *conn;
1441 struct hci_conn *hcon;
1442 struct hci_dev *hdev;
1446 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1447 dst_type, __le16_to_cpu(chan->psm));
/* no route to the destination -> -EHOSTUNREACH */
1449 hdev = hci_get_route(dst, src);
1451 return -EHOSTUNREACH;
1455 l2cap_chan_lock(chan);
/* PSM must be odd and lsb of upper byte must be 0 */
1458 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1459 chan->chan_type != L2CAP_CHAN_RAW) {
/* connection-oriented channels need either a PSM or a fixed CID */
1464 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1469 switch (chan->mode) {
1470 case L2CAP_MODE_BASIC:
1472 case L2CAP_MODE_ERTM:
1473 case L2CAP_MODE_STREAMING:
1484 switch (sk->sk_state) {
1488 /* Already connecting */
1494 /* Already connected */
1510 /* Set destination address and psm */
1511 bacpy(&bt_sk(sk)->dst, dst);
1518 auth_type = l2cap_get_auth_type(chan);
/* LE data channel CID selects an LE link, everything else ACL */
1520 if (chan->dcid == L2CAP_CID_LE_DATA)
1521 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1522 chan->sec_level, auth_type);
1524 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1525 chan->sec_level, auth_type);
1528 err = PTR_ERR(hcon);
1532 conn = l2cap_conn_add(hcon, 0);
/* NOTE(review): LE links apparently allow only one channel per conn —
 * rejection branch elided; confirm against full source */
1539 if (hcon->type == LE_LINK) {
1542 if (!list_empty(&conn->chan_l)) {
1551 /* Update source addr of the socket */
1552 bacpy(src, conn->src)
1554 l2cap_chan_unlock(chan);
1555 l2cap_chan_add(conn, chan);
1556 l2cap_chan_lock(chan);
1558 l2cap_state_change(chan, BT_CONNECT);
1559 __set_chan_timer(chan, sk->sk_sndtimeo);
/* link already up: skip the connect phase and start the channel now */
1561 if (hcon->state == BT_CONNECTED) {
1562 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1563 __clear_chan_timer(chan);
1564 if (l2cap_chan_check_security(chan))
1565 l2cap_state_change(chan, BT_CONNECTED);
1567 l2cap_do_start(chan);
1573 l2cap_chan_unlock(chan);
1574 hci_dev_unlock(hdev);
/* Sleep (interruptibly) until every transmitted I-frame on the socket's
 * channel has been acknowledged, or the channel loses its connection.
 * Returns 0, a signal errno via sock_intr_errno(), or a socket error.
 * NOTE(review): timeo initialisation and the break statements are elided.
 */
1579 int __l2cap_wait_ack(struct sock *sk)
1581 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1582 DECLARE_WAITQUEUE(wait, current);
1586 add_wait_queue(sk_sleep(sk), &wait);
1587 set_current_state(TASK_INTERRUPTIBLE);
/* keep waiting while frames remain unacked and the conn is alive */
1588 while (chan->unacked_frames > 0 && chan->conn) {
1592 if (signal_pending(current)) {
1593 err = sock_intr_errno(timeo);
1598 timeo = schedule_timeout(timeo);
1600 set_current_state(TASK_INTERRUPTIBLE);
1602 err = sock_error(sk);
1606 set_current_state(TASK_RUNNING);
1607 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer expiry (WAIT_F state): if the retry budget is
 * exhausted, tear the channel down; otherwise poll the peer again and
 * re-arm the monitor timer.  Drops the timer's channel reference on
 * every path.
 */
1611 static void l2cap_monitor_timeout(struct work_struct *work)
1613 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1614 monitor_timer.work);
1616 BT_DBG("chan %p", chan);
1618 l2cap_chan_lock(chan);
/* too many unanswered polls -> abort the connection */
1620 if (chan->retry_count >= chan->remote_max_tx) {
1621 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1622 l2cap_chan_unlock(chan);
1623 l2cap_chan_put(chan);
1627 chan->retry_count++;
1628 __set_monitor_timer(chan);
1630 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1631 l2cap_chan_unlock(chan);
1632 l2cap_chan_put(chan);
/* ERTM retransmission timer expiry: start the poll/monitor sequence —
 * send an S-frame with the poll bit, arm the monitor timer, and flag
 * that we are now waiting for the peer's F-bit response.
 */
1635 static void l2cap_retrans_timeout(struct work_struct *work)
1637 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1638 retrans_timer.work);
1640 BT_DBG("chan %p", chan);
1642 l2cap_chan_lock(chan);
1644 chan->retry_count = 1;
1645 __set_monitor_timer(chan);
1647 set_bit(CONN_WAIT_F, &chan->conn_state);
1649 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1651 l2cap_chan_unlock(chan);
/* release the reference taken when the timer was scheduled */
1652 l2cap_chan_put(chan);
/* Streaming-mode transmit: append @skbs to the tx queue and send every
 * queued PDU immediately — no acks, no retransmission.  Each frame gets
 * a control field (reqseq 0, next txseq) and, if configured, a CRC16
 * FCS trailer.
 */
1655 static int l2cap_streaming_send(struct l2cap_chan *chan,
1656 struct sk_buff_head *skbs)
1658 struct sk_buff *skb;
1659 struct l2cap_ctrl *control;
1661 BT_DBG("chan %p, skbs %p", chan, skbs);
1663 if (chan->state != BT_CONNECTED)
1666 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1668 while (!skb_queue_empty(&chan->tx_q)) {
1670 skb = skb_dequeue(&chan->tx_q);
1672 bt_cb(skb)->control.retries = 1;
1673 control = &bt_cb(skb)->control;
/* streaming mode never acks, so reqseq is always 0 */
1675 control->reqseq = 0;
1676 control->txseq = chan->next_tx_seq;
1678 __pack_control(chan, control, skb);
/* FCS is computed over the frame as sent, then appended */
1680 if (chan->fcs == L2CAP_FCS_CRC16) {
1681 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1682 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1685 l2cap_do_send(chan, skb);
1687 BT_DBG("Sent txseq %d", (int)control->txseq);
1689 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1690 chan->frames_sent++;
/* ERTM transmit: send new I-frames from tx_send_head while the remote
 * tx window has room and we are in the XMIT state.  Each frame is
 * stamped with reqseq/txseq (piggybacking an ack), FCS-protected if
 * configured, then *cloned* for transmission so the original stays
 * queued for potential retransmission.  Returns the number of frames
 * sent (return statement elided).
 */
1696 static int l2cap_ertm_send(struct l2cap_chan *chan)
1698 struct sk_buff *skb, *tx_skb;
1699 struct l2cap_ctrl *control;
1702 BT_DBG("chan %p", chan);
1704 if (chan->state != BT_CONNECTED)
/* peer sent RNR: hold transmission until it clears */
1707 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1710 while (chan->tx_send_head &&
1711 chan->unacked_frames < chan->remote_tx_win &&
1712 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1714 skb = chan->tx_send_head;
1716 bt_cb(skb)->control.retries = 1;
1717 control = &bt_cb(skb)->control;
1719 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* outgoing I-frame acks everything up to buffer_seq */
1722 control->reqseq = chan->buffer_seq;
1723 chan->last_acked_seq = chan->buffer_seq;
1724 control->txseq = chan->next_tx_seq;
1726 __pack_control(chan, control, skb);
1728 if (chan->fcs == L2CAP_FCS_CRC16) {
1729 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1730 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1733 /* Clone after data has been modified. Data is assumed to be
1734 read-only (for locking purposes) on cloned sk_buffs.
1736 tx_skb = skb_clone(skb, GFP_KERNEL);
1741 __set_retrans_timer(chan);
1743 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1744 chan->unacked_frames++;
1745 chan->frames_sent++;
/* advance tx_send_head; NULL once the queue tail has been sent */
1748 if (skb_queue_is_last(&chan->tx_q, skb))
1749 chan->tx_send_head = NULL;
1751 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1753 l2cap_do_send(chan, tx_skb);
1754 BT_DBG("Sent txseq %d", (int)control->txseq);
1757 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
1758 (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on retrans_list.  For each
 * seq: locate the original skb in tx_q, enforce the max_tx retry limit
 * (disconnecting when exceeded), rebuild the control field in place on
 * a private copy/clone, recompute the FCS, and send.
 */
1763 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1765 struct l2cap_ctrl control;
1766 struct sk_buff *skb;
1767 struct sk_buff *tx_skb;
1770 BT_DBG("chan %p", chan);
1772 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1775 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1776 seq = l2cap_seq_list_pop(&chan->retrans_list);
/* frame may have been acked and freed since the SREJ/REJ arrived */
1778 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1780 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1785 bt_cb(skb)->control.retries++;
1786 control = bt_cb(skb)->control;
/* retry budget exhausted -> give up on the whole channel */
1788 if (chan->max_tx != 0 &&
1789 bt_cb(skb)->control.retries > chan->max_tx) {
1790 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1791 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1792 l2cap_seq_list_clear(&chan->retrans_list);
1796 control.reqseq = chan->buffer_seq;
1797 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1802 if (skb_cloned(skb)) {
1803 /* Cloned sk_buffs are read-only, so we need a
1806 tx_skb = skb_copy(skb, GFP_ATOMIC);
1808 tx_skb = skb_clone(skb, GFP_ATOMIC);
/* allocation failure: abandon the retransmit pass */
1812 l2cap_seq_list_clear(&chan->retrans_list);
1816 /* Update skb contents */
1817 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1818 put_unaligned_le32(__pack_extended_control(&control),
1819 tx_skb->data + L2CAP_HDR_SIZE);
1821 put_unaligned_le16(__pack_enhanced_control(&control),
1822 tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS appears recomputed over a frame that already
 * carries an FCS trailer — the skb_trim of the old FCS is elided;
 * confirm against full source */
1825 if (chan->fcs == L2CAP_FCS_CRC16) {
1826 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1827 put_unaligned_le16(fcs, skb_put(tx_skb,
1831 l2cap_do_send(chan, tx_skb);
1833 BT_DBG("Resent txseq %d", control.txseq);
1835 chan->last_acked_seq = chan->buffer_seq;
/* Handle a REJ-style request: rebuild retrans_list with every unacked
 * frame starting at control->reqseq up to (but not including)
 * tx_send_head, then kick l2cap_ertm_resend() to push them out.
 */
1839 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1840 struct l2cap_ctrl *control)
1842 struct sk_buff *skb;
1844 BT_DBG("chan %p, control %p", chan, control);
1847 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1849 l2cap_seq_list_clear(&chan->retrans_list);
1851 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1854 if (chan->unacked_frames) {
/* first walk: find the frame matching reqseq (or the send head) */
1855 skb_queue_walk(&chan->tx_q, skb) {
1856 if (bt_cb(skb)->control.txseq == control->reqseq ||
1857 skb == chan->tx_send_head)
/* second walk: queue everything from there up to tx_send_head */
1861 skb_queue_walk_from(&chan->tx_q, skb) {
1862 if (skb == chan->tx_send_head)
1865 l2cap_seq_list_append(&chan->retrans_list,
1866 bt_cb(skb)->control.txseq);
1869 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  If we are locally busy, send RNR.
 * Otherwise try to piggyback the ack on pending I-frames; failing that,
 * send an explicit RR once enough frames are pending (threshold based
 * on the tx window), or just (re)arm the ack timer.
 */
1873 static void l2cap_send_ack(struct l2cap_chan *chan)
1875 struct l2cap_ctrl control;
1876 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1877 chan->last_acked_seq);
1880 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1881 chan, chan->last_acked_seq, chan->buffer_seq);
1883 memset(&control, 0, sizeof(control));
/* locally busy: tell the peer to stop sending (RNR) */
1886 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1887 chan->rx_state == L2CAP_RX_STATE_RECV) {
1888 __clear_ack_timer(chan);
1889 control.super = L2CAP_SUPER_RNR;
1890 control.reqseq = chan->buffer_seq;
1891 l2cap_send_sframe(chan, &control);
1893 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1894 l2cap_ertm_send(chan);
1895 /* If any i-frames were sent, they included an ack */
1896 if (chan->buffer_seq == chan->last_acked_seq)
1900 /* Ack now if the tx window is 3/4ths full.
1901 * Calculate without mul or div
/* threshold = tx_win * 3; presumably shifted right by 2 on the
 * elided next line to yield 3/4 of the window — TODO confirm */
1903 threshold = chan->tx_win;
1904 threshold += threshold << 1;
1907 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
1910 if (frames_to_ack >= threshold) {
1911 __clear_ack_timer(chan);
1912 control.super = L2CAP_SUPER_RR;
1913 control.reqseq = chan->buffer_seq;
1914 l2cap_send_sframe(chan, &control);
/* not enough pending yet: defer the ack via the timer */
1919 __set_ack_timer(chan);
/* Copy @len bytes of user iovec data into @skb: the first @count bytes
 * go into skb's linear area, the remainder into MTU-sized continuation
 * fragments chained on skb's frag_list.  skb->len/data_len are updated
 * to account for each fragment.
 * NOTE(review): the loop construct and error returns are partly elided.
 */
1923 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1924 struct msghdr *msg, int len,
1925 int count, struct sk_buff *skb)
1927 struct l2cap_conn *conn = chan->conn;
1928 struct sk_buff **frag;
1931 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1937 /* Continuation fragments (no L2CAP header) */
1938 frag = &skb_shinfo(skb)->frag_list;
1940 struct sk_buff *tmp;
/* each fragment is capped at the link MTU */
1942 count = min_t(unsigned int, conn->mtu, len);
1944 tmp = chan->ops->alloc_skb(chan, count,
1945 msg->msg_flags & MSG_DONTWAIT);
1947 return PTR_ERR(tmp);
1951 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1954 (*frag)->priority = skb->priority;
/* keep the head skb's length accounting consistent */
1959 skb->len += (*frag)->len;
1960 skb->data_len += (*frag)->len;
1962 frag = &(*frag)->next;
/* Build a connectionless (G-frame style) PDU: basic L2CAP header plus a
 * 2-byte PSM, followed by the user payload copied from @msg.  Returns
 * the skb or an ERR_PTR on failure.
 */
1968 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1969 struct msghdr *msg, size_t len,
1972 struct l2cap_conn *conn = chan->conn;
1973 struct sk_buff *skb;
1974 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1975 struct l2cap_hdr *lh;
1977 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
/* linear part holds at most one MTU; rest goes to fragments */
1979 count = min_t(unsigned int, (conn->mtu - hlen), len);
1981 skb = chan->ops->alloc_skb(chan, count + hlen,
1982 msg->msg_flags & MSG_DONTWAIT);
1986 skb->priority = priority;
1988 /* Create L2CAP header */
1989 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1990 lh->cid = cpu_to_le16(chan->dcid);
/* payload length includes the PSM field */
1991 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1992 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1994 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1995 if (unlikely(err < 0)) {
1997 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload copied from @msg.  Returns the skb or an ERR_PTR on failure.
 */
2002 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2003 struct msghdr *msg, size_t len,
2006 struct l2cap_conn *conn = chan->conn;
2007 struct sk_buff *skb;
2009 struct l2cap_hdr *lh;
2011 BT_DBG("chan %p len %d", chan, (int)len);
2013 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2015 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2016 msg->msg_flags & MSG_DONTWAIT);
2020 skb->priority = priority;
2022 /* Create L2CAP header */
2023 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2024 lh->cid = cpu_to_le16(chan->dcid);
2025 lh->len = cpu_to_le16(len);
2027 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2028 if (unlikely(err < 0)) {
2030 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU.  Header size depends on whether
 * extended control fields are in use, plus optional SDU-length and FCS
 * space.  The control field itself is zeroed here and filled in at send
 * time.  Returns the skb or an ERR_PTR on failure.
 */
2035 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2036 struct msghdr *msg, size_t len,
2039 struct l2cap_conn *conn = chan->conn;
2040 struct sk_buff *skb;
2041 int err, count, hlen;
2042 struct l2cap_hdr *lh;
2044 BT_DBG("chan %p len %d", chan, (int)len);
2047 return ERR_PTR(-ENOTCONN);
2049 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2050 hlen = L2CAP_EXT_HDR_SIZE;
2052 hlen = L2CAP_ENH_HDR_SIZE;
/* sdulen is present only on SAR start frames (condition elided) */
2055 hlen += L2CAP_SDULEN_SIZE;
2057 if (chan->fcs == L2CAP_FCS_CRC16)
2058 hlen += L2CAP_FCS_SIZE;
2060 count = min_t(unsigned int, (conn->mtu - hlen), len);
2062 skb = chan->ops->alloc_skb(chan, count + hlen,
2063 msg->msg_flags & MSG_DONTWAIT);
2067 /* Create L2CAP header */
2068 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2069 lh->cid = cpu_to_le16(chan->dcid);
2070 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2072 /* Control header is populated later */
2073 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2074 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2076 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2079 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2081 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2082 if (unlikely(err < 0)) {
2084 return ERR_PTR(err);
/* remember per-frame FCS setting; retries start at 0 */
2087 bt_cb(skb)->control.fcs = chan->fcs;
2088 bt_cb(skb)->control.retries = 0;
/* Segment an SDU from @msg into a queue of I-frame PDUs on @seg_queue.
 * PDU size is derived from the HCI MTU (so each PDU fits one HCI
 * fragment), capped by the remote MPS; frames are tagged UNSEGMENTED or
 * START/CONTINUE/END per the SAR rules.
 * NOTE(review): the enclosing loop and final return are elided.
 */
2092 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2093 struct sk_buff_head *seg_queue,
2094 struct msghdr *msg, size_t len)
2096 struct sk_buff *skb;
2102 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2104 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2105 * so fragmented skbs are not used. The HCI layer's handling
2106 * of fragmented skbs is not compatible with ERTM's queueing.
2109 /* PDU size is derived from the HCI MTU */
2110 pdu_len = chan->conn->mtu;
2112 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2114 /* Adjust for largest possible L2CAP overhead. */
2115 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
2117 /* Remote device may have requested smaller PDUs */
2118 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
/* single-PDU SDU: no SAR header needed */
2120 if (len <= pdu_len) {
2121 sar = L2CAP_SAR_UNSEGMENTED;
2125 sar = L2CAP_SAR_START;
/* START frame carries the SDU length, shrinking its payload room */
2127 pdu_len -= L2CAP_SDULEN_SIZE;
2131 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2134 __skb_queue_purge(seg_queue);
2135 return PTR_ERR(skb);
2138 bt_cb(skb)->control.sar = sar;
2139 __skb_queue_tail(seg_queue, skb);
/* after the START frame, subsequent PDUs regain the SDULEN bytes */
2144 pdu_len += L2CAP_SDULEN_SIZE;
2147 if (len <= pdu_len) {
2148 sar = L2CAP_SAR_END;
2151 sar = L2CAP_SAR_CONTINUE;
/* Top-level channel send: dispatch by channel type and mode.
 * Connectionless channels get a single connless PDU; basic mode gets a
 * single basic PDU; ERTM/streaming segments the SDU first, then hands
 * the queue to the TX state machine (ERTM) or streams it directly.
 */
2158 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2161 struct sk_buff *skb;
2163 struct sk_buff_head seg_queue;
2165 /* Connectionless channel */
2166 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2167 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2169 return PTR_ERR(skb);
2171 l2cap_do_send(chan, skb);
2175 switch (chan->mode) {
2176 case L2CAP_MODE_BASIC:
2177 /* Check outgoing MTU */
2178 if (len > chan->omtu)
2181 /* Create a basic PDU */
2182 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2184 return PTR_ERR(skb);
2186 l2cap_do_send(chan, skb);
2190 case L2CAP_MODE_ERTM:
2191 case L2CAP_MODE_STREAMING:
2192 /* Check outgoing MTU */
2193 if (len > chan->omtu) {
2198 __skb_queue_head_init(&seg_queue);
2200 /* Do segmentation before calling in to the state machine,
2201 * since it's possible to block while waiting for memory
2204 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2206 /* The channel could have been closed while segmenting,
2207 * check that it is still connected.
2209 if (chan->state != BT_CONNECTED) {
2210 __skb_queue_purge(&seg_queue);
2217 if (chan->mode == L2CAP_MODE_ERTM)
2218 err = l2cap_tx(chan, 0, &seg_queue,
2219 L2CAP_EV_DATA_REQUEST);
2221 err = l2cap_streaming_send(chan, &seg_queue);
2226 /* If the skbs were not queued for sending, they'll still be in
2227 * seg_queue and need to be purged.
2229 __skb_queue_purge(&seg_queue);
/* unexpected mode falls through to an error (err assignment elided) */
2233 BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every missing sequence number between
 * expected_tx_seq and @txseq (the out-of-order frame we just received),
 * skipping seqs already buffered in srej_q, and record each requested
 * seq on srej_list.
 */
2240 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2242 struct l2cap_ctrl control;
2245 BT_DBG("chan %p, txseq %d", chan, txseq);
2247 memset(&control, 0, sizeof(control));
2249 control.super = L2CAP_SUPER_SREJ;
2251 for (seq = chan->expected_tx_seq; seq != txseq;
2252 seq = __next_seq(chan, seq)) {
2253 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2254 control.reqseq = seq;
2255 l2cap_send_sframe(chan, &control);
2256 l2cap_seq_list_append(&chan->srej_list, seq);
/* receive pointer now sits just past the out-of-order frame */
2260 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for only the most recently requested (tail) sequence
 * number on srej_list; no-op when the list is empty.
 */
2263 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2265 struct l2cap_ctrl control;
2267 BT_DBG("chan %p", chan);
2269 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2272 memset(&control, 0, sizeof(control));
2274 control.super = L2CAP_SUPER_SREJ;
2275 control.reqseq = chan->srej_list.tail;
2276 l2cap_send_sframe(chan, &control);
/* Re-send an SREJ for every sequence number still on srej_list, except
 * @txseq (which just arrived).  Each seq is popped, re-requested and
 * appended back, so the pass terminates when the original head comes
 * around again.
 */
2279 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2281 struct l2cap_ctrl control;
2285 BT_DBG("chan %p, txseq %d", chan, txseq);
2287 memset(&control, 0, sizeof(control));
2289 control.super = L2CAP_SUPER_SREJ;
2291 /* Capture initial list head to allow only one pass through the list. */
2292 initial_head = chan->srej_list.head;
2295 seq = l2cap_seq_list_pop(&chan->srej_list);
/* txseq no longer needs re-requesting; CLEAR means list exhausted */
2296 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2299 control.reqseq = seq;
2300 l2cap_send_sframe(chan, &control);
2301 l2cap_seq_list_append(&chan->srej_list, seq);
2302 } while (chan->srej_list.head != initial_head);
/* Process an incoming ack (reqseq): unlink and free every tx-queue skb
 * from expected_ack_seq up to (but not including) @reqseq, decrementing
 * unacked_frames, and stop the retransmission timer once nothing is
 * outstanding.
 */
2305 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2307 struct sk_buff *acked_skb;
2310 BT_DBG("chan %p, reqseq %d", chan, reqseq);
/* nothing new acknowledged: fast exit */
2312 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2315 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2316 chan->expected_ack_seq, chan->unacked_frames);
2318 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2319 ackseq = __next_seq(chan, ackseq)) {
2321 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2323 skb_unlink(acked_skb, &chan->tx_q);
2324 kfree_skb(acked_skb);
2325 chan->unacked_frames--;
2329 chan->expected_ack_seq = reqseq;
2331 if (chan->unacked_frames == 0)
2332 __clear_retrans_timer(chan);
2334 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
/* Abandon SREJ recovery: rewind the receive pointer to buffer_seq, drop
 * the pending SREJ list and buffered out-of-order frames, and return
 * the RX state machine to plain RECV.
 */
2337 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2339 BT_DBG("chan %p", chan);
2341 chan->expected_tx_seq = chan->buffer_seq;
2342 l2cap_seq_list_clear(&chan->srej_list);
2343 skb_queue_purge(&chan->srej_q);
2344 chan->rx_state = L2CAP_RX_STATE_RECV;
/* TX state machine, XMIT state: handle data requests, local-busy
 * transitions, incoming acks, explicit polls and retransmission
 * timeouts.  Poll events move the machine to WAIT_F.
 * NOTE(review): the switch(event) opener and break statements are
 * elided from this view.
 */
2347 static int l2cap_tx_state_xmit(struct l2cap_chan *chan,
2348 struct l2cap_ctrl *control,
2349 struct sk_buff_head *skbs, u8 event)
2353 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2357 case L2CAP_EV_DATA_REQUEST:
2358 if (chan->tx_send_head == NULL)
2359 chan->tx_send_head = skb_peek(skbs);
2361 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2362 l2cap_ertm_send(chan);
2364 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2365 BT_DBG("Enter LOCAL_BUSY");
2366 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2368 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2369 /* The SREJ_SENT state must be aborted if we are to
2370 * enter the LOCAL_BUSY state.
2372 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack() emits the RNR while locally busy */
2375 l2cap_send_ack(chan);
2378 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2379 BT_DBG("Exit LOCAL_BUSY");
2380 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* if we told the peer RNR, poll with RR to resume its transmission */
2382 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2383 struct l2cap_ctrl local_control;
2385 memset(&local_control, 0, sizeof(local_control));
2386 local_control.sframe = 1;
2387 local_control.super = L2CAP_SUPER_RR;
2388 local_control.poll = 1;
2389 local_control.reqseq = chan->buffer_seq;
2390 l2cap_send_sframe(chan, &local_control);
2392 chan->retry_count = 1;
2393 __set_monitor_timer(chan);
2394 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2397 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2398 l2cap_process_reqseq(chan, control->reqseq);
2400 case L2CAP_EV_EXPLICIT_POLL:
2401 l2cap_send_rr_or_rnr(chan, 1);
2402 chan->retry_count = 1;
2403 __set_monitor_timer(chan);
2404 __clear_ack_timer(chan);
2405 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2407 case L2CAP_EV_RETRANS_TO:
2408 l2cap_send_rr_or_rnr(chan, 1);
2409 chan->retry_count = 1;
2410 __set_monitor_timer(chan);
2411 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2413 case L2CAP_EV_RECV_FBIT:
2414 /* Nothing to process */
/* TX state machine, WAIT_F state: we have polled the peer and are
 * waiting for an F-bit response.  Data is queued but not sent; the
 * F-bit returns us to XMIT; monitor timeouts re-poll until max_tx is
 * exhausted, then disconnect.
 * NOTE(review): switch(event) opener and break statements are elided.
 */
2423 static int l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2424 struct l2cap_ctrl *control,
2425 struct sk_buff_head *skbs, u8 event)
2429 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2433 case L2CAP_EV_DATA_REQUEST:
2434 if (chan->tx_send_head == NULL)
2435 chan->tx_send_head = skb_peek(skbs);
2436 /* Queue data, but don't send. */
2437 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2439 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2440 BT_DBG("Enter LOCAL_BUSY");
2441 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2443 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2444 /* The SREJ_SENT state must be aborted if we are to
2445 * enter the LOCAL_BUSY state.
2447 l2cap_abort_rx_srej_sent(chan);
2450 l2cap_send_ack(chan);
2453 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2454 BT_DBG("Exit LOCAL_BUSY");
2455 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2457 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2458 struct l2cap_ctrl local_control;
2459 memset(&local_control, 0, sizeof(local_control));
2460 local_control.sframe = 1;
2461 local_control.super = L2CAP_SUPER_RR;
2462 local_control.poll = 1;
2463 local_control.reqseq = chan->buffer_seq;
2464 l2cap_send_sframe(chan, &local_control);
2466 chan->retry_count = 1;
2467 __set_monitor_timer(chan);
2468 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2471 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2472 l2cap_process_reqseq(chan, control->reqseq);
/* F-bit answers our poll: stop monitoring, resume normal XMIT */
2476 case L2CAP_EV_RECV_FBIT:
2477 if (control && control->final) {
2478 __clear_monitor_timer(chan);
2479 if (chan->unacked_frames > 0)
2480 __set_retrans_timer(chan);
2481 chan->retry_count = 0;
2482 chan->tx_state = L2CAP_TX_STATE_XMIT;
2483 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2486 case L2CAP_EV_EXPLICIT_POLL:
2489 case L2CAP_EV_MONITOR_TO:
/* max_tx == 0 means unlimited retries */
2490 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2491 l2cap_send_rr_or_rnr(chan, 1);
2492 __set_monitor_timer(chan);
2493 chan->retry_count++;
2495 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Dispatch a TX state-machine event to the handler for the current
 * tx_state (XMIT or WAIT_F) and return its result.
 */
2505 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2506 struct sk_buff_head *skbs, u8 event)
2510 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2511 chan, control, skbs, event, chan->tx_state);
2513 switch (chan->tx_state) {
2514 case L2CAP_TX_STATE_XMIT:
2515 err = l2cap_tx_state_xmit(chan, control, skbs, event);
2517 case L2CAP_TX_STATE_WAIT_F:
2518 err = l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received frame's reqseq/F-bit to the TX state machine so
 * acked frames can be released (no data skbs are passed here).
 */
2528 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2529 struct l2cap_ctrl *control)
2531 BT_DBG("chan %p, control %p", chan, control);
2532 l2cap_tx(chan, control, 0, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2535 /* Copy frame to all raw sockets on that connection */
/* Deliver a copy of @skb to every RAW-type channel on @conn, skipping
 * the socket the frame originated from; each recipient gets its own
 * clone.
 */
2536 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2538 struct sk_buff *nskb;
2539 struct l2cap_chan *chan;
2541 BT_DBG("conn %p", conn);
2543 mutex_lock(&conn->chan_lock);
2545 list_for_each_entry(chan, &conn->chan_l, list) {
2546 struct sock *sk = chan->sk;
2547 if (chan->chan_type != L2CAP_CHAN_RAW)
2550 /* Don't send frame to the socket it came from */
2553 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv() takes ownership of nskb; free it on refusal (elided) */
2557 if (chan->ops->recv(chan->data, nskb))
2561 mutex_unlock(&conn->chan_lock);
2564 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (signalling CID
 * chosen by link type), command header, then @dlen bytes of @data.
 * Payload beyond the first MTU-sized chunk is chained as continuation
 * fragments on frag_list.
 * NOTE(review): loop construct, error paths and return are elided.
 */
2565 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2566 u8 code, u8 ident, u16 dlen, void *data)
2568 struct sk_buff *skb, **frag;
2569 struct l2cap_cmd_hdr *cmd;
2570 struct l2cap_hdr *lh;
2573 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2574 conn, code, ident, dlen);
2576 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2577 count = min_t(unsigned int, conn->mtu, len);
2579 skb = bt_skb_alloc(count, GFP_ATOMIC);
2583 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2584 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links carry signalling on a different fixed CID */
2586 if (conn->hcon->type == LE_LINK)
2587 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2589 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2591 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2594 cmd->len = cpu_to_le16(dlen);
2597 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2598 memcpy(skb_put(skb, count), data, count);
2604 /* Continuation fragments (no L2CAP header) */
2605 frag = &skb_shinfo(skb)->frag_list;
2607 count = min_t(unsigned int, conn->mtu, len);
2609 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2613 memcpy(skb_put(*frag, count), data, count);
2618 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: return its total length and
 * write out its type and value.  1/2/4-byte values are read inline
 * (unaligned-safe); anything else is returned as a pointer to the raw
 * option data.
 */
2628 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2630 struct l2cap_conf_opt *opt = *ptr;
2633 len = L2CAP_CONF_OPT_SIZE + opt->len;
2641 *val = *((u8 *) opt->val);
2645 *val = get_unaligned_le16(opt->val);
2649 *val = get_unaligned_le32(opt->val);
/* variable-length option: hand back a pointer, not a value */
2653 *val = (unsigned long) opt->val;
2657 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it.  1/2/4-byte values are stored inline (unaligned-safe);
 * other lengths treat @val as a pointer to the data to copy.
 */
2661 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2663 struct l2cap_conf_opt *opt = *ptr;
2665 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2672 *((u8 *) opt->val) = val;
2676 put_unaligned_le16(val, opt->val);
2680 put_unaligned_le32(val, opt->val);
2684 memcpy(opt->val, (void *) val, len);
2688 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option built from the channel's
 * local parameters.  ERTM advertises the full local service settings;
 * streaming mode forces best-effort service type.
 */
2691 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2693 struct l2cap_conf_efs efs;
2695 switch (chan->mode) {
2696 case L2CAP_MODE_ERTM:
2697 efs.id = chan->local_id;
2698 efs.stype = chan->local_stype;
2699 efs.msdu = cpu_to_le16(chan->local_msdu);
2700 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2701 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2702 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2705 case L2CAP_MODE_STREAMING:
2707 efs.stype = L2CAP_SERV_BESTEFFORT;
2708 efs.msdu = cpu_to_le16(chan->local_msdu);
2709 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2718 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2719 (unsigned long) &efs);
/* Deferred-ack timer expiry: send any pending acknowledgement for the
 * channel, then drop the reference taken when the timer was armed.
 */
2722 static void l2cap_ack_timeout(struct work_struct *work)
2724 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2727 BT_DBG("chan %p", chan);
2729 l2cap_chan_lock(chan);
2731 l2cap_send_ack(chan);
2733 l2cap_chan_unlock(chan);
2735 l2cap_chan_put(chan);
/* Reset ERTM/streaming per-channel state: zero all sequence counters,
 * initialise the tx queue, and — for ERTM only — set up the RX/TX state
 * machines, the three delayed-work timers, the SREJ queue, and the
 * srej/retrans sequence lists (freeing srej_list if the second
 * allocation fails).  Returns 0 or a negative errno.
 */
2738 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2742 chan->next_tx_seq = 0;
2743 chan->expected_tx_seq = 0;
2744 chan->expected_ack_seq = 0;
2745 chan->unacked_frames = 0;
2746 chan->buffer_seq = 0;
2747 chan->frames_sent = 0;
2748 chan->last_acked_seq = 0;
2750 chan->sdu_last_frag = NULL;
2753 skb_queue_head_init(&chan->tx_q);
/* streaming mode needs none of the ERTM machinery below */
2755 if (chan->mode != L2CAP_MODE_ERTM)
2758 chan->rx_state = L2CAP_RX_STATE_RECV;
2759 chan->tx_state = L2CAP_TX_STATE_XMIT;
2761 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2762 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2763 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2765 skb_queue_head_init(&chan->srej_q);
2767 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2771 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* second list failed: don't leak the first */
2773 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to use: keep ERTM/streaming if the remote's
 * feature mask supports it, otherwise fall back to basic mode.
 */
2778 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2781 case L2CAP_MODE_STREAMING:
2782 case L2CAP_MODE_ERTM:
2783 if (l2cap_mode_supported(mode, remote_feat_mask))
2787 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled
 * and the remote advertising the EXT_WINDOW feature. */
2791 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2793 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Spec is usable only with high-speed support enabled and
 * the remote advertising the EXT_FLOW feature. */
2796 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2798 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Configure the transmit window: enable extended control fields when
 * the requested window exceeds the default and EWS is supported,
 * otherwise clamp tx_win to the standard default window.
 */
2801 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2803 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2804 __l2cap_ews_supported(chan)) {
2805 /* use extended control field */
2806 set_bit(FLAG_EXT_CTRL, &chan->flags);
2807 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2809 chan->tx_win = min_t(u16, chan->tx_win,
2810 L2CAP_DEFAULT_TX_WINDOW);
2811 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request into @data.  On the first request
 * the mode may be renegotiated down via l2cap_select_mode(); then the
 * MTU option (if non-default) and mode-specific RFC, EFS, FCS and EWS
 * options are appended.  Returns the total request length (return
 * expression elided from this view).
 */
2815 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2817 struct l2cap_conf_req *req = data;
2818 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2819 void *ptr = req->data;
2822 BT_DBG("chan %p", chan);
/* only the first config exchange may renegotiate the mode */
2824 if (chan->num_conf_req || chan->num_conf_rsp)
2827 switch (chan->mode) {
2828 case L2CAP_MODE_STREAMING:
2829 case L2CAP_MODE_ERTM:
/* state-2 devices keep their configured mode as-is */
2830 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2833 if (__l2cap_efs_supported(chan))
2834 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2838 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2843 if (chan->imtu != L2CAP_DEFAULT_MTU)
2844 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2846 switch (chan->mode) {
2847 case L2CAP_MODE_BASIC:
/* if the peer supports neither ERTM nor streaming there is no
 * point sending an RFC option at all */
2848 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2849 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2852 rfc.mode = L2CAP_MODE_BASIC;
2854 rfc.max_transmit = 0;
2855 rfc.retrans_timeout = 0;
2856 rfc.monitor_timeout = 0;
2857 rfc.max_pdu_size = 0;
2859 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2860 (unsigned long) &rfc);
2863 case L2CAP_MODE_ERTM:
2864 rfc.mode = L2CAP_MODE_ERTM;
2865 rfc.max_transmit = chan->max_tx;
2866 rfc.retrans_timeout = 0;
2867 rfc.monitor_timeout = 0;
/* cap the advertised PDU size by what fits in the link MTU */
2869 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2870 L2CAP_EXT_HDR_SIZE -
2873 rfc.max_pdu_size = cpu_to_le16(size);
2875 l2cap_txwin_setup(chan);
2877 rfc.txwin_size = min_t(u16, chan->tx_win,
2878 L2CAP_DEFAULT_TX_WINDOW);
2880 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2881 (unsigned long) &rfc);
2883 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2884 l2cap_add_opt_efs(&ptr, chan);
2886 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* advertise FCS off when we don't want it or peer sent none */
2889 if (chan->fcs == L2CAP_FCS_NONE ||
2890 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2891 chan->fcs = L2CAP_FCS_NONE;
2892 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2895 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2896 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2900 case L2CAP_MODE_STREAMING:
2901 rfc.mode = L2CAP_MODE_STREAMING;
2903 rfc.max_transmit = 0;
2904 rfc.retrans_timeout = 0;
2905 rfc.monitor_timeout = 0;
2907 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2908 L2CAP_EXT_HDR_SIZE -
2911 rfc.max_pdu_size = cpu_to_le16(size);
2913 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2914 (unsigned long) &rfc);
2916 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2917 l2cap_add_opt_efs(&ptr, chan);
2919 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2922 if (chan->fcs == L2CAP_FCS_NONE ||
2923 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2924 chan->fcs = L2CAP_FCS_NONE;
2925 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2930 req->dcid = cpu_to_le16(chan->dcid);
2931 req->flags = cpu_to_le16(0);
/* Parse a peer's L2CAP Configuration Request accumulated in chan->conf_req
 * and build the Configuration Response into 'data'.  Returns the response
 * length via ptr arithmetic (tail not visible here) or -ECONNREFUSED when
 * the requested options are incompatible with the local channel mode.
 * NOTE(review): listing has numbering gaps — some original lines (breaks,
 * braces, continuations) are missing; verify against upstream l2cap_core.c.
 */
2936 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2938 struct l2cap_conf_rsp *rsp = data;
2939 void *ptr = rsp->data;
2940 void *req = chan->conf_req;
2941 int len = chan->conf_len;
2942 int type, hint, olen;
2944 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2945 struct l2cap_conf_efs efs;
2947 u16 mtu = L2CAP_DEFAULT_MTU;
2948 u16 result = L2CAP_CONF_SUCCESS;
2951 BT_DBG("chan %p", chan);
/* Walk every option in the buffered request. */
2953 while (len >= L2CAP_CONF_OPT_SIZE) {
2954 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit set => unknown options may be silently ignored. */
2956 hint = type & L2CAP_CONF_HINT;
2957 type &= L2CAP_CONF_MASK;
2960 case L2CAP_CONF_MTU:
2964 case L2CAP_CONF_FLUSH_TO:
2965 chan->flush_to = val;
2968 case L2CAP_CONF_QOS:
2971 case L2CAP_CONF_RFC:
2972 if (olen == sizeof(rfc))
2973 memcpy(&rfc, (void *) val, olen);
2976 case L2CAP_CONF_FCS:
2977 if (val == L2CAP_FCS_NONE)
2978 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2981 case L2CAP_CONF_EFS:
2983 if (olen == sizeof(efs))
2984 memcpy(&efs, (void *) val, olen);
2987 case L2CAP_CONF_EWS:
2989 return -ECONNREFUSED;
/* Peer requested extended windows: switch to extended control field. */
2991 set_bit(FLAG_EXT_CTRL, &chan->flags);
2992 set_bit(CONF_EWS_RECV, &chan->conf_state);
2993 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2994 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN. */
3001 result = L2CAP_CONF_UNKNOWN;
3002 *((u8 *) ptr++) = type;
3007 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3010 switch (chan->mode) {
3011 case L2CAP_MODE_STREAMING:
3012 case L2CAP_MODE_ERTM:
3013 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3014 chan->mode = l2cap_select_mode(rfc.mode,
3015 chan->conn->feat_mask);
3020 if (__l2cap_efs_supported(chan))
3021 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3023 return -ECONNREFUSED;
3026 if (chan->mode != rfc.mode)
3027 return -ECONNREFUSED;
/* Mode mismatch: propose our mode; give up after one failed round. */
3033 if (chan->mode != rfc.mode) {
3034 result = L2CAP_CONF_UNACCEPT;
3035 rfc.mode = chan->mode;
3037 if (chan->num_conf_rsp == 1)
3038 return -ECONNREFUSED;
3040 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3041 sizeof(rfc), (unsigned long) &rfc);
3044 if (result == L2CAP_CONF_SUCCESS) {
3045 /* Configure output options and let the other side know
3046 * which ones we don't like. */
3048 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3049 result = L2CAP_CONF_UNACCEPT;
3052 set_bit(CONF_MTU_DONE, &chan->conf_state);
3054 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must match ours unless either side is No-Traffic. */
3057 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3058 efs.stype != L2CAP_SERV_NOTRAFIC &&
3059 efs.stype != chan->local_stype) {
3061 result = L2CAP_CONF_UNACCEPT;
3063 if (chan->num_conf_req >= 1)
3064 return -ECONNREFUSED;
3066 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3068 (unsigned long) &efs);
3070 /* Send PENDING Conf Rsp */
3071 result = L2CAP_CONF_PENDING;
3072 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3077 case L2CAP_MODE_BASIC:
3078 chan->fcs = L2CAP_FCS_NONE;
3079 set_bit(CONF_MODE_DONE, &chan->conf_state);
3082 case L2CAP_MODE_ERTM:
3083 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3084 chan->remote_tx_win = rfc.txwin_size;
3086 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3088 chan->remote_max_tx = rfc.max_transmit;
/* Clamp remote MPS so an extended-header PDU still fits the ACL MTU. */
3090 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3092 L2CAP_EXT_HDR_SIZE -
3095 rfc.max_pdu_size = cpu_to_le16(size);
3096 chan->remote_mps = size;
3098 rfc.retrans_timeout =
3099 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3100 rfc.monitor_timeout =
3101 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3103 set_bit(CONF_MODE_DONE, &chan->conf_state);
3105 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3106 sizeof(rfc), (unsigned long) &rfc);
/* Adopt the peer's EFS parameters and echo them back. */
3108 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3109 chan->remote_id = efs.id;
3110 chan->remote_stype = efs.stype;
3111 chan->remote_msdu = le16_to_cpu(efs.msdu);
3112 chan->remote_flush_to =
3113 le32_to_cpu(efs.flush_to);
3114 chan->remote_acc_lat =
3115 le32_to_cpu(efs.acc_lat);
3116 chan->remote_sdu_itime =
3117 le32_to_cpu(efs.sdu_itime);
3118 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3119 sizeof(efs), (unsigned long) &efs);
3123 case L2CAP_MODE_STREAMING:
3124 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3126 L2CAP_EXT_HDR_SIZE -
3129 rfc.max_pdu_size = cpu_to_le16(size);
3130 chan->remote_mps = size;
3132 set_bit(CONF_MODE_DONE, &chan->conf_state);
3134 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3135 sizeof(rfc), (unsigned long) &rfc);
3140 result = L2CAP_CONF_UNACCEPT;
3142 memset(&rfc, 0, sizeof(rfc));
3143 rfc.mode = chan->mode;
3146 if (result == L2CAP_CONF_SUCCESS)
3147 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
/* Fill the response header; scid field carries our dcid per spec. */
3149 rsp->scid = cpu_to_le16(chan->dcid);
3150 rsp->result = cpu_to_le16(result);
3151 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configuration Response 'rsp' (len bytes) and build a new
 * Configuration Request into 'data', adjusting channel parameters to the
 * values the peer accepted/proposed.  *result may be updated (e.g. to
 * UNACCEPT when the proposed MTU is below the minimum).  Returns
 * -ECONNREFUSED on incompatible mode or EFS service type.
 */
3156 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3158 struct l2cap_conf_req *req = data;
3159 void *ptr = req->data;
3162 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3163 struct l2cap_conf_efs efs;
3165 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3167 while (len >= L2CAP_CONF_OPT_SIZE) {
3168 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3171 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: reject, re-propose min. */
3172 if (val < L2CAP_DEFAULT_MIN_MTU) {
3173 *result = L2CAP_CONF_UNACCEPT;
3174 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3177 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3180 case L2CAP_CONF_FLUSH_TO:
3181 chan->flush_to = val;
3182 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3186 case L2CAP_CONF_RFC:
3187 if (olen == sizeof(rfc))
3188 memcpy(&rfc, (void *)val, olen);
/* State-2 devices never renegotiate the mode. */
3190 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3191 rfc.mode != chan->mode)
3192 return -ECONNREFUSED;
3196 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3197 sizeof(rfc), (unsigned long) &rfc);
3200 case L2CAP_CONF_EWS:
3201 chan->tx_win = min_t(u16, val,
3202 L2CAP_DEFAULT_EXT_WINDOW);
3203 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3207 case L2CAP_CONF_EFS:
3208 if (olen == sizeof(efs))
3209 memcpy(&efs, (void *)val, olen);
3211 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3212 efs.stype != L2CAP_SERV_NOTRAFIC &&
3213 efs.stype != chan->local_stype)
3214 return -ECONNREFUSED;
3216 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3217 sizeof(efs), (unsigned long) &efs);
/* Basic mode cannot be switched away from by the peer. */
3222 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3223 return -ECONNREFUSED;
3225 chan->mode = rfc.mode;
3227 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3229 case L2CAP_MODE_ERTM:
3230 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3231 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3232 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3234 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3235 chan->local_msdu = le16_to_cpu(efs.msdu);
3236 chan->local_sdu_itime =
3237 le32_to_cpu(efs.sdu_itime);
3238 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3239 chan->local_flush_to =
3240 le32_to_cpu(efs.flush_to);
3244 case L2CAP_MODE_STREAMING:
3245 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3249 req->dcid = cpu_to_le16(chan->dcid);
3250 req->flags = cpu_to_le16(0x0000);
/* Build a minimal Configuration Response (no options) with the given
 * result and flags into 'data'.  The scid field carries our dcid.
 */
3255 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3257 struct l2cap_conf_rsp *rsp = data;
3258 void *ptr = rsp->data;
3260 BT_DBG("chan %p", chan);
3262 rsp->scid = cpu_to_le16(chan->dcid);
3263 rsp->result = cpu_to_le16(result);
3264 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connection Response (success) for a channel whose
 * setup was pending, then kick off configuration by sending our first
 * Configuration Request unless one was already sent (CONF_REQ_SENT).
 */
3269 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3271 struct l2cap_conn_rsp rsp;
3272 struct l2cap_conn *conn = chan->conn;
3275 rsp.scid = cpu_to_le16(chan->dcid);
3276 rsp.dcid = cpu_to_le16(chan->scid);
3277 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3278 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3279 l2cap_send_cmd(conn, chan->ident,
3280 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only the first caller past this test sends the config request. */
3282 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3285 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3286 l2cap_build_conf_req(chan, buf), buf);
3287 chan->num_conf_req++;
/* Extract the RFC option from a Configuration Response and apply its
 * timeouts/MPS to the channel.  Only relevant for ERTM/streaming modes;
 * falls back to sane defaults when the remote omitted the RFC option.
 */
3290 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3294 struct l2cap_conf_rfc rfc;
3296 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3298 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3301 while (len >= L2CAP_CONF_OPT_SIZE) {
3302 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3305 case L2CAP_CONF_RFC:
3306 if (olen == sizeof(rfc))
3307 memcpy(&rfc, (void *)val, olen);
3312 /* Use sane default values in case a misbehaving remote device
3313 * did not send an RFC option.
3315 rfc.mode = chan->mode;
3316 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3317 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3318 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3320 BT_ERR("Expected RFC option was not found, using defaults");
3324 case L2CAP_MODE_ERTM:
3325 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3326 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3327 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3329 case L2CAP_MODE_STREAMING:
3330 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject from the peer.  If it rejects our pending
 * Information Request (matched by ident), finish feature discovery with
 * what we have and start pending connections.
 */
3334 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3336 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3338 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3341 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3342 cmd->ident == conn->info_ident) {
3343 cancel_delayed_work(&conn->info_timer);
3345 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3346 conn->info_ident = 0;
3348 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for the
 * PSM, perform security and backlog checks, create and register the new
 * child channel, then reply with a Connection Response.  May also trigger
 * an Information Request (feature mask) and/or the first Config Request.
 * NOTE(review): listing has numbering gaps — some lines (gotos, braces)
 * are missing; verify against upstream l2cap_core.c.
 */
3354 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3356 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3357 struct l2cap_conn_rsp rsp;
3358 struct l2cap_chan *chan = NULL, *pchan;
3359 struct sock *parent, *sk = NULL;
3360 int result, status = L2CAP_CS_NO_INFO;
3362 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3363 __le16 psm = req->psm;
3365 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3367 /* Check if we have socket listening on psm */
3368 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3370 result = L2CAP_CR_BAD_PSM;
3376 mutex_lock(&conn->chan_lock);
3379 /* Check if the ACL is secure enough (if not SDP) */
3380 if (psm != cpu_to_le16(0x0001) &&
3381 !hci_conn_check_link_mode(conn->hcon)) {
3382 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3383 result = L2CAP_CR_SEC_BLOCK;
3387 result = L2CAP_CR_NO_MEM;
3389 /* Check for backlog size */
3390 if (sk_acceptq_is_full(parent)) {
3391 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3395 chan = pchan->ops->new_connection(pchan->data);
3401 /* Check if we already have channel with that dcid */
3402 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3403 sock_set_flag(sk, SOCK_ZAPPED);
3404 chan->ops->close(chan->data);
3408 hci_conn_hold(conn->hcon);
3410 bacpy(&bt_sk(sk)->src, conn->src);
3411 bacpy(&bt_sk(sk)->dst, conn->dst);
3415 bt_accept_enqueue(parent, sk);
3417 __l2cap_chan_add(conn, chan);
3421 __set_chan_timer(chan, sk->sk_sndtimeo);
3423 chan->ident = cmd->ident;
/* Decide the response based on security state and deferred-setup flag. */
3425 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3426 if (l2cap_chan_check_security(chan)) {
3427 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3428 __l2cap_state_change(chan, BT_CONNECT2);
3429 result = L2CAP_CR_PEND;
3430 status = L2CAP_CS_AUTHOR_PEND;
3431 parent->sk_data_ready(parent, 0);
3433 __l2cap_state_change(chan, BT_CONFIG);
3434 result = L2CAP_CR_SUCCESS;
3435 status = L2CAP_CS_NO_INFO;
3438 __l2cap_state_change(chan, BT_CONNECT2);
3439 result = L2CAP_CR_PEND;
3440 status = L2CAP_CS_AUTHEN_PEND;
3443 __l2cap_state_change(chan, BT_CONNECT2);
3444 result = L2CAP_CR_PEND;
3445 status = L2CAP_CS_NO_INFO;
3449 release_sock(parent);
3450 mutex_unlock(&conn->chan_lock);
3453 rsp.scid = cpu_to_le16(scid);
3454 rsp.dcid = cpu_to_le16(dcid);
3455 rsp.result = cpu_to_le16(result);
3456 rsp.status = cpu_to_le16(status);
3457 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask not yet known: query it before configuring. */
3459 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3460 struct l2cap_info_req info;
3461 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3463 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3464 conn->info_ident = l2cap_get_ident(conn);
3466 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3468 l2cap_send_cmd(conn, conn->info_ident,
3469 L2CAP_INFO_REQ, sizeof(info), &info);
3472 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3473 result == L2CAP_CR_SUCCESS) {
3475 set_bit(CONF_REQ_SENT, &chan->conf_state);
3476 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3477 l2cap_build_conf_req(chan, buf), buf);
3478 chan->num_conf_req++;
/* Handle the peer's Connection Response.  Look the channel up by scid
 * (or by ident for pending results), then either move to BT_CONFIG and
 * send our Config Request (success), mark it pending, or tear the
 * channel down (refused).
 */
3484 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3486 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3487 u16 scid, dcid, result, status;
3488 struct l2cap_chan *chan;
3492 scid = __le16_to_cpu(rsp->scid);
3493 dcid = __le16_to_cpu(rsp->dcid);
3494 result = __le16_to_cpu(rsp->result);
3495 status = __le16_to_cpu(rsp->status);
3497 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3498 dcid, scid, result, status);
3500 mutex_lock(&conn->chan_lock);
3503 chan = __l2cap_get_chan_by_scid(conn, scid);
/* scid 0 (pending responses): fall back to matching by command ident. */
3509 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3518 l2cap_chan_lock(chan);
3521 case L2CAP_CR_SUCCESS:
3522 l2cap_state_change(chan, BT_CONFIG);
3525 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3527 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3530 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3531 l2cap_build_conf_req(chan, req), req);
3532 chan->num_conf_req++;
3536 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3540 l2cap_chan_del(chan, ECONNREFUSED);
3544 l2cap_chan_unlock(chan);
3547 mutex_unlock(&conn->chan_lock);
/* Choose the channel FCS: none outside ERTM/streaming, otherwise CRC16
 * unless the peer asked for no FCS (CONF_NO_FCS_RECV).
 */
3552 static inline void set_default_fcs(struct l2cap_chan *chan)
3554 /* FCS is enabled only in ERTM or streaming mode, if one or both
3557 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3558 chan->fcs = L2CAP_FCS_NONE;
3559 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3560 chan->fcs = L2CAP_FCS_CRC16;
/* Handle a Configuration Request: accumulate (possibly fragmented)
 * options into chan->conf_req, and once complete, parse them and reply.
 * On success of both directions, initialize ERTM and mark the channel
 * connected.  Rejects requests for channels in the wrong state or with
 * oversized config data.
 * NOTE(review): listing has numbering gaps — some lines (gotos, braces)
 * are missing; verify against upstream l2cap_core.c.
 */
3563 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3565 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3568 struct l2cap_chan *chan;
3571 dcid = __le16_to_cpu(req->dcid);
3572 flags = __le16_to_cpu(req->flags);
3574 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3576 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only valid while configuring/connect-pending. */
3580 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3581 struct l2cap_cmd_rej_cid rej;
3583 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3584 rej.scid = cpu_to_le16(chan->scid);
3585 rej.dcid = cpu_to_le16(chan->dcid);
3587 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3592 /* Reject if config buffer is too small. */
3593 len = cmd_len - sizeof(*req);
3594 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3595 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3596 l2cap_build_conf_rsp(chan, rsp,
3597 L2CAP_CONF_REJECT, flags), rsp);
3602 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3603 chan->conf_len += len;
/* Continuation flag set: more fragments to come, ack with empty rsp. */
3605 if (flags & 0x0001) {
3606 /* Incomplete config. Send empty response. */
3607 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3608 l2cap_build_conf_rsp(chan, rsp,
3609 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3613 /* Complete config. */
3614 len = l2cap_parse_conf_req(chan, rsp);
3616 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3620 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3621 chan->num_conf_rsp++;
3623 /* Reset config buffer. */
3626 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3629 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3630 set_default_fcs(chan);
3632 l2cap_state_change(chan, BT_CONNECTED);
3634 if (chan->mode == L2CAP_MODE_ERTM ||
3635 chan->mode == L2CAP_MODE_STREAMING)
3636 err = l2cap_ertm_init(chan);
3639 l2cap_send_disconn_req(chan->conn, chan, -err);
3641 l2cap_chan_ready(chan);
3646 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3648 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3649 l2cap_build_conf_req(chan, buf), buf);
3650 chan->num_conf_req++;
3653 /* Got Conf Rsp PENDING from remote side and asume we sent
3654 Conf Rsp PENDING in the code above */
3655 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3656 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3658 /* check compatibility */
3660 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3661 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3663 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3664 l2cap_build_conf_rsp(chan, rsp,
3665 L2CAP_CONF_SUCCESS, 0x0000), rsp);
3669 l2cap_chan_unlock(chan);
/* Handle the peer's Configuration Response.  SUCCESS applies the RFC
 * option; PENDING re-parses and answers; UNACCEPT retries with a new
 * Config Request up to L2CAP_CONF_MAX_CONF_RSP times; anything else
 * disconnects.  When both directions are done, bring the channel up.
 * NOTE(review): listing has numbering gaps — verify against upstream.
 */
3673 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3675 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3676 u16 scid, flags, result;
3677 struct l2cap_chan *chan;
3678 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3681 scid = __le16_to_cpu(rsp->scid);
3682 flags = __le16_to_cpu(rsp->flags);
3683 result = __le16_to_cpu(rsp->result);
3685 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3688 chan = l2cap_get_chan_by_scid(conn, scid);
3693 case L2CAP_CONF_SUCCESS:
3694 l2cap_conf_rfc_get(chan, rsp->data, len);
3695 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3698 case L2CAP_CONF_PENDING:
3699 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3701 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3704 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3707 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3711 /* check compatibility */
3713 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3714 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3716 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3717 l2cap_build_conf_rsp(chan, buf,
3718 L2CAP_CONF_SUCCESS, 0x0000), buf);
3722 case L2CAP_CONF_UNACCEPT:
3723 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard: the peer's option list must fit our request buffer. */
3726 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3727 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3731 /* throw out any old stored conf requests */
3732 result = L2CAP_CONF_SUCCESS;
3733 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3736 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3740 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3741 L2CAP_CONF_REQ, len, req);
3742 chan->num_conf_req++;
3743 if (result != L2CAP_CONF_SUCCESS)
3749 l2cap_chan_set_err(chan, ECONNRESET);
3751 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3752 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3759 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3761 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3762 set_default_fcs(chan);
3764 l2cap_state_change(chan, BT_CONNECTED);
3765 if (chan->mode == L2CAP_MODE_ERTM ||
3766 chan->mode == L2CAP_MODE_STREAMING)
3767 err = l2cap_ertm_init(chan);
3770 l2cap_send_disconn_req(chan->conn, chan, -err);
3772 l2cap_chan_ready(chan);
3776 l2cap_chan_unlock(chan);
/* Handle a Disconnection Request: acknowledge with a Disconnection
 * Response, shut the socket down, and delete the channel.  The extra
 * hold/put pair keeps the channel alive across ops->close().
 */
3780 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3782 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3783 struct l2cap_disconn_rsp rsp;
3785 struct l2cap_chan *chan;
3788 scid = __le16_to_cpu(req->scid);
3789 dcid = __le16_to_cpu(req->dcid);
3791 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3793 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid. */
3795 chan = __l2cap_get_chan_by_scid(conn, dcid);
3797 mutex_unlock(&conn->chan_lock);
3801 l2cap_chan_lock(chan);
3805 rsp.dcid = cpu_to_le16(chan->scid);
3806 rsp.scid = cpu_to_le16(chan->dcid);
3807 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3810 sk->sk_shutdown = SHUTDOWN_MASK;
3813 l2cap_chan_hold(chan);
3814 l2cap_chan_del(chan, ECONNRESET);
3816 l2cap_chan_unlock(chan);
3818 chan->ops->close(chan->data);
3819 l2cap_chan_put(chan);
3821 mutex_unlock(&conn->chan_lock);
/* Handle the peer's Disconnection Response: delete the channel (no
 * error) and close its socket, holding a reference across the close.
 */
3826 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3828 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3830 struct l2cap_chan *chan;
3832 scid = __le16_to_cpu(rsp->scid);
3833 dcid = __le16_to_cpu(rsp->dcid);
3835 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3837 mutex_lock(&conn->chan_lock);
3839 chan = __l2cap_get_chan_by_scid(conn, scid);
3841 mutex_unlock(&conn->chan_lock);
3845 l2cap_chan_lock(chan);
3847 l2cap_chan_hold(chan);
3848 l2cap_chan_del(chan, 0);
3850 l2cap_chan_unlock(chan);
3852 chan->ops->close(chan->data);
3853 l2cap_chan_put(chan);
3855 mutex_unlock(&conn->chan_lock);
/* Handle an Information Request: answer feature-mask and fixed-channel
 * queries with our capabilities; reply NOTSUPP for anything else.
 */
3860 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3862 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3865 type = __le16_to_cpu(req->type);
3867 BT_DBG("type 0x%4.4x", type);
3869 if (type == L2CAP_IT_FEAT_MASK) {
3871 u32 feat_mask = l2cap_feat_mask;
3872 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3873 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3874 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming plus extended flow/window support. */
3876 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3879 feat_mask |= L2CAP_FEAT_EXT_FLOW
3880 | L2CAP_FEAT_EXT_WINDOW;
3882 put_unaligned_le32(feat_mask, rsp->data);
3883 l2cap_send_cmd(conn, cmd->ident,
3884 L2CAP_INFO_RSP, sizeof(buf), buf);
3885 } else if (type == L2CAP_IT_FIXED_CHAN) {
3887 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* A2MP fixed channel bit depends on runtime support. */
3890 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3892 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3894 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3895 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3896 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3897 l2cap_send_cmd(conn, cmd->ident,
3898 L2CAP_INFO_RSP, sizeof(buf), buf);
3900 struct l2cap_info_rsp rsp;
3901 rsp.type = cpu_to_le16(type);
3902 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3903 l2cap_send_cmd(conn, cmd->ident,
3904 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response: record the peer's feature mask (and
 * chain a fixed-channel query if supported) or its fixed-channel mask,
 * then mark discovery done and start pending connections.
 */
3910 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3912 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3915 type = __le16_to_cpu(rsp->type);
3916 result = __le16_to_cpu(rsp->result);
3918 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3920 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3921 if (cmd->ident != conn->info_ident ||
3922 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3925 cancel_delayed_work(&conn->info_timer);
3927 if (result != L2CAP_IR_SUCCESS) {
3928 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3929 conn->info_ident = 0;
3931 l2cap_conn_start(conn);
3937 case L2CAP_IT_FEAT_MASK:
3938 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask which ones before starting. */
3940 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3941 struct l2cap_info_req req;
3942 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3944 conn->info_ident = l2cap_get_ident(conn);
3946 l2cap_send_cmd(conn, conn->info_ident,
3947 L2CAP_INFO_REQ, sizeof(req), &req);
3949 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3950 conn->info_ident = 0;
3952 l2cap_conn_start(conn);
3956 case L2CAP_IT_FIXED_CHAN:
3957 conn->fixed_chan_mask = rsp->data[0];
3958 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3959 conn->info_ident = 0;
3961 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  Currently a placeholder that
 * always rejects with L2CAP_CR_NO_MEM.
 */
3968 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3969 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3972 struct l2cap_create_chan_req *req = data;
3973 struct l2cap_create_chan_rsp rsp;
3976 if (cmd_len != sizeof(*req))
3982 psm = le16_to_cpu(req->psm);
3983 scid = le16_to_cpu(req->scid);
3985 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3987 /* Placeholder: Always reject */
3989 rsp.scid = cpu_to_le16(scid);
3990 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3991 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3993 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response shares the layout/handling of a regular
 * Connection Response, so delegate to l2cap_connect_rsp().
 */
3999 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
4000 struct l2cap_cmd_hdr *cmd, void *data)
4002 BT_DBG("conn %p", conn);
4004 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for the given initiator CID and result. */
4007 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4008 u16 icid, u16 result)
4010 struct l2cap_move_chan_rsp rsp;
4012 BT_DBG("icid %d, result %d", icid, result);
4014 rsp.icid = cpu_to_le16(icid);
4015 rsp.result = cpu_to_le16(result);
4017 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm with a fresh command ident, remembering
 * the ident on the channel for matching the confirm response.
 */
4020 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4021 struct l2cap_chan *chan, u16 icid, u16 result)
4023 struct l2cap_move_chan_cfm cfm;
4026 BT_DBG("icid %d, result %d", icid, result);
4028 ident = l2cap_get_ident(conn);
4030 chan->ident = ident;
4032 cfm.icid = cpu_to_le16(icid);
4033 cfm.result = cpu_to_le16(result);
4035 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirm with a Confirm Response. */
4038 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4041 struct l2cap_move_chan_cfm_rsp rsp;
4043 BT_DBG("icid %d", icid);
4045 rsp.icid = cpu_to_le16(icid);
4046 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request.  Placeholder: always refuses with
 * L2CAP_MR_NOT_ALLOWED.
 */
4049 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4050 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4052 struct l2cap_move_chan_req *req = data;
4054 u16 result = L2CAP_MR_NOT_ALLOWED;
4056 if (cmd_len != sizeof(*req))
4059 icid = le16_to_cpu(req->icid);
4061 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4066 /* Placeholder: Always refuse */
4067 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  Placeholder: always answers with an
 * UNCONFIRMED Move Channel Confirm.
 */
4072 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4073 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4075 struct l2cap_move_chan_rsp *rsp = data;
4078 if (cmd_len != sizeof(*rsp))
4081 icid = le16_to_cpu(rsp->icid);
4082 result = le16_to_cpu(rsp->result);
4084 BT_DBG("icid %d, result %d", icid, result);
4086 /* Placeholder: Always unconfirmed */
4087 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirm: validate the length and acknowledge
 * with a Confirm Response.
 */
4092 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4093 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4095 struct l2cap_move_chan_cfm *cfm = data;
4098 if (cmd_len != sizeof(*cfm))
4101 icid = le16_to_cpu(cfm->icid);
4102 result = le16_to_cpu(cfm->result);
4104 BT_DBG("icid %d, result %d", icid, result);
4106 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirm Response: only validates length and
 * logs the icid (no further action in this placeholder implementation).
 */
4111 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4112 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4114 struct l2cap_move_chan_cfm_rsp *rsp = data;
4117 if (cmd_len != sizeof(*rsp))
4120 icid = le16_to_cpu(rsp->icid);
4122 BT_DBG("icid %d", icid);
/* Validate LE connection parameters: interval bounds (6..3200, min<=max),
 * supervision timeout multiplier (10..3200), timeout strictly larger than
 * the maximum interval (max < to_multiplier*8), and slave latency within
 * both the absolute cap (499) and the timeout-derived maximum.
 */
4127 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4132 if (min > max || min < 6 || max > 3200)
4135 if (to_multiplier < 10 || to_multiplier > 3200)
4138 if (max >= to_multiplier * 8)
4141 max_latency = (to_multiplier * 8 / max) - 1;
4142 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master role only):
 * validate the parameters, send accept/reject, and on accept push the
 * update to the controller via hci_le_conn_update().
 */
4148 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4149 struct l2cap_cmd_hdr *cmd, u8 *data)
4151 struct hci_conn *hcon = conn->hcon;
4152 struct l2cap_conn_param_update_req *req;
4153 struct l2cap_conn_param_update_rsp rsp;
4154 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may apply parameter updates. */
4157 if (!(hcon->link_mode & HCI_LM_MASTER))
4160 cmd_len = __le16_to_cpu(cmd->len);
4161 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4164 req = (struct l2cap_conn_param_update_req *) data;
4165 min = __le16_to_cpu(req->min);
4166 max = __le16_to_cpu(req->max);
4167 latency = __le16_to_cpu(req->latency);
4168 to_multiplier = __le16_to_cpu(req->to_multiplier);
4170 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4171 min, max, latency, to_multiplier);
4173 memset(&rsp, 0, sizeof(rsp));
4175 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4177 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4179 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4181 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4185 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signaling command to its handler based on
 * the command code.  Unknown codes are logged as errors.
 */
4190 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4191 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4195 switch (cmd->code) {
4196 case L2CAP_COMMAND_REJ:
4197 l2cap_command_rej(conn, cmd, data);
4200 case L2CAP_CONN_REQ:
4201 err = l2cap_connect_req(conn, cmd, data);
4204 case L2CAP_CONN_RSP:
4205 err = l2cap_connect_rsp(conn, cmd, data);
4208 case L2CAP_CONF_REQ:
4209 err = l2cap_config_req(conn, cmd, cmd_len, data);
4212 case L2CAP_CONF_RSP:
4213 err = l2cap_config_rsp(conn, cmd, data);
4216 case L2CAP_DISCONN_REQ:
4217 err = l2cap_disconnect_req(conn, cmd, data);
4220 case L2CAP_DISCONN_RSP:
4221 err = l2cap_disconnect_rsp(conn, cmd, data);
/* Echo: reflect the payload straight back. */
4224 case L2CAP_ECHO_REQ:
4225 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4228 case L2CAP_ECHO_RSP:
4231 case L2CAP_INFO_REQ:
4232 err = l2cap_information_req(conn, cmd, data);
4235 case L2CAP_INFO_RSP:
4236 err = l2cap_information_rsp(conn, cmd, data);
4239 case L2CAP_CREATE_CHAN_REQ:
4240 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4243 case L2CAP_CREATE_CHAN_RSP:
4244 err = l2cap_create_channel_rsp(conn, cmd, data);
4247 case L2CAP_MOVE_CHAN_REQ:
4248 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4251 case L2CAP_MOVE_CHAN_RSP:
4252 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4255 case L2CAP_MOVE_CHAN_CFM:
4256 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4259 case L2CAP_MOVE_CHAN_CFM_RSP:
4260 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4264 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signaling command.  Only the Connection
 * Parameter Update Request is actively handled here.
 */
4272 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4273 struct l2cap_cmd_hdr *cmd, u8 *data)
4275 switch (cmd->code) {
4276 case L2CAP_COMMAND_REJ:
4279 case L2CAP_CONN_PARAM_UPDATE_REQ:
4280 return l2cap_conn_param_update_req(conn, cmd, data);
4282 case L2CAP_CONN_PARAM_UPDATE_RSP:
4286 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process the signaling channel payload of an skb: iterate over the
 * contained commands, dispatch each to the LE or BR/EDR handler based
 * on link type, and send a Command Reject for handler failures.
 * NOTE(review): listing has numbering gaps (e.g. the advance of 'data'
 * past each command's payload is not visible) — verify against upstream.
 */
4291 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4292 struct sk_buff *skb)
4294 u8 *data = skb->data;
4296 struct l2cap_cmd_hdr cmd;
4299 l2cap_raw_recv(conn, skb);
4301 while (len >= L2CAP_CMD_HDR_SIZE) {
4303 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4304 data += L2CAP_CMD_HDR_SIZE;
4305 len -= L2CAP_CMD_HDR_SIZE;
4307 cmd_len = le16_to_cpu(cmd.len);
4309 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command may not claim more payload than remains, ident 0 invalid. */
4311 if (cmd_len > len || !cmd.ident) {
4312 BT_DBG("corrupted command");
4316 if (conn->hcon->type == LE_LINK)
4317 err = l2cap_le_sig_cmd(conn, &cmd, data);
4319 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4322 struct l2cap_cmd_rej_unk rej;
4324 BT_ERR("Wrong link type (%d)", err);
4326 /* FIXME: Map err to a valid reason */
4327 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4328 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS of a received frame.  The FCS trailer is trimmed
 * from the skb first, then read from just past the new tail (the bytes
 * are still present in the buffer after skb_trim) and compared against
 * a CRC computed over the L2CAP header plus payload.
 */
4338 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4340 u16 our_fcs, rcv_fcs;
/* Extended vs enhanced control field changes the header length. */
4343 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4344 hdr_size = L2CAP_EXT_HDR_SIZE;
4346 hdr_size = L2CAP_ENH_HDR_SIZE;
4348 if (chan->fcs == L2CAP_FCS_CRC16) {
4349 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4350 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4351 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4353 if (our_fcs != rcv_fcs)
/* Send the peer a frame carrying the F-bit: RNR when locally busy,
 * otherwise pending I-frames, falling back to an RR s-frame if no
 * I- or S-frame carried the F-bit.
 */
4359 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4361 struct l2cap_ctrl control;
4363 BT_DBG("chan %p", chan);
4365 memset(&control, 0, sizeof(control));
4368 control.reqseq = chan->buffer_seq;
4369 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4371 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4372 control.super = L2CAP_SUPER_RNR;
4373 l2cap_send_sframe(chan, &control);
/* Remote just cleared busy with unacked frames out: restart timer. */
4376 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4377 chan->unacked_frames > 0)
4378 __set_retrans_timer(chan);
4380 /* Send pending iframes */
4381 l2cap_ertm_send(chan);
4383 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4384 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4385 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4388 control.super = L2CAP_SUPER_RR;
4389 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, maintaining *last_frag as the
 * tail pointer and updating the aggregate length/truesize counters.
 */
4393 static void append_skb_frag(struct sk_buff *skb,
4394 struct sk_buff *new_frag, struct sk_buff **last_frag)
4396 /* skb->len reflects data in skb as well as all fragments
4397 * skb->data_len reflects only data in fragments
4399 if (!skb_has_frag_list(skb))
4400 skb_shinfo(skb)->frag_list = new_frag;
4402 new_frag->next = NULL;
4404 (*last_frag)->next = new_frag;
4405 *last_frag = new_frag;
4407 skb->len += new_frag->len;
4408 skb->data_len += new_frag->len;
4409 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM/streaming I-frames according to the SAR
 * bits: unsegmented frames go straight up; START records the SDU length
 * and begins collection; CONTINUE/END append fragments, with the
 * completed SDU delivered via chan->ops->recv().  Error paths free the
 * partial SDU and reset reassembly state.
 * NOTE(review): listing has numbering gaps (error branches, breaks
 * missing) — verify against upstream l2cap_core.c.
 */
4412 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4413 struct l2cap_ctrl *control)
4417 switch (control->sar) {
4418 case L2CAP_SAR_UNSEGMENTED:
4422 err = chan->ops->recv(chan->data, skb);
4425 case L2CAP_SAR_START:
/* First two payload bytes of a START frame carry the total SDU length. */
4429 chan->sdu_len = get_unaligned_le16(skb->data);
4430 skb_pull(skb, L2CAP_SDULEN_SIZE);
4432 if (chan->sdu_len > chan->imtu) {
4437 if (skb->len >= chan->sdu_len)
4441 chan->sdu_last_frag = skb;
4447 case L2CAP_SAR_CONTINUE:
4451 append_skb_frag(chan->sdu, skb,
4452 &chan->sdu_last_frag);
/* A CONTINUE fragment must not complete or overflow the SDU. */
4455 if (chan->sdu->len >= chan->sdu_len)
4465 append_skb_frag(chan->sdu, skb,
4466 &chan->sdu_last_frag);
/* END fragment must bring the SDU to exactly the announced length. */
4469 if (chan->sdu->len != chan->sdu_len)
4472 err = chan->ops->recv(chan->data, chan->sdu);
4475 /* Reassembly complete */
4477 chan->sdu_last_frag = NULL;
4485 kfree_skb(chan->sdu);
4487 chan->sdu_last_frag = NULL;
/* Report a local-busy transition to the ERTM TX state machine.
 * No-op for channels not in ERTM mode.
 */
4494 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4498 if (chan->mode != L2CAP_MODE_ERTM)
4501 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4502 l2cap_tx(chan, 0, 0, event);
/* Drain the SREJ hold queue: deliver frames in sequence order until a
 * gap is found or local busy asserts.  When the queue empties, return
 * to the RECV rx state and acknowledge the peer.
 */
4505 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4508 /* Pass sequential frames to l2cap_reassemble_sdu()
4509 * until a gap is encountered.
4512 BT_DBG("chan %p", chan);
4514 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4515 struct sk_buff *skb;
4516 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4517 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4519 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
/* Found the next in-sequence frame: remove, advance, deliver */
4524 skb_unlink(skb, &chan->srej_q);
4525 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4526 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4531 if (skb_queue_empty(&chan->srej_q)) {
4532 chan->rx_state = L2CAP_RX_STATE_RECV;
4533 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame (selective retransmission request).
 * NOTE(review): the body is elided from this listing.
 */
4539 static void l2cap_handle_srej(struct l2cap_chan *chan,
4540 struct l2cap_ctrl *control)
/* Handle a received REJ S-frame (go-back-N retransmission request).
 * NOTE(review): the body is elided from this listing.
 */
4545 static void l2cap_handle_rej(struct l2cap_chan *chan,
4546 struct l2cap_ctrl *control)
/* Classify an incoming I-frame's TxSeq against the receive window and
 * current SREJ state.  Returns one of the L2CAP_TXSEQ_* verdicts
 * (expected, duplicate, unexpected, invalid, or invalid-but-ignorable)
 * that drives the rx state machines.
 */
4551 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4553 BT_DBG("chan %p, txseq %d", chan, txseq);
4555 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4556 chan->expected_tx_seq);
4558 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
/* Sequence falls outside the tx window while SREJs are pending */
4559 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4561 /* See notes below regarding "double poll" and
4564 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4565 BT_DBG("Invalid/Ignore - after SREJ");
4566 return L2CAP_TXSEQ_INVALID_IGNORE;
4568 BT_DBG("Invalid - in window after SREJ sent");
4569 return L2CAP_TXSEQ_INVALID;
4573 if (chan->srej_list.head == txseq) {
4574 BT_DBG("Expected SREJ");
4575 return L2CAP_TXSEQ_EXPECTED_SREJ;
4578 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4579 BT_DBG("Duplicate SREJ - txseq already stored");
4580 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4583 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4584 BT_DBG("Unexpected SREJ - not requested");
4585 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4589 if (chan->expected_tx_seq == txseq) {
/* In-order frame, but still must lie inside the tx window */
4590 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4592 BT_DBG("Invalid - txseq outside tx window");
4593 return L2CAP_TXSEQ_INVALID;
4596 return L2CAP_TXSEQ_EXPECTED;
/* txseq earlier than expected_tx_seq => already seen */
4600 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4601 __seq_offset(chan, chan->expected_tx_seq,
4602 chan->last_acked_seq)){
4603 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4604 return L2CAP_TXSEQ_DUPLICATE;
4607 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4608 /* A source of invalid packets is a "double poll" condition,
4609 * where delays cause us to send multiple poll packets. If
4610 * the remote stack receives and processes both polls,
4611 * sequence numbers can wrap around in such a way that a
4612 * resent frame has a sequence number that looks like new data
4613 * with a sequence gap. This would trigger an erroneous SREJ
4616 * Fortunately, this is impossible with a tx window that's
4617 * less than half of the maximum sequence number, which allows
4618 * invalid frames to be safely ignored.
4620 * With tx window sizes greater than half of the tx window
4621 * maximum, the frame is invalid and cannot be ignored. This
4622 * causes a disconnect.
4625 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4626 BT_DBG("Invalid/Ignore - txseq outside tx window");
4627 return L2CAP_TXSEQ_INVALID_IGNORE;
4629 BT_DBG("Invalid - txseq outside tx window");
4630 return L2CAP_TXSEQ_INVALID;
/* In-window but later than expected: frames are missing */
4633 BT_DBG("Unexpected - txseq indicates missing frames");
4634 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine, RECV state: dispatch on the received
 * event (I-frame, RR, RNR, REJ, SREJ).  I-frames are classified by
 * txseq; out-of-sequence frames move the channel to SREJ_SENT.  skbs
 * not queued (skb_in_use stays false) are freed before returning.
 *
 * NOTE(review): listing is elided — break/return statements and braces
 * between numbered lines are not visible.
 */
4638 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4639 struct l2cap_ctrl *control,
4640 struct sk_buff *skb, u8 event)
4643 bool skb_in_use = 0;
4645 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4649 case L2CAP_EV_RECV_IFRAME:
4650 switch (l2cap_classify_txseq(chan, control->txseq)) {
4651 case L2CAP_TXSEQ_EXPECTED:
4652 l2cap_pass_to_tx(chan, control);
/* Locally busy: drop the frame; the peer will retransmit */
4654 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4655 BT_DBG("Busy, discarding expected seq %d",
4660 chan->expected_tx_seq = __next_seq(chan,
4663 chan->buffer_seq = chan->expected_tx_seq;
4666 err = l2cap_reassemble_sdu(chan, skb, control);
/* F-bit clears an outstanding REJ and resumes sending */
4670 if (control->final) {
4671 if (!test_and_clear_bit(CONN_REJ_ACT,
4672 &chan->conn_state)) {
4674 l2cap_retransmit_all(chan, control);
4675 l2cap_ertm_send(chan);
4679 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4680 l2cap_send_ack(chan);
4682 case L2CAP_TXSEQ_UNEXPECTED:
4683 l2cap_pass_to_tx(chan, control);
4685 /* Can't issue SREJ frames in the local busy state.
4686 * Drop this frame, it will be seen as missing
4687 * when local busy is exited.
4689 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4690 BT_DBG("Busy, discarding unexpected seq %d",
4695 /* There was a gap in the sequence, so an SREJ
4696 * must be sent for each missing frame. The
4697 * current frame is stored for later use.
4699 skb_queue_tail(&chan->srej_q, skb);
4701 BT_DBG("Queued %p (queue len %d)", skb,
4702 skb_queue_len(&chan->srej_q));
4704 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4705 l2cap_seq_list_clear(&chan->srej_list);
4706 l2cap_send_srej(chan, control->txseq);
4708 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4710 case L2CAP_TXSEQ_DUPLICATE:
4711 l2cap_pass_to_tx(chan, control);
4713 case L2CAP_TXSEQ_INVALID_IGNORE:
4715 case L2CAP_TXSEQ_INVALID:
/* Unrecoverable sequence error: tear the channel down */
4717 l2cap_send_disconn_req(chan->conn, chan,
4722 case L2CAP_EV_RECV_RR:
4723 l2cap_pass_to_tx(chan, control);
4724 if (control->final) {
4725 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4727 if (!test_and_clear_bit(CONN_REJ_ACT,
4728 &chan->conn_state)) {
4730 l2cap_retransmit_all(chan, control);
4733 l2cap_ertm_send(chan);
4734 } else if (control->poll) {
4735 l2cap_send_i_or_rr_or_rnr(chan);
/* Plain RR: restart retrans timer if remote busy cleared */
4737 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4738 &chan->conn_state) &&
4739 chan->unacked_frames)
4740 __set_retrans_timer(chan);
4742 l2cap_ertm_send(chan);
4745 case L2CAP_EV_RECV_RNR:
4746 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4747 l2cap_pass_to_tx(chan, control);
4748 if (control && control->poll) {
4749 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4750 l2cap_send_rr_or_rnr(chan, 0);
/* Remote busy: stop retransmitting until it recovers */
4752 __clear_retrans_timer(chan);
4753 l2cap_seq_list_clear(&chan->retrans_list);
4755 case L2CAP_EV_RECV_REJ:
4756 l2cap_handle_rej(chan, control);
4758 case L2CAP_EV_RECV_SREJ:
4759 l2cap_handle_srej(chan, control);
/* Free any skb we did not queue for later delivery */
4765 if (skb && !skb_in_use) {
4766 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine, SREJ_SENT state: selective-reject
 * recovery is in progress.  Arriving I-frames are parked on srej_q
 * until the requested retransmissions fill the sequence gaps, then
 * l2cap_rx_queued_iframes() delivers them in order.
 *
 * NOTE(review): listing is elided — break/return statements and braces
 * between numbered lines are not visible.
 */
4773 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4774 struct l2cap_ctrl *control,
4775 struct sk_buff *skb, u8 event)
4778 u16 txseq = control->txseq;
4779 bool skb_in_use = 0;
4781 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4785 case L2CAP_EV_RECV_IFRAME:
4786 switch (l2cap_classify_txseq(chan, txseq)) {
4787 case L2CAP_TXSEQ_EXPECTED:
4788 /* Keep frame for reassembly later */
4789 l2cap_pass_to_tx(chan, control);
4790 skb_queue_tail(&chan->srej_q, skb);
4792 BT_DBG("Queued %p (queue len %d)", skb,
4793 skb_queue_len(&chan->srej_q));
4795 chan->expected_tx_seq = __next_seq(chan, txseq);
4797 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* Head of the SREJ list arrived: pop it and try to drain */
4798 l2cap_seq_list_pop(&chan->srej_list);
4800 l2cap_pass_to_tx(chan, control);
4801 skb_queue_tail(&chan->srej_q, skb);
4803 BT_DBG("Queued %p (queue len %d)", skb,
4804 skb_queue_len(&chan->srej_q));
4806 err = l2cap_rx_queued_iframes(chan);
4811 case L2CAP_TXSEQ_UNEXPECTED:
4812 /* Got a frame that can't be reassembled yet.
4813 * Save it for later, and send SREJs to cover
4814 * the missing frames.
4816 skb_queue_tail(&chan->srej_q, skb);
4818 BT_DBG("Queued %p (queue len %d)", skb,
4819 skb_queue_len(&chan->srej_q));
4821 l2cap_pass_to_tx(chan, control);
4822 l2cap_send_srej(chan, control->txseq);
4824 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4825 /* This frame was requested with an SREJ, but
4826 * some expected retransmitted frames are
4827 * missing. Request retransmission of missing
4830 skb_queue_tail(&chan->srej_q, skb);
4832 BT_DBG("Queued %p (queue len %d)", skb,
4833 skb_queue_len(&chan->srej_q));
4835 l2cap_pass_to_tx(chan, control);
4836 l2cap_send_srej_list(chan, control->txseq);
4838 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4839 /* We've already queued this frame. Drop this copy. */
4840 l2cap_pass_to_tx(chan, control);
4842 case L2CAP_TXSEQ_DUPLICATE:
4843 /* Expecting a later sequence number, so this frame
4844 * was already received. Ignore it completely.
4847 case L2CAP_TXSEQ_INVALID_IGNORE:
4849 case L2CAP_TXSEQ_INVALID:
/* Unrecoverable sequence error: tear the channel down */
4851 l2cap_send_disconn_req(chan->conn, chan,
4856 case L2CAP_EV_RECV_RR:
4857 l2cap_pass_to_tx(chan, control);
4858 if (control->final) {
4859 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4861 if (!test_and_clear_bit(CONN_REJ_ACT,
4862 &chan->conn_state)) {
4864 l2cap_retransmit_all(chan, control);
4867 l2cap_ertm_send(chan);
4868 } else if (control->poll) {
4869 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4870 &chan->conn_state) &&
4871 chan->unacked_frames) {
4872 __set_retrans_timer(chan);
/* Answer the poll with the tail of our SREJ list */
4875 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4876 l2cap_send_srej_tail(chan);
4878 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4879 &chan->conn_state) &&
4880 chan->unacked_frames)
4881 __set_retrans_timer(chan);
4883 l2cap_send_ack(chan);
4886 case L2CAP_EV_RECV_RNR:
4887 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4888 l2cap_pass_to_tx(chan, control);
4889 if (control->poll) {
4890 l2cap_send_srej_tail(chan);
/* No poll: reply with a plain RR acknowledging buffer_seq */
4892 struct l2cap_ctrl rr_control;
4893 memset(&rr_control, 0, sizeof(rr_control));
4894 rr_control.sframe = 1;
4895 rr_control.super = L2CAP_SUPER_RR;
4896 rr_control.reqseq = chan->buffer_seq;
4897 l2cap_send_sframe(chan, &rr_control);
4901 case L2CAP_EV_RECV_REJ:
4902 l2cap_handle_rej(chan, control);
4904 case L2CAP_EV_RECV_SREJ:
4905 l2cap_handle_srej(chan, control);
/* Free any skb we did not queue for later delivery */
4909 if (skb && !skb_in_use) {
4910 BT_DBG("Freeing %p", skb);
/* Return true if reqseq acknowledges a frame that has actually been
 * sent but not yet acked, i.e. it lies between expected_ack_seq and
 * next_tx_seq (inclusive of next_tx_seq itself).
 */
4917 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4919 /* Make sure reqseq is for a packet that has been sent but not acked */
4922 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4923 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive entry: validate the frame's ReqSeq, then
 * dispatch to the handler for the current rx state.  An invalid ReqSeq
 * is a protocol violation and disconnects the channel.
 */
4926 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4927 struct sk_buff *skb, u8 event)
4931 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4932 control, skb, event, chan->rx_state);
4934 if (__valid_reqseq(chan, control->reqseq)) {
4935 switch (chan->rx_state) {
4936 case L2CAP_RX_STATE_RECV:
4937 err = l2cap_rx_state_recv(chan, control, skb, event);
4939 case L2CAP_RX_STATE_SREJ_SENT:
4940 err = l2cap_rx_state_srej_sent(chan, control, skb,
4948 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
4949 control->reqseq, chan->next_tx_seq,
4950 chan->expected_ack_seq);
4951 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Streaming-mode receive: deliver in-sequence frames and silently drop
 * everything else (streaming mode has no retransmission).  A failed
 * reassembly just discards the partial SDU; sequence state is always
 * advanced to follow the received txseq.
 */
4957 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4958 struct sk_buff *skb)
4962 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
4965 if (l2cap_classify_txseq(chan, control->txseq) ==
4966 L2CAP_TXSEQ_EXPECTED) {
4967 l2cap_pass_to_tx(chan, control);
4969 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
4970 __next_seq(chan, chan->buffer_seq));
4972 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4974 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: drop any partial SDU and the skb itself */
4977 kfree_skb(chan->sdu);
4980 chan->sdu_last_frag = NULL;
4984 BT_DBG("Freeing %p", skb);
/* Track the peer's sequence numbers even for dropped frames */
4989 chan->last_acked_seq = control->txseq;
4990 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Parse and validate an incoming ERTM/streaming data frame: check FCS,
 * enforce the MPS limit, validate F/P bits against the TX state, then
 * route I-frames and S-frames into l2cap_rx()/l2cap_stream_rx().
 *
 * NOTE(review): listing is elided; goto labels and drop paths between
 * the numbered lines are not visible.
 */
4995 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4997 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5001 __unpack_control(chan, skb);
5006 * We can just drop the corrupted I-frame here.
5007 * Receiver will miss it and start proper recovery
5008 * procedures and ask for retransmission.
5010 if (l2cap_check_fcs(chan, skb))
/* Compute payload length excluding SDU-length prefix and FCS */
5013 if (!control->sframe && control->sar == L2CAP_SAR_START)
5014 len -= L2CAP_SDULEN_SIZE;
5016 if (chan->fcs == L2CAP_FCS_CRC16)
5017 len -= L2CAP_FCS_SIZE;
/* Payload larger than the negotiated MPS: protocol violation */
5019 if (len > chan->mps) {
5020 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5024 if (!control->sframe) {
5027 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5028 control->sar, control->reqseq, control->final,
5031 /* Validate F-bit - F=0 always valid, F=1 only
5032 * valid in TX WAIT_F
5034 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5037 if (chan->mode != L2CAP_MODE_STREAMING) {
5038 event = L2CAP_EV_RECV_IFRAME;
5039 err = l2cap_rx(chan, control, skb, event);
5041 err = l2cap_stream_rx(chan, control, skb);
5045 l2cap_send_disconn_req(chan->conn, chan,
/* Map the 2-bit S-frame function field to an rx event */
5048 const u8 rx_func_to_event[4] = {
5049 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5050 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5053 /* Only I-frames are expected in streaming mode */
5054 if (chan->mode == L2CAP_MODE_STREAMING)
5057 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5058 control->reqseq, control->final, control->poll,
5063 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5067 /* Validate F and P bits */
5068 if (control->final && (control->poll ||
5069 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5072 event = rx_func_to_event[control->super];
5073 if (l2cap_rx(chan, control, skb, event))
5074 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Route an incoming data frame to the channel identified by its CID.
 * BASIC mode delivers directly (bounded by imtu); ERTM/streaming go
 * through l2cap_data_rcv().  Unknown CIDs and bad states drop the skb.
 */
5084 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
5086 struct l2cap_chan *chan;
5088 chan = l2cap_get_chan_by_scid(conn, cid);
5090 BT_DBG("unknown cid 0x%4.4x", cid);
5091 /* Drop packet and return */
5096 BT_DBG("chan %p, len %d", chan, skb->len);
5098 if (chan->state != BT_CONNECTED)
5101 switch (chan->mode) {
5102 case L2CAP_MODE_BASIC:
5103 /* If socket recv buffers overflows we drop data here
5104 * which is *bad* because L2CAP has to be reliable.
5105 * But we don't have any other choice. L2CAP doesn't
5106 * provide flow control mechanism. */
5108 if (chan->imtu < skb->len)
5111 if (!chan->ops->recv(chan->data, skb))
5115 case L2CAP_MODE_ERTM:
5116 case L2CAP_MODE_STREAMING:
5117 l2cap_data_rcv(chan, skb);
5121 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5129 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame to a channel bound to
 * the given PSM; drop it if the channel is missing, in a bad state, or
 * the payload exceeds its imtu.
 */
5134 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5136 struct l2cap_chan *chan;
5138 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5142 BT_DBG("chan %p, len %d", chan, skb->len);
5144 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5147 if (chan->imtu < skb->len)
5150 if (!chan->ops->recv(chan->data, skb))
/* Deliver an LE attribute-protocol frame to the channel registered for
 * the given fixed CID; same bound/connected and imtu checks as the
 * connectionless path.
 */
5159 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5160 struct sk_buff *skb)
5162 struct l2cap_chan *chan;
5164 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5168 BT_DBG("chan %p, len %d", chan, skb->len);
5170 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5173 if (chan->imtu < skb->len)
5176 if (!chan->ops->recv(chan->data, skb))
/* Demultiplex a complete L2CAP frame by CID: signalling, connectionless,
 * LE data (ATT), SMP, or a regular data channel.  Frames whose header
 * length does not match the skb length are rejected (path elided).
 */
5185 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5187 struct l2cap_hdr *lh = (void *) skb->data;
5191 skb_pull(skb, L2CAP_HDR_SIZE);
5192 cid = __le16_to_cpu(lh->cid);
5193 len = __le16_to_cpu(lh->len);
5195 if (len != skb->len) {
5200 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5203 case L2CAP_CID_LE_SIGNALING:
5204 case L2CAP_CID_SIGNALING:
5205 l2cap_sig_channel(conn, skb);
5208 case L2CAP_CID_CONN_LESS:
/* Connectionless payload starts with the destination PSM */
5209 psm = get_unaligned((__le16 *) skb->data);
5211 l2cap_conless_channel(conn, psm, skb);
5214 case L2CAP_CID_LE_DATA:
5215 l2cap_att_channel(conn, cid, skb);
/* SMP fixed channel (elided case label): a failure here kills
 * the whole connection.
 */
5219 if (smp_sig_channel(conn, skb))
5220 l2cap_conn_del(conn->hcon, EACCES);
/* Default: dynamically allocated data channel */
5224 l2cap_data_channel(conn, cid, skb);
5229 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming ACL connection request: scan listening
 * channels and build the link-mode mask (accept / master).  lm1 collects
 * exact local-address matches, lm2 wildcard (BDADDR_ANY) matches; the
 * exact match wins when one was found.
 */
5231 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5233 int exact = 0, lm1 = 0, lm2 = 0;
5234 struct l2cap_chan *c;
5236 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5238 /* Find listening sockets and check their link_mode */
5239 read_lock(&chan_list_lock);
5240 list_for_each_entry(c, &chan_list, global_l) {
5241 struct sock *sk = c->sk;
5243 if (c->state != BT_LISTEN)
5246 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5247 lm1 |= HCI_LM_ACCEPT;
5248 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5249 lm1 |= HCI_LM_MASTER;
5251 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5252 lm2 |= HCI_LM_ACCEPT;
5253 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5254 lm2 |= HCI_LM_MASTER;
5257 read_unlock(&chan_list_lock);
5259 return exact ? lm1 : lm2;
/* HCI callback after a connection attempt completes: on success attach
 * an l2cap_conn and mark it ready, otherwise tear down with the HCI
 * status translated to an errno.
 */
5262 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5264 struct l2cap_conn *conn;
5266 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5269 conn = l2cap_conn_add(hcon, status);
5271 l2cap_conn_ready(conn);
5273 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback asking for the disconnect reason to report; falls back
 * to "remote user terminated" when no l2cap_conn exists.
 */
5278 int l2cap_disconn_ind(struct hci_conn *hcon)
5280 struct l2cap_conn *conn = hcon->l2cap_data;
5282 BT_DBG("hcon %p", hcon);
5285 return HCI_ERROR_REMOTE_USER_TERM;
5286 return conn->disc_reason;
/* HCI callback on link disconnection: drop the whole L2CAP connection,
 * translating the HCI reason code to an errno.
 */
5289 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5291 BT_DBG("hcon %p reason %d", hcon, reason);
5293 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a timeout (MEDIUM security) or closes the
 * channel outright (HIGH); regaining it clears the pending timer.
 */
5297 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5299 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5302 if (encrypt == 0x00) {
5303 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5304 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5305 } else if (chan->sec_level == BT_SECURITY_HIGH)
5306 l2cap_chan_close(chan, ECONNREFUSED);
5308 if (chan->sec_level == BT_SECURITY_MEDIUM)
5309 __clear_chan_timer(chan);
/* HCI security-event callback: walk every channel on the connection and
 * advance its state now that authentication/encryption has completed
 * (or failed).  LE links additionally trigger SMP key distribution;
 * BR/EDR channels in BT_CONNECT/BT_CONNECT2 continue or answer their
 * pending connection setup.
 *
 * NOTE(review): listing is elided; some control-flow lines between the
 * numbered statements are not visible.
 */
5313 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5315 struct l2cap_conn *conn = hcon->l2cap_data;
5316 struct l2cap_chan *chan;
5321 BT_DBG("conn %p", conn);
5323 if (hcon->type == LE_LINK) {
5324 if (!status && encrypt)
5325 smp_distribute_keys(conn, 0);
5326 cancel_delayed_work(&conn->security_timer);
5329 mutex_lock(&conn->chan_lock);
5331 list_for_each_entry(chan, &conn->chan_l, list) {
5332 l2cap_chan_lock(chan);
5334 BT_DBG("chan->scid %d", chan->scid);
5336 if (chan->scid == L2CAP_CID_LE_DATA) {
/* LE data channel becomes ready once the link is encrypted */
5337 if (!status && encrypt) {
5338 chan->sec_level = hcon->sec_level;
5339 l2cap_chan_ready(chan);
5342 l2cap_chan_unlock(chan);
/* Channel still waiting on a pending connect: skip for now */
5346 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5347 l2cap_chan_unlock(chan);
5351 if (!status && (chan->state == BT_CONNECTED ||
5352 chan->state == BT_CONFIG)) {
5353 struct sock *sk = chan->sk;
5355 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5356 sk->sk_state_change(sk);
5358 l2cap_check_encryption(chan, encrypt);
5359 l2cap_chan_unlock(chan);
5363 if (chan->state == BT_CONNECT) {
/* Security done: either send the deferred connect request
 * or arm the disconnect timer on failure (branch elided).
 */
5365 l2cap_send_conn_req(chan);
5367 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5369 } else if (chan->state == BT_CONNECT2) {
5370 struct sock *sk = chan->sk;
5371 struct l2cap_conn_rsp rsp;
5377 if (test_bit(BT_SK_DEFER_SETUP,
5378 &bt_sk(sk)->flags)) {
/* Authorization deferred to userspace: answer PEND and
 * wake the listening parent socket.
 */
5379 struct sock *parent = bt_sk(sk)->parent;
5380 res = L2CAP_CR_PEND;
5381 stat = L2CAP_CS_AUTHOR_PEND;
5383 parent->sk_data_ready(parent, 0);
5385 __l2cap_state_change(chan, BT_CONFIG);
5386 res = L2CAP_CR_SUCCESS;
5387 stat = L2CAP_CS_NO_INFO;
/* Security failure path (elided): reject and disconnect */
5390 __l2cap_state_change(chan, BT_DISCONN);
5391 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5392 res = L2CAP_CR_SEC_BLOCK;
5393 stat = L2CAP_CS_NO_INFO;
5398 rsp.scid = cpu_to_le16(chan->dcid);
5399 rsp.dcid = cpu_to_le16(chan->scid);
5400 rsp.result = cpu_to_le16(res);
5401 rsp.status = cpu_to_le16(stat);
5402 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5406 l2cap_chan_unlock(chan);
5409 mutex_unlock(&conn->chan_lock);
/* Reassemble ACL fragments into a complete L2CAP frame.  A start
 * fragment (no ACL_CONT) carries the basic L2CAP header announcing the
 * total length; continuation fragments are appended into conn->rx_skb
 * until conn->rx_len reaches zero, then the frame is dispatched.
 * Any framing inconsistency marks the connection unreliable (ECOMM).
 *
 * NOTE(review): listing is elided; some drop/return paths between the
 * numbered lines are not visible.
 */
5414 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5416 struct l2cap_conn *conn = hcon->l2cap_data;
5419 conn = l2cap_conn_add(hcon, 0);
5424 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5426 if (!(flags & ACL_CONT)) {
5427 struct l2cap_hdr *hdr;
/* A start fragment while reassembly is in progress means the
 * previous frame was truncated: discard it.
 */
5431 BT_ERR("Unexpected start frame (len %d)", skb->len);
5432 kfree_skb(conn->rx_skb);
5433 conn->rx_skb = NULL;
5435 l2cap_conn_unreliable(conn, ECOMM);
5438 /* Start fragment always begin with Basic L2CAP header */
5439 if (skb->len < L2CAP_HDR_SIZE) {
5440 BT_ERR("Frame is too short (len %d)", skb->len);
5441 l2cap_conn_unreliable(conn, ECOMM);
5445 hdr = (struct l2cap_hdr *) skb->data;
5446 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5448 if (len == skb->len) {
5449 /* Complete frame received */
5450 l2cap_recv_frame(conn, skb);
5454 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5456 if (skb->len > len) {
5457 BT_ERR("Frame is too long (len %d, expected len %d)",
5459 l2cap_conn_unreliable(conn, ECOMM);
5463 /* Allocate skb for the complete frame (with header) */
5464 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5468 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5470 conn->rx_len = len - skb->len;
/* Continuation fragment path */
5472 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5474 if (!conn->rx_len) {
5475 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5476 l2cap_conn_unreliable(conn, ECOMM);
5480 if (skb->len > conn->rx_len) {
5481 BT_ERR("Fragment is too long (len %d, expected %d)",
5482 skb->len, conn->rx_len);
5483 kfree_skb(conn->rx_skb);
5484 conn->rx_skb = NULL;
5486 l2cap_conn_unreliable(conn, ECOMM);
5490 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5492 conn->rx_len -= skb->len;
5494 if (!conn->rx_len) {
5495 /* Complete frame received */
5496 l2cap_recv_frame(conn, conn->rx_skb);
5497 conn->rx_skb = NULL;
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: print
 * one line per registered channel (addresses, state, PSM, CIDs, MTUs,
 * security level, mode) under the global channel-list read lock.
 */
5506 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5508 struct l2cap_chan *c;
5510 read_lock(&chan_list_lock);
5512 list_for_each_entry(c, &chan_list, global_l) {
5513 struct sock *sk = c->sk;
5515 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5516 batostr(&bt_sk(sk)->src),
5517 batostr(&bt_sk(sk)->dst),
5518 c->state, __le16_to_cpu(c->psm),
5519 c->scid, c->dcid, c->imtu, c->omtu,
5520 c->sec_level, c->mode);
5523 read_unlock(&chan_list_lock);
/* debugfs open callback: bind the seq_file single-show handler. */
5528 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5530 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file single-open).
 * NOTE(review): a .read line appears to be elided from this listing.
 */
5533 static const struct file_operations l2cap_debugfs_fops = {
5534 .open = l2cap_debugfs_open,
5536 .llseek = seq_lseek,
5537 .release = single_release,
/* Dentry of the created debugfs file; removed in l2cap_exit() */
5540 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and create the debugfs
 * entry (creation failure is only logged, not fatal).
 */
5542 int __init l2cap_init(void)
5546 err = l2cap_init_sockets();
5551 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5552 bt_debugfs, NULL, &l2cap_debugfs_fops);
5554 BT_ERR("Failed to create L2CAP debug file");
/* Module teardown: remove the debugfs entry and unregister sockets. */
5560 void l2cap_exit(void)
5562 debugfs_remove(l2cap_debugfs);
5563 l2cap_cleanup_sockets();
/* Module parameter: allows disabling ERTM support at load time. */
5566 module_param(disable_ertm, bool, 0644);
5567 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");