2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
60 bool disable_ertm = 1;
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
77 struct sk_buff_head *skbs, u8 event);
79 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
85 list_for_each_entry(c, &conn->chan_l, list) {
92 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
96 list_for_each_entry(c, &conn->chan_l, list) {
103 /* Find channel with given SCID.
104 * Returns locked channel. */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 struct l2cap_chan *c;
109 mutex_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
113 mutex_unlock(&conn->chan_lock);
118 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
120 struct l2cap_chan *c;
122 list_for_each_entry(c, &conn->chan_l, list) {
123 if (c->ident == ident)
129 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
131 struct l2cap_chan *c;
133 list_for_each_entry(c, &chan_list, global_l) {
134 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
140 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
144 write_lock(&chan_list_lock);
146 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
159 for (p = 0x1001; p < 0x1100; p += 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
161 chan->psm = cpu_to_le16(p);
162 chan->sport = cpu_to_le16(p);
169 write_unlock(&chan_list_lock);
173 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
175 write_lock(&chan_list_lock);
179 write_unlock(&chan_list_lock);
184 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
186 u16 cid = L2CAP_CID_DYN_START;
188 for (; cid < L2CAP_CID_DYN_END; cid++) {
189 if (!__l2cap_get_chan_by_scid(conn, cid))
196 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
198 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
199 state_to_string(state));
202 chan->ops->state_change(chan->data, state);
205 static void l2cap_state_change(struct l2cap_chan *chan, int state)
207 struct sock *sk = chan->sk;
210 __l2cap_state_change(chan, state);
214 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
216 struct sock *sk = chan->sk;
221 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
223 struct sock *sk = chan->sk;
226 __l2cap_chan_set_err(chan, err);
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
256 size_t alloc_size, i;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size = roundup_pow_of_two(size);
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
279 kfree(seq_list->list);
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
285 /* Constant-time check for list membership */
286 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
289 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
291 u16 mask = seq_list->mask;
293 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR;
296 } else if (seq_list->head == seq) {
297 /* Head can be removed in constant time */
298 seq_list->head = seq_list->list[seq & mask];
299 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
301 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
302 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
303 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 /* Walk the list to find the sequence number */
307 u16 prev = seq_list->head;
308 while (seq_list->list[prev & mask] != seq) {
309 prev = seq_list->list[prev & mask];
310 if (prev == L2CAP_SEQ_LIST_TAIL)
311 return L2CAP_SEQ_LIST_CLEAR;
314 /* Unlink the number from the list and clear it */
315 seq_list->list[prev & mask] = seq_list->list[seq & mask];
316 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
317 if (seq_list->tail == seq)
318 seq_list->tail = prev;
323 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list, seq_list->head);
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
343 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
345 u16 mask = seq_list->mask;
347 /* All appends happen in constant time */
349 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
352 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
353 seq_list->head = seq;
355 seq_list->list[seq_list->tail & mask] = seq;
357 seq_list->tail = seq;
358 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
361 static void l2cap_chan_timeout(struct work_struct *work)
363 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
365 struct l2cap_conn *conn = chan->conn;
368 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
370 mutex_lock(&conn->chan_lock);
371 l2cap_chan_lock(chan);
373 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
374 reason = ECONNREFUSED;
375 else if (chan->state == BT_CONNECT &&
376 chan->sec_level != BT_SECURITY_SDP)
377 reason = ECONNREFUSED;
381 l2cap_chan_close(chan, reason);
383 l2cap_chan_unlock(chan);
385 chan->ops->close(chan->data);
386 mutex_unlock(&conn->chan_lock);
388 l2cap_chan_put(chan);
391 struct l2cap_chan *l2cap_chan_create(void)
393 struct l2cap_chan *chan;
395 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
399 mutex_init(&chan->lock);
401 write_lock(&chan_list_lock);
402 list_add(&chan->global_l, &chan_list);
403 write_unlock(&chan_list_lock);
405 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
407 chan->state = BT_OPEN;
409 atomic_set(&chan->refcnt, 1);
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
414 BT_DBG("chan %p", chan);
419 void l2cap_chan_destroy(struct l2cap_chan *chan)
421 write_lock(&chan_list_lock);
422 list_del(&chan->global_l);
423 write_unlock(&chan_list_lock);
425 l2cap_chan_put(chan);
428 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
430 chan->fcs = L2CAP_FCS_CRC16;
431 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
432 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
433 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
434 chan->sec_level = BT_SECURITY_LOW;
436 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
439 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
441 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
442 __le16_to_cpu(chan->psm), chan->dcid);
444 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
448 switch (chan->chan_type) {
449 case L2CAP_CHAN_CONN_ORIENTED:
450 if (conn->hcon->type == LE_LINK) {
452 chan->omtu = L2CAP_LE_DEFAULT_MTU;
453 chan->scid = L2CAP_CID_LE_DATA;
454 chan->dcid = L2CAP_CID_LE_DATA;
456 /* Alloc CID for connection-oriented socket */
457 chan->scid = l2cap_alloc_cid(conn);
458 chan->omtu = L2CAP_DEFAULT_MTU;
462 case L2CAP_CHAN_CONN_LESS:
463 /* Connectionless socket */
464 chan->scid = L2CAP_CID_CONN_LESS;
465 chan->dcid = L2CAP_CID_CONN_LESS;
466 chan->omtu = L2CAP_DEFAULT_MTU;
470 /* Raw socket can send/recv signalling messages only */
471 chan->scid = L2CAP_CID_SIGNALING;
472 chan->dcid = L2CAP_CID_SIGNALING;
473 chan->omtu = L2CAP_DEFAULT_MTU;
476 chan->local_id = L2CAP_BESTEFFORT_ID;
477 chan->local_stype = L2CAP_SERV_BESTEFFORT;
478 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
479 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
480 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
481 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
483 l2cap_chan_hold(chan);
485 list_add(&chan->list, &conn->chan_l);
488 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
490 mutex_lock(&conn->chan_lock);
491 __l2cap_chan_add(conn, chan);
492 mutex_unlock(&conn->chan_lock);
495 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
497 struct sock *sk = chan->sk;
498 struct l2cap_conn *conn = chan->conn;
499 struct sock *parent = bt_sk(sk)->parent;
501 __clear_chan_timer(chan);
503 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
506 /* Delete from channel list */
507 list_del(&chan->list);
509 l2cap_chan_put(chan);
512 hci_conn_put(conn->hcon);
517 __l2cap_state_change(chan, BT_CLOSED);
518 sock_set_flag(sk, SOCK_ZAPPED);
521 __l2cap_chan_set_err(chan, err);
524 bt_accept_unlink(sk);
525 parent->sk_data_ready(parent, 0);
527 sk->sk_state_change(sk);
531 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
534 skb_queue_purge(&chan->tx_q);
536 if (chan->mode == L2CAP_MODE_ERTM) {
537 __clear_retrans_timer(chan);
538 __clear_monitor_timer(chan);
539 __clear_ack_timer(chan);
541 skb_queue_purge(&chan->srej_q);
543 l2cap_seq_list_free(&chan->srej_list);
544 l2cap_seq_list_free(&chan->retrans_list);
548 static void l2cap_chan_cleanup_listen(struct sock *parent)
552 BT_DBG("parent %p", parent);
554 /* Close not yet accepted channels */
555 while ((sk = bt_accept_dequeue(parent, NULL))) {
556 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
558 l2cap_chan_lock(chan);
559 __clear_chan_timer(chan);
560 l2cap_chan_close(chan, ECONNRESET);
561 l2cap_chan_unlock(chan);
563 chan->ops->close(chan->data);
567 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
569 struct l2cap_conn *conn = chan->conn;
570 struct sock *sk = chan->sk;
572 BT_DBG("chan %p state %s sk %p", chan,
573 state_to_string(chan->state), sk);
575 switch (chan->state) {
578 l2cap_chan_cleanup_listen(sk);
580 __l2cap_state_change(chan, BT_CLOSED);
581 sock_set_flag(sk, SOCK_ZAPPED);
587 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
588 conn->hcon->type == ACL_LINK) {
589 __set_chan_timer(chan, sk->sk_sndtimeo);
590 l2cap_send_disconn_req(conn, chan, reason);
592 l2cap_chan_del(chan, reason);
596 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
597 conn->hcon->type == ACL_LINK) {
598 struct l2cap_conn_rsp rsp;
601 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
602 result = L2CAP_CR_SEC_BLOCK;
604 result = L2CAP_CR_BAD_PSM;
605 l2cap_state_change(chan, BT_DISCONN);
607 rsp.scid = cpu_to_le16(chan->dcid);
608 rsp.dcid = cpu_to_le16(chan->scid);
609 rsp.result = cpu_to_le16(result);
610 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
611 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
615 l2cap_chan_del(chan, reason);
620 l2cap_chan_del(chan, reason);
625 sock_set_flag(sk, SOCK_ZAPPED);
631 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
633 if (chan->chan_type == L2CAP_CHAN_RAW) {
634 switch (chan->sec_level) {
635 case BT_SECURITY_HIGH:
636 return HCI_AT_DEDICATED_BONDING_MITM;
637 case BT_SECURITY_MEDIUM:
638 return HCI_AT_DEDICATED_BONDING;
640 return HCI_AT_NO_BONDING;
642 } else if (chan->psm == cpu_to_le16(0x0001)) {
643 if (chan->sec_level == BT_SECURITY_LOW)
644 chan->sec_level = BT_SECURITY_SDP;
646 if (chan->sec_level == BT_SECURITY_HIGH)
647 return HCI_AT_NO_BONDING_MITM;
649 return HCI_AT_NO_BONDING;
651 switch (chan->sec_level) {
652 case BT_SECURITY_HIGH:
653 return HCI_AT_GENERAL_BONDING_MITM;
654 case BT_SECURITY_MEDIUM:
655 return HCI_AT_GENERAL_BONDING;
657 return HCI_AT_NO_BONDING;
662 /* Service level security */
663 int l2cap_chan_check_security(struct l2cap_chan *chan)
665 struct l2cap_conn *conn = chan->conn;
668 auth_type = l2cap_get_auth_type(chan);
670 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
673 static u8 l2cap_get_ident(struct l2cap_conn *conn)
677 /* Get next available identificator.
678 * 1 - 128 are used by kernel.
679 * 129 - 199 are reserved.
680 * 200 - 254 are used by utilities like l2ping, etc.
683 spin_lock(&conn->lock);
685 if (++conn->tx_ident > 128)
690 spin_unlock(&conn->lock);
695 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
697 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
700 BT_DBG("code 0x%2.2x", code);
705 if (lmp_no_flush_capable(conn->hcon->hdev))
706 flags = ACL_START_NO_FLUSH;
710 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
711 skb->priority = HCI_PRIO_MAX;
713 hci_send_acl(conn->hchan, skb, flags);
716 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
718 struct hci_conn *hcon = chan->conn->hcon;
721 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
724 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
725 lmp_no_flush_capable(hcon->hdev))
726 flags = ACL_START_NO_FLUSH;
730 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
731 hci_send_acl(chan->conn->hchan, skb, flags);
734 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
736 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
737 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
739 if (enh & L2CAP_CTRL_FRAME_TYPE) {
742 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
743 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
750 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
751 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
758 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
760 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
761 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
763 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
766 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
767 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
774 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
775 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
782 static inline void __unpack_control(struct l2cap_chan *chan,
785 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
786 __unpack_extended_control(get_unaligned_le32(skb->data),
787 &bt_cb(skb)->control);
788 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
790 __unpack_enhanced_control(get_unaligned_le16(skb->data),
791 &bt_cb(skb)->control);
792 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
796 static u32 __pack_extended_control(struct l2cap_ctrl *control)
800 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
801 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
803 if (control->sframe) {
804 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
805 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
806 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
808 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
809 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
815 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
819 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
820 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
822 if (control->sframe) {
823 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
824 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
825 packed |= L2CAP_CTRL_FRAME_TYPE;
827 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
828 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
834 static inline void __pack_control(struct l2cap_chan *chan,
835 struct l2cap_ctrl *control,
838 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
839 put_unaligned_le32(__pack_extended_control(control),
840 skb->data + L2CAP_HDR_SIZE);
842 put_unaligned_le16(__pack_enhanced_control(control),
843 skb->data + L2CAP_HDR_SIZE);
847 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
851 struct l2cap_hdr *lh;
854 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
855 hlen = L2CAP_EXT_HDR_SIZE;
857 hlen = L2CAP_ENH_HDR_SIZE;
859 if (chan->fcs == L2CAP_FCS_CRC16)
860 hlen += L2CAP_FCS_SIZE;
862 skb = bt_skb_alloc(hlen, GFP_KERNEL);
865 return ERR_PTR(-ENOMEM);
867 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
868 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
869 lh->cid = cpu_to_le16(chan->dcid);
871 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
872 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
874 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
876 if (chan->fcs == L2CAP_FCS_CRC16) {
877 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
878 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
881 skb->priority = HCI_PRIO_MAX;
885 static void l2cap_send_sframe(struct l2cap_chan *chan,
886 struct l2cap_ctrl *control)
891 BT_DBG("chan %p, control %p", chan, control);
893 if (!control->sframe)
896 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
900 if (control->super == L2CAP_SUPER_RR)
901 clear_bit(CONN_RNR_SENT, &chan->conn_state);
902 else if (control->super == L2CAP_SUPER_RNR)
903 set_bit(CONN_RNR_SENT, &chan->conn_state);
905 if (control->super != L2CAP_SUPER_SREJ) {
906 chan->last_acked_seq = control->reqseq;
907 __clear_ack_timer(chan);
910 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
911 control->final, control->poll, control->super);
913 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
914 control_field = __pack_extended_control(control);
916 control_field = __pack_enhanced_control(control);
918 skb = l2cap_create_sframe_pdu(chan, control_field);
920 l2cap_do_send(chan, skb);
923 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
925 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
926 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
927 set_bit(CONN_RNR_SENT, &chan->conn_state);
929 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
931 control |= __set_reqseq(chan, chan->buffer_seq);
934 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
936 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
939 static void l2cap_send_conn_req(struct l2cap_chan *chan)
941 struct l2cap_conn *conn = chan->conn;
942 struct l2cap_conn_req req;
944 req.scid = cpu_to_le16(chan->scid);
947 chan->ident = l2cap_get_ident(conn);
949 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
951 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
954 static void l2cap_chan_ready(struct l2cap_chan *chan)
956 struct sock *sk = chan->sk;
961 parent = bt_sk(sk)->parent;
963 BT_DBG("sk %p, parent %p", sk, parent);
965 /* This clears all conf flags, including CONF_NOT_COMPLETE */
966 chan->conf_state = 0;
967 __clear_chan_timer(chan);
969 __l2cap_state_change(chan, BT_CONNECTED);
970 sk->sk_state_change(sk);
973 parent->sk_data_ready(parent, 0);
978 static void l2cap_do_start(struct l2cap_chan *chan)
980 struct l2cap_conn *conn = chan->conn;
982 if (conn->hcon->type == LE_LINK) {
983 l2cap_chan_ready(chan);
987 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
988 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
991 if (l2cap_chan_check_security(chan) &&
992 __l2cap_no_conn_pending(chan))
993 l2cap_send_conn_req(chan);
995 struct l2cap_info_req req;
996 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
998 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
999 conn->info_ident = l2cap_get_ident(conn);
1001 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1003 l2cap_send_cmd(conn, conn->info_ident,
1004 L2CAP_INFO_REQ, sizeof(req), &req);
1008 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1010 u32 local_feat_mask = l2cap_feat_mask;
1012 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1015 case L2CAP_MODE_ERTM:
1016 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1017 case L2CAP_MODE_STREAMING:
1018 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1024 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1026 struct sock *sk = chan->sk;
1027 struct l2cap_disconn_req req;
1032 if (chan->mode == L2CAP_MODE_ERTM) {
1033 __clear_retrans_timer(chan);
1034 __clear_monitor_timer(chan);
1035 __clear_ack_timer(chan);
1038 req.dcid = cpu_to_le16(chan->dcid);
1039 req.scid = cpu_to_le16(chan->scid);
1040 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1041 L2CAP_DISCONN_REQ, sizeof(req), &req);
1044 __l2cap_state_change(chan, BT_DISCONN);
1045 __l2cap_chan_set_err(chan, err);
1049 /* ---- L2CAP connections ---- */
1050 static void l2cap_conn_start(struct l2cap_conn *conn)
1052 struct l2cap_chan *chan, *tmp;
1054 BT_DBG("conn %p", conn);
1056 mutex_lock(&conn->chan_lock);
1058 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1059 struct sock *sk = chan->sk;
1061 l2cap_chan_lock(chan);
1063 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1064 l2cap_chan_unlock(chan);
1068 if (chan->state == BT_CONNECT) {
1069 if (!l2cap_chan_check_security(chan) ||
1070 !__l2cap_no_conn_pending(chan)) {
1071 l2cap_chan_unlock(chan);
1075 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1076 && test_bit(CONF_STATE2_DEVICE,
1077 &chan->conf_state)) {
1078 l2cap_chan_close(chan, ECONNRESET);
1079 l2cap_chan_unlock(chan);
1083 l2cap_send_conn_req(chan);
1085 } else if (chan->state == BT_CONNECT2) {
1086 struct l2cap_conn_rsp rsp;
1088 rsp.scid = cpu_to_le16(chan->dcid);
1089 rsp.dcid = cpu_to_le16(chan->scid);
1091 if (l2cap_chan_check_security(chan)) {
1093 if (test_bit(BT_SK_DEFER_SETUP,
1094 &bt_sk(sk)->flags)) {
1095 struct sock *parent = bt_sk(sk)->parent;
1096 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1097 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1099 parent->sk_data_ready(parent, 0);
1102 __l2cap_state_change(chan, BT_CONFIG);
1103 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1104 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1108 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1109 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1112 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1115 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1116 rsp.result != L2CAP_CR_SUCCESS) {
1117 l2cap_chan_unlock(chan);
1121 set_bit(CONF_REQ_SENT, &chan->conf_state);
1122 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1123 l2cap_build_conf_req(chan, buf), buf);
1124 chan->num_conf_req++;
1127 l2cap_chan_unlock(chan);
1130 mutex_unlock(&conn->chan_lock);
1133 /* Find socket with cid and source/destination bdaddr.
1134 * Returns closest match, locked.
1136 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1140 struct l2cap_chan *c, *c1 = NULL;
1142 read_lock(&chan_list_lock);
1144 list_for_each_entry(c, &chan_list, global_l) {
1145 struct sock *sk = c->sk;
1147 if (state && c->state != state)
1150 if (c->scid == cid) {
1151 int src_match, dst_match;
1152 int src_any, dst_any;
1155 src_match = !bacmp(&bt_sk(sk)->src, src);
1156 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1157 if (src_match && dst_match) {
1158 read_unlock(&chan_list_lock);
1163 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1164 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1165 if ((src_match && dst_any) || (src_any && dst_match) ||
1166 (src_any && dst_any))
1171 read_unlock(&chan_list_lock);
1176 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1178 struct sock *parent, *sk;
1179 struct l2cap_chan *chan, *pchan;
1183 /* Check if we have socket listening on cid */
1184 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1185 conn->src, conn->dst);
1193 /* Check for backlog size */
1194 if (sk_acceptq_is_full(parent)) {
1195 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1199 chan = pchan->ops->new_connection(pchan->data);
1205 hci_conn_hold(conn->hcon);
1207 bacpy(&bt_sk(sk)->src, conn->src);
1208 bacpy(&bt_sk(sk)->dst, conn->dst);
1210 bt_accept_enqueue(parent, sk);
1212 l2cap_chan_add(conn, chan);
1214 __set_chan_timer(chan, sk->sk_sndtimeo);
1216 __l2cap_state_change(chan, BT_CONNECTED);
1217 parent->sk_data_ready(parent, 0);
1220 release_sock(parent);
1223 static void l2cap_conn_ready(struct l2cap_conn *conn)
1225 struct l2cap_chan *chan;
1227 BT_DBG("conn %p", conn);
1229 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1230 l2cap_le_conn_ready(conn);
1232 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1233 smp_conn_security(conn, conn->hcon->pending_sec_level);
1235 mutex_lock(&conn->chan_lock);
1237 list_for_each_entry(chan, &conn->chan_l, list) {
1239 l2cap_chan_lock(chan);
1241 if (conn->hcon->type == LE_LINK) {
1242 if (smp_conn_security(conn, chan->sec_level))
1243 l2cap_chan_ready(chan);
1245 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1246 struct sock *sk = chan->sk;
1247 __clear_chan_timer(chan);
1249 __l2cap_state_change(chan, BT_CONNECTED);
1250 sk->sk_state_change(sk);
1253 } else if (chan->state == BT_CONNECT)
1254 l2cap_do_start(chan);
1256 l2cap_chan_unlock(chan);
1259 mutex_unlock(&conn->chan_lock);
1262 /* Notify sockets that we cannot guaranty reliability anymore */
1263 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1265 struct l2cap_chan *chan;
1267 BT_DBG("conn %p", conn);
1269 mutex_lock(&conn->chan_lock);
1271 list_for_each_entry(chan, &conn->chan_l, list) {
1272 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1273 __l2cap_chan_set_err(chan, err);
1276 mutex_unlock(&conn->chan_lock);
1279 static void l2cap_info_timeout(struct work_struct *work)
1281 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1284 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1285 conn->info_ident = 0;
1287 l2cap_conn_start(conn);
1290 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1292 struct l2cap_conn *conn = hcon->l2cap_data;
1293 struct l2cap_chan *chan, *l;
1298 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1300 kfree_skb(conn->rx_skb);
1302 mutex_lock(&conn->chan_lock);
1305 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1306 l2cap_chan_hold(chan);
1307 l2cap_chan_lock(chan);
1309 l2cap_chan_del(chan, err);
1311 l2cap_chan_unlock(chan);
1313 chan->ops->close(chan->data);
1314 l2cap_chan_put(chan);
1317 mutex_unlock(&conn->chan_lock);
1319 hci_chan_del(conn->hchan);
1321 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1322 cancel_delayed_work_sync(&conn->info_timer);
1324 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1325 cancel_delayed_work_sync(&conn->security_timer);
1326 smp_chan_destroy(conn);
1329 hcon->l2cap_data = NULL;
1333 static void security_timeout(struct work_struct *work)
1335 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1336 security_timer.work);
1338 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1341 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1343 struct l2cap_conn *conn = hcon->l2cap_data;
1344 struct hci_chan *hchan;
1349 hchan = hci_chan_create(hcon);
1353 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1355 hci_chan_del(hchan);
1359 hcon->l2cap_data = conn;
1361 conn->hchan = hchan;
1363 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1365 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1366 conn->mtu = hcon->hdev->le_mtu;
1368 conn->mtu = hcon->hdev->acl_mtu;
1370 conn->src = &hcon->hdev->bdaddr;
1371 conn->dst = &hcon->dst;
1373 conn->feat_mask = 0;
1375 spin_lock_init(&conn->lock);
1376 mutex_init(&conn->chan_lock);
1378 INIT_LIST_HEAD(&conn->chan_l);
1380 if (hcon->type == LE_LINK)
1381 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1383 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1385 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1390 /* ---- Socket interface ---- */
1392 /* Find socket with psm and source / destination bdaddr.
1393 * Returns closest match.
1395 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1399 struct l2cap_chan *c, *c1 = NULL;
1401 read_lock(&chan_list_lock);
1403 list_for_each_entry(c, &chan_list, global_l) {
1404 struct sock *sk = c->sk;
1406 if (state && c->state != state)
1409 if (c->psm == psm) {
1410 int src_match, dst_match;
1411 int src_any, dst_any;
1414 src_match = !bacmp(&bt_sk(sk)->src, src);
1415 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1416 if (src_match && dst_match) {
1417 read_unlock(&chan_list_lock);
1422 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1423 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1424 if ((src_match && dst_any) || (src_any && dst_match) ||
1425 (src_any && dst_any))
1430 read_unlock(&chan_list_lock);
1435 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1436 bdaddr_t *dst, u8 dst_type)
1438 struct sock *sk = chan->sk;
1439 bdaddr_t *src = &bt_sk(sk)->src;
1440 struct l2cap_conn *conn;
1441 struct hci_conn *hcon;
1442 struct hci_dev *hdev;
1446 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1447 dst_type, __le16_to_cpu(chan->psm));
1449 hdev = hci_get_route(dst, src);
1451 return -EHOSTUNREACH;
1455 l2cap_chan_lock(chan);
1457 /* PSM must be odd and lsb of upper byte must be 0 */
1458 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1459 chan->chan_type != L2CAP_CHAN_RAW) {
1464 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1469 switch (chan->mode) {
1470 case L2CAP_MODE_BASIC:
1472 case L2CAP_MODE_ERTM:
1473 case L2CAP_MODE_STREAMING:
1484 switch (sk->sk_state) {
1488 /* Already connecting */
1494 /* Already connected */
1510 /* Set destination address and psm */
1511 bacpy(&bt_sk(sk)->dst, dst);
1518 auth_type = l2cap_get_auth_type(chan);
1520 if (chan->dcid == L2CAP_CID_LE_DATA)
1521 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1522 chan->sec_level, auth_type);
1524 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1525 chan->sec_level, auth_type);
1528 err = PTR_ERR(hcon);
1532 conn = l2cap_conn_add(hcon, 0);
1539 if (hcon->type == LE_LINK) {
1542 if (!list_empty(&conn->chan_l)) {
1551 /* Update source addr of the socket */
1552 bacpy(src, conn->src);
1554 l2cap_chan_unlock(chan);
1555 l2cap_chan_add(conn, chan);
1556 l2cap_chan_lock(chan);
1558 l2cap_state_change(chan, BT_CONNECT);
1559 __set_chan_timer(chan, sk->sk_sndtimeo);
1561 if (hcon->state == BT_CONNECTED) {
1562 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1563 __clear_chan_timer(chan);
1564 if (l2cap_chan_check_security(chan))
1565 l2cap_state_change(chan, BT_CONNECTED);
1567 l2cap_do_start(chan);
1573 l2cap_chan_unlock(chan);
1574 hci_dev_unlock(hdev);
1579 int __l2cap_wait_ack(struct sock *sk)
1581 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1582 DECLARE_WAITQUEUE(wait, current);
1586 add_wait_queue(sk_sleep(sk), &wait);
1587 set_current_state(TASK_INTERRUPTIBLE);
1588 while (chan->unacked_frames > 0 && chan->conn) {
1592 if (signal_pending(current)) {
1593 err = sock_intr_errno(timeo);
1598 timeo = schedule_timeout(timeo);
1600 set_current_state(TASK_INTERRUPTIBLE);
1602 err = sock_error(sk);
1606 set_current_state(TASK_RUNNING);
1607 remove_wait_queue(sk_sleep(sk), &wait);
1611 static void l2cap_monitor_timeout(struct work_struct *work)
1613 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1614 monitor_timer.work);
1616 BT_DBG("chan %p", chan);
1618 l2cap_chan_lock(chan);
1620 if (chan->retry_count >= chan->remote_max_tx) {
1621 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1622 l2cap_chan_unlock(chan);
1623 l2cap_chan_put(chan);
1627 chan->retry_count++;
1628 __set_monitor_timer(chan);
1630 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1631 l2cap_chan_unlock(chan);
1632 l2cap_chan_put(chan);
1635 static void l2cap_retrans_timeout(struct work_struct *work)
1637 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1638 retrans_timer.work);
1640 BT_DBG("chan %p", chan);
1642 l2cap_chan_lock(chan);
1644 chan->retry_count = 1;
1645 __set_monitor_timer(chan);
1647 set_bit(CONN_WAIT_F, &chan->conn_state);
1649 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1651 l2cap_chan_unlock(chan);
1652 l2cap_chan_put(chan);
1655 static int l2cap_streaming_send(struct l2cap_chan *chan,
1656 struct sk_buff_head *skbs)
1658 struct sk_buff *skb;
1659 struct l2cap_ctrl *control;
1661 BT_DBG("chan %p, skbs %p", chan, skbs);
1663 if (chan->state != BT_CONNECTED)
1666 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1668 while (!skb_queue_empty(&chan->tx_q)) {
1670 skb = skb_dequeue(&chan->tx_q);
1672 bt_cb(skb)->control.retries = 1;
1673 control = &bt_cb(skb)->control;
1675 control->reqseq = 0;
1676 control->txseq = chan->next_tx_seq;
1678 __pack_control(chan, control, skb);
1680 if (chan->fcs == L2CAP_FCS_CRC16) {
1681 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1682 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1685 l2cap_do_send(chan, skb);
1687 BT_DBG("Sent txseq %d", (int)control->txseq);
1689 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1690 chan->frames_sent++;
1696 static int l2cap_ertm_send(struct l2cap_chan *chan)
1698 struct sk_buff *skb, *tx_skb;
1699 struct l2cap_ctrl *control;
1702 BT_DBG("chan %p", chan);
1704 if (chan->state != BT_CONNECTED)
1707 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1710 while (chan->tx_send_head &&
1711 chan->unacked_frames < chan->remote_tx_win &&
1712 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1714 skb = chan->tx_send_head;
1716 bt_cb(skb)->control.retries = 1;
1717 control = &bt_cb(skb)->control;
1719 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1722 control->reqseq = chan->buffer_seq;
1723 chan->last_acked_seq = chan->buffer_seq;
1724 control->txseq = chan->next_tx_seq;
1726 __pack_control(chan, control, skb);
1728 if (chan->fcs == L2CAP_FCS_CRC16) {
1729 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1730 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1733 /* Clone after data has been modified. Data is assumed to be
1734 read-only (for locking purposes) on cloned sk_buffs.
1736 tx_skb = skb_clone(skb, GFP_KERNEL);
1741 __set_retrans_timer(chan);
1743 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1744 chan->unacked_frames++;
1745 chan->frames_sent++;
1748 if (skb_queue_is_last(&chan->tx_q, skb))
1749 chan->tx_send_head = NULL;
1751 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1753 l2cap_do_send(chan, tx_skb);
1754 BT_DBG("Sent txseq %d", (int)control->txseq);
1757 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
1758 (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));
1763 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1765 struct l2cap_ctrl control;
1766 struct sk_buff *skb;
1767 struct sk_buff *tx_skb;
1770 BT_DBG("chan %p", chan);
1772 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1775 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1776 seq = l2cap_seq_list_pop(&chan->retrans_list);
1778 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1780 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1785 bt_cb(skb)->control.retries++;
1786 control = bt_cb(skb)->control;
1788 if (chan->max_tx != 0 &&
1789 bt_cb(skb)->control.retries > chan->max_tx) {
1790 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1791 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1792 l2cap_seq_list_clear(&chan->retrans_list);
1796 control.reqseq = chan->buffer_seq;
1797 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1802 if (skb_cloned(skb)) {
1803 /* Cloned sk_buffs are read-only, so we need a
1806 tx_skb = skb_copy(skb, GFP_ATOMIC);
1808 tx_skb = skb_clone(skb, GFP_ATOMIC);
1812 l2cap_seq_list_clear(&chan->retrans_list);
1816 /* Update skb contents */
1817 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1818 put_unaligned_le32(__pack_extended_control(&control),
1819 tx_skb->data + L2CAP_HDR_SIZE);
1821 put_unaligned_le16(__pack_enhanced_control(&control),
1822 tx_skb->data + L2CAP_HDR_SIZE);
1825 if (chan->fcs == L2CAP_FCS_CRC16) {
1826 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1827 put_unaligned_le16(fcs, skb_put(tx_skb,
1831 l2cap_do_send(chan, tx_skb);
1833 BT_DBG("Resent txseq %d", control.txseq);
1835 chan->last_acked_seq = chan->buffer_seq;
1839 static void l2cap_retransmit(struct l2cap_chan *chan,
1840 struct l2cap_ctrl *control)
1842 BT_DBG("chan %p, control %p", chan, control);
1844 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1845 l2cap_ertm_resend(chan);
1848 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1849 struct l2cap_ctrl *control)
1851 struct sk_buff *skb;
1853 BT_DBG("chan %p, control %p", chan, control);
1856 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1858 l2cap_seq_list_clear(&chan->retrans_list);
1860 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1863 if (chan->unacked_frames) {
1864 skb_queue_walk(&chan->tx_q, skb) {
1865 if (bt_cb(skb)->control.txseq == control->reqseq ||
1866 skb == chan->tx_send_head)
1870 skb_queue_walk_from(&chan->tx_q, skb) {
1871 if (skb == chan->tx_send_head)
1874 l2cap_seq_list_append(&chan->retrans_list,
1875 bt_cb(skb)->control.txseq);
1878 l2cap_ertm_resend(chan);
1882 static void l2cap_send_ack(struct l2cap_chan *chan)
1884 struct l2cap_ctrl control;
1885 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1886 chan->last_acked_seq);
1889 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1890 chan, chan->last_acked_seq, chan->buffer_seq);
1892 memset(&control, 0, sizeof(control));
1895 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1896 chan->rx_state == L2CAP_RX_STATE_RECV) {
1897 __clear_ack_timer(chan);
1898 control.super = L2CAP_SUPER_RNR;
1899 control.reqseq = chan->buffer_seq;
1900 l2cap_send_sframe(chan, &control);
1902 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1903 l2cap_ertm_send(chan);
1904 /* If any i-frames were sent, they included an ack */
1905 if (chan->buffer_seq == chan->last_acked_seq)
1909 /* Ack now if the tx window is 3/4ths full.
1910 * Calculate without mul or div
1912 threshold = chan->tx_win;
1913 threshold += threshold << 1;
1916 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
1919 if (frames_to_ack >= threshold) {
1920 __clear_ack_timer(chan);
1921 control.super = L2CAP_SUPER_RR;
1922 control.reqseq = chan->buffer_seq;
1923 l2cap_send_sframe(chan, &control);
1928 __set_ack_timer(chan);
1932 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1933 struct msghdr *msg, int len,
1934 int count, struct sk_buff *skb)
1936 struct l2cap_conn *conn = chan->conn;
1937 struct sk_buff **frag;
1940 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1946 /* Continuation fragments (no L2CAP header) */
1947 frag = &skb_shinfo(skb)->frag_list;
1949 struct sk_buff *tmp;
1951 count = min_t(unsigned int, conn->mtu, len);
1953 tmp = chan->ops->alloc_skb(chan, count,
1954 msg->msg_flags & MSG_DONTWAIT);
1956 return PTR_ERR(tmp);
1960 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1963 (*frag)->priority = skb->priority;
1968 skb->len += (*frag)->len;
1969 skb->data_len += (*frag)->len;
1971 frag = &(*frag)->next;
1977 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1978 struct msghdr *msg, size_t len,
1981 struct l2cap_conn *conn = chan->conn;
1982 struct sk_buff *skb;
1983 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1984 struct l2cap_hdr *lh;
1986 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1988 count = min_t(unsigned int, (conn->mtu - hlen), len);
1990 skb = chan->ops->alloc_skb(chan, count + hlen,
1991 msg->msg_flags & MSG_DONTWAIT);
1995 skb->priority = priority;
1997 /* Create L2CAP header */
1998 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1999 lh->cid = cpu_to_le16(chan->dcid);
2000 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2001 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2003 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2004 if (unlikely(err < 0)) {
2006 return ERR_PTR(err);
2011 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2012 struct msghdr *msg, size_t len,
2015 struct l2cap_conn *conn = chan->conn;
2016 struct sk_buff *skb;
2018 struct l2cap_hdr *lh;
2020 BT_DBG("chan %p len %d", chan, (int)len);
2022 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2024 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2025 msg->msg_flags & MSG_DONTWAIT);
2029 skb->priority = priority;
2031 /* Create L2CAP header */
2032 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2033 lh->cid = cpu_to_le16(chan->dcid);
2034 lh->len = cpu_to_le16(len);
2036 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2037 if (unlikely(err < 0)) {
2039 return ERR_PTR(err);
2044 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2045 struct msghdr *msg, size_t len,
2048 struct l2cap_conn *conn = chan->conn;
2049 struct sk_buff *skb;
2050 int err, count, hlen;
2051 struct l2cap_hdr *lh;
2053 BT_DBG("chan %p len %d", chan, (int)len);
2056 return ERR_PTR(-ENOTCONN);
2058 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2059 hlen = L2CAP_EXT_HDR_SIZE;
2061 hlen = L2CAP_ENH_HDR_SIZE;
2064 hlen += L2CAP_SDULEN_SIZE;
2066 if (chan->fcs == L2CAP_FCS_CRC16)
2067 hlen += L2CAP_FCS_SIZE;
2069 count = min_t(unsigned int, (conn->mtu - hlen), len);
2071 skb = chan->ops->alloc_skb(chan, count + hlen,
2072 msg->msg_flags & MSG_DONTWAIT);
2076 /* Create L2CAP header */
2077 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2078 lh->cid = cpu_to_le16(chan->dcid);
2079 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2081 /* Control header is populated later */
2082 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2083 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2085 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2088 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2090 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2091 if (unlikely(err < 0)) {
2093 return ERR_PTR(err);
2096 bt_cb(skb)->control.fcs = chan->fcs;
2097 bt_cb(skb)->control.retries = 0;
2101 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2102 struct sk_buff_head *seg_queue,
2103 struct msghdr *msg, size_t len)
2105 struct sk_buff *skb;
2111 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2113 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2114 * so fragmented skbs are not used. The HCI layer's handling
2115 * of fragmented skbs is not compatible with ERTM's queueing.
2118 /* PDU size is derived from the HCI MTU */
2119 pdu_len = chan->conn->mtu;
2121 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2123 /* Adjust for largest possible L2CAP overhead. */
2124 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
2126 /* Remote device may have requested smaller PDUs */
2127 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2129 if (len <= pdu_len) {
2130 sar = L2CAP_SAR_UNSEGMENTED;
2134 sar = L2CAP_SAR_START;
2136 pdu_len -= L2CAP_SDULEN_SIZE;
2140 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2143 __skb_queue_purge(seg_queue);
2144 return PTR_ERR(skb);
2147 bt_cb(skb)->control.sar = sar;
2148 __skb_queue_tail(seg_queue, skb);
2153 pdu_len += L2CAP_SDULEN_SIZE;
2156 if (len <= pdu_len) {
2157 sar = L2CAP_SAR_END;
2160 sar = L2CAP_SAR_CONTINUE;
2167 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2170 struct sk_buff *skb;
2172 struct sk_buff_head seg_queue;
2174 /* Connectionless channel */
2175 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2176 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2178 return PTR_ERR(skb);
2180 l2cap_do_send(chan, skb);
2184 switch (chan->mode) {
2185 case L2CAP_MODE_BASIC:
2186 /* Check outgoing MTU */
2187 if (len > chan->omtu)
2190 /* Create a basic PDU */
2191 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2193 return PTR_ERR(skb);
2195 l2cap_do_send(chan, skb);
2199 case L2CAP_MODE_ERTM:
2200 case L2CAP_MODE_STREAMING:
2201 /* Check outgoing MTU */
2202 if (len > chan->omtu) {
2207 __skb_queue_head_init(&seg_queue);
2209 /* Do segmentation before calling in to the state machine,
2210 * since it's possible to block while waiting for memory
2213 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2215 /* The channel could have been closed while segmenting,
2216 * check that it is still connected.
2218 if (chan->state != BT_CONNECTED) {
2219 __skb_queue_purge(&seg_queue);
2226 if (chan->mode == L2CAP_MODE_ERTM)
2227 err = l2cap_tx(chan, 0, &seg_queue,
2228 L2CAP_EV_DATA_REQUEST);
2230 err = l2cap_streaming_send(chan, &seg_queue);
2235 /* If the skbs were not queued for sending, they'll still be in
2236 * seg_queue and need to be purged.
2238 __skb_queue_purge(&seg_queue);
2242 BT_DBG("bad state %1.1x", chan->mode);
2249 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2251 struct l2cap_ctrl control;
2254 BT_DBG("chan %p, txseq %d", chan, txseq);
2256 memset(&control, 0, sizeof(control));
2258 control.super = L2CAP_SUPER_SREJ;
2260 for (seq = chan->expected_tx_seq; seq != txseq;
2261 seq = __next_seq(chan, seq)) {
2262 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2263 control.reqseq = seq;
2264 l2cap_send_sframe(chan, &control);
2265 l2cap_seq_list_append(&chan->srej_list, seq);
2269 chan->expected_tx_seq = __next_seq(chan, txseq);
2272 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2274 struct l2cap_ctrl control;
2276 BT_DBG("chan %p", chan);
2278 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2281 memset(&control, 0, sizeof(control));
2283 control.super = L2CAP_SUPER_SREJ;
2284 control.reqseq = chan->srej_list.tail;
2285 l2cap_send_sframe(chan, &control);
2288 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2290 struct l2cap_ctrl control;
2294 BT_DBG("chan %p, txseq %d", chan, txseq);
2296 memset(&control, 0, sizeof(control));
2298 control.super = L2CAP_SUPER_SREJ;
2300 /* Capture initial list head to allow only one pass through the list. */
2301 initial_head = chan->srej_list.head;
2304 seq = l2cap_seq_list_pop(&chan->srej_list);
2305 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2308 control.reqseq = seq;
2309 l2cap_send_sframe(chan, &control);
2310 l2cap_seq_list_append(&chan->srej_list, seq);
2311 } while (chan->srej_list.head != initial_head);
2314 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2316 struct sk_buff *acked_skb;
2319 BT_DBG("chan %p, reqseq %d", chan, reqseq);
2321 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2324 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2325 chan->expected_ack_seq, chan->unacked_frames);
2327 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2328 ackseq = __next_seq(chan, ackseq)) {
2330 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2332 skb_unlink(acked_skb, &chan->tx_q);
2333 kfree_skb(acked_skb);
2334 chan->unacked_frames--;
2338 chan->expected_ack_seq = reqseq;
2340 if (chan->unacked_frames == 0)
2341 __clear_retrans_timer(chan);
2343 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
2346 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2348 BT_DBG("chan %p", chan);
2350 chan->expected_tx_seq = chan->buffer_seq;
2351 l2cap_seq_list_clear(&chan->srej_list);
2352 skb_queue_purge(&chan->srej_q);
2353 chan->rx_state = L2CAP_RX_STATE_RECV;
2356 static int l2cap_tx_state_xmit(struct l2cap_chan *chan,
2357 struct l2cap_ctrl *control,
2358 struct sk_buff_head *skbs, u8 event)
2362 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2366 case L2CAP_EV_DATA_REQUEST:
2367 if (chan->tx_send_head == NULL)
2368 chan->tx_send_head = skb_peek(skbs);
2370 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2371 l2cap_ertm_send(chan);
2373 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2374 BT_DBG("Enter LOCAL_BUSY");
2375 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2377 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2378 /* The SREJ_SENT state must be aborted if we are to
2379 * enter the LOCAL_BUSY state.
2381 l2cap_abort_rx_srej_sent(chan);
2384 l2cap_send_ack(chan);
2387 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2388 BT_DBG("Exit LOCAL_BUSY");
2389 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2391 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2392 struct l2cap_ctrl local_control;
2394 memset(&local_control, 0, sizeof(local_control));
2395 local_control.sframe = 1;
2396 local_control.super = L2CAP_SUPER_RR;
2397 local_control.poll = 1;
2398 local_control.reqseq = chan->buffer_seq;
2399 l2cap_send_sframe(chan, &local_control);
2401 chan->retry_count = 1;
2402 __set_monitor_timer(chan);
2403 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2406 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2407 l2cap_process_reqseq(chan, control->reqseq);
2409 case L2CAP_EV_EXPLICIT_POLL:
2410 l2cap_send_rr_or_rnr(chan, 1);
2411 chan->retry_count = 1;
2412 __set_monitor_timer(chan);
2413 __clear_ack_timer(chan);
2414 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2416 case L2CAP_EV_RETRANS_TO:
2417 l2cap_send_rr_or_rnr(chan, 1);
2418 chan->retry_count = 1;
2419 __set_monitor_timer(chan);
2420 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2422 case L2CAP_EV_RECV_FBIT:
2423 /* Nothing to process */
2432 static int l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2433 struct l2cap_ctrl *control,
2434 struct sk_buff_head *skbs, u8 event)
2438 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2442 case L2CAP_EV_DATA_REQUEST:
2443 if (chan->tx_send_head == NULL)
2444 chan->tx_send_head = skb_peek(skbs);
2445 /* Queue data, but don't send. */
2446 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2448 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2449 BT_DBG("Enter LOCAL_BUSY");
2450 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2452 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2453 /* The SREJ_SENT state must be aborted if we are to
2454 * enter the LOCAL_BUSY state.
2456 l2cap_abort_rx_srej_sent(chan);
2459 l2cap_send_ack(chan);
2462 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2463 BT_DBG("Exit LOCAL_BUSY");
2464 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2466 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2467 struct l2cap_ctrl local_control;
2468 memset(&local_control, 0, sizeof(local_control));
2469 local_control.sframe = 1;
2470 local_control.super = L2CAP_SUPER_RR;
2471 local_control.poll = 1;
2472 local_control.reqseq = chan->buffer_seq;
2473 l2cap_send_sframe(chan, &local_control);
2475 chan->retry_count = 1;
2476 __set_monitor_timer(chan);
2477 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2480 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2481 l2cap_process_reqseq(chan, control->reqseq);
2485 case L2CAP_EV_RECV_FBIT:
2486 if (control && control->final) {
2487 __clear_monitor_timer(chan);
2488 if (chan->unacked_frames > 0)
2489 __set_retrans_timer(chan);
2490 chan->retry_count = 0;
2491 chan->tx_state = L2CAP_TX_STATE_XMIT;
2492 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2495 case L2CAP_EV_EXPLICIT_POLL:
2498 case L2CAP_EV_MONITOR_TO:
2499 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2500 l2cap_send_rr_or_rnr(chan, 1);
2501 __set_monitor_timer(chan);
2502 chan->retry_count++;
2504 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2514 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2515 struct sk_buff_head *skbs, u8 event)
2519 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2520 chan, control, skbs, event, chan->tx_state);
2522 switch (chan->tx_state) {
2523 case L2CAP_TX_STATE_XMIT:
2524 err = l2cap_tx_state_xmit(chan, control, skbs, event);
2526 case L2CAP_TX_STATE_WAIT_F:
2527 err = l2cap_tx_state_wait_f(chan, control, skbs, event);
2537 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2538 struct l2cap_ctrl *control)
2540 BT_DBG("chan %p, control %p", chan, control);
2541 l2cap_tx(chan, control, 0, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2544 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2545 struct l2cap_ctrl *control)
2547 BT_DBG("chan %p, control %p", chan, control);
2548 l2cap_tx(chan, control, 0, L2CAP_EV_RECV_FBIT);
2551 /* Copy frame to all raw sockets on that connection */
2552 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2554 struct sk_buff *nskb;
2555 struct l2cap_chan *chan;
2557 BT_DBG("conn %p", conn);
2559 mutex_lock(&conn->chan_lock);
2561 list_for_each_entry(chan, &conn->chan_l, list) {
2562 struct sock *sk = chan->sk;
2563 if (chan->chan_type != L2CAP_CHAN_RAW)
2566 /* Don't send frame to the socket it came from */
2569 nskb = skb_clone(skb, GFP_ATOMIC);
2573 if (chan->ops->recv(chan->data, nskb))
2577 mutex_unlock(&conn->chan_lock);
2580 /* ---- L2CAP signalling commands ---- */
2581 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2582 u8 code, u8 ident, u16 dlen, void *data)
2584 struct sk_buff *skb, **frag;
2585 struct l2cap_cmd_hdr *cmd;
2586 struct l2cap_hdr *lh;
2589 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2590 conn, code, ident, dlen);
2592 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2593 count = min_t(unsigned int, conn->mtu, len);
2595 skb = bt_skb_alloc(count, GFP_ATOMIC);
2599 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2600 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2602 if (conn->hcon->type == LE_LINK)
2603 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2605 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2607 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2610 cmd->len = cpu_to_le16(dlen);
2613 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2614 memcpy(skb_put(skb, count), data, count);
2620 /* Continuation fragments (no L2CAP header) */
2621 frag = &skb_shinfo(skb)->frag_list;
2623 count = min_t(unsigned int, conn->mtu, len);
2625 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2629 memcpy(skb_put(*frag, count), data, count);
2634 frag = &(*frag)->next;
2644 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2646 struct l2cap_conf_opt *opt = *ptr;
2649 len = L2CAP_CONF_OPT_SIZE + opt->len;
2657 *val = *((u8 *) opt->val);
2661 *val = get_unaligned_le16(opt->val);
2665 *val = get_unaligned_le32(opt->val);
2669 *val = (unsigned long) opt->val;
2673 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2677 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2679 struct l2cap_conf_opt *opt = *ptr;
2681 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2688 *((u8 *) opt->val) = val;
2692 put_unaligned_le16(val, opt->val);
2696 put_unaligned_le32(val, opt->val);
2700 memcpy(opt->val, (void *) val, len);
2704 *ptr += L2CAP_CONF_OPT_SIZE + len;
2707 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2709 struct l2cap_conf_efs efs;
2711 switch (chan->mode) {
2712 case L2CAP_MODE_ERTM:
2713 efs.id = chan->local_id;
2714 efs.stype = chan->local_stype;
2715 efs.msdu = cpu_to_le16(chan->local_msdu);
2716 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2717 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2718 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2721 case L2CAP_MODE_STREAMING:
2723 efs.stype = L2CAP_SERV_BESTEFFORT;
2724 efs.msdu = cpu_to_le16(chan->local_msdu);
2725 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2734 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2735 (unsigned long) &efs);
2738 static void l2cap_ack_timeout(struct work_struct *work)
2740 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2743 BT_DBG("chan %p", chan);
2745 l2cap_chan_lock(chan);
2747 l2cap_send_ack(chan);
2749 l2cap_chan_unlock(chan);
2751 l2cap_chan_put(chan);
2754 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2758 chan->next_tx_seq = 0;
2759 chan->expected_tx_seq = 0;
2760 chan->expected_ack_seq = 0;
2761 chan->unacked_frames = 0;
2762 chan->buffer_seq = 0;
2763 chan->frames_sent = 0;
2764 chan->last_acked_seq = 0;
2766 chan->sdu_last_frag = NULL;
2769 skb_queue_head_init(&chan->tx_q);
2771 if (chan->mode != L2CAP_MODE_ERTM)
2774 chan->rx_state = L2CAP_RX_STATE_RECV;
2775 chan->tx_state = L2CAP_TX_STATE_XMIT;
2777 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2778 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2779 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2781 skb_queue_head_init(&chan->srej_q);
2783 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2787 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2789 l2cap_seq_list_free(&chan->srej_list);
2794 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2797 case L2CAP_MODE_STREAMING:
2798 case L2CAP_MODE_ERTM:
2799 if (l2cap_mode_supported(mode, remote_feat_mask))
2803 return L2CAP_MODE_BASIC;
2807 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2809 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2812 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2814 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2817 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2819 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2820 __l2cap_ews_supported(chan)) {
2821 /* use extended control field */
2822 set_bit(FLAG_EXT_CTRL, &chan->flags);
2823 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2825 chan->tx_win = min_t(u16, chan->tx_win,
2826 L2CAP_DEFAULT_TX_WINDOW);
2827 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2831 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2833 struct l2cap_conf_req *req = data;
2834 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2835 void *ptr = req->data;
2838 BT_DBG("chan %p", chan);
2840 if (chan->num_conf_req || chan->num_conf_rsp)
2843 switch (chan->mode) {
2844 case L2CAP_MODE_STREAMING:
2845 case L2CAP_MODE_ERTM:
2846 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2849 if (__l2cap_efs_supported(chan))
2850 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2854 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2859 if (chan->imtu != L2CAP_DEFAULT_MTU)
2860 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2862 switch (chan->mode) {
2863 case L2CAP_MODE_BASIC:
2864 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2865 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2868 rfc.mode = L2CAP_MODE_BASIC;
2870 rfc.max_transmit = 0;
2871 rfc.retrans_timeout = 0;
2872 rfc.monitor_timeout = 0;
2873 rfc.max_pdu_size = 0;
2875 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2876 (unsigned long) &rfc);
2879 case L2CAP_MODE_ERTM:
2880 rfc.mode = L2CAP_MODE_ERTM;
2881 rfc.max_transmit = chan->max_tx;
2882 rfc.retrans_timeout = 0;
2883 rfc.monitor_timeout = 0;
2885 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2886 L2CAP_EXT_HDR_SIZE -
2889 rfc.max_pdu_size = cpu_to_le16(size);
2891 l2cap_txwin_setup(chan);
2893 rfc.txwin_size = min_t(u16, chan->tx_win,
2894 L2CAP_DEFAULT_TX_WINDOW);
2896 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2897 (unsigned long) &rfc);
2899 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2900 l2cap_add_opt_efs(&ptr, chan);
2902 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2905 if (chan->fcs == L2CAP_FCS_NONE ||
2906 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2907 chan->fcs = L2CAP_FCS_NONE;
2908 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2911 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2912 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2916 case L2CAP_MODE_STREAMING:
2917 rfc.mode = L2CAP_MODE_STREAMING;
2919 rfc.max_transmit = 0;
2920 rfc.retrans_timeout = 0;
2921 rfc.monitor_timeout = 0;
2923 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2924 L2CAP_EXT_HDR_SIZE -
2927 rfc.max_pdu_size = cpu_to_le16(size);
2929 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2930 (unsigned long) &rfc);
2932 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2933 l2cap_add_opt_efs(&ptr, chan);
2935 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2938 if (chan->fcs == L2CAP_FCS_NONE ||
2939 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2940 chan->fcs = L2CAP_FCS_NONE;
2941 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2946 req->dcid = cpu_to_le16(chan->dcid);
2947 req->flags = cpu_to_le16(0);
2952 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2954 struct l2cap_conf_rsp *rsp = data;
2955 void *ptr = rsp->data;
2956 void *req = chan->conf_req;
2957 int len = chan->conf_len;
2958 int type, hint, olen;
2960 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2961 struct l2cap_conf_efs efs;
2963 u16 mtu = L2CAP_DEFAULT_MTU;
2964 u16 result = L2CAP_CONF_SUCCESS;
2967 BT_DBG("chan %p", chan);
2969 while (len >= L2CAP_CONF_OPT_SIZE) {
2970 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2972 hint = type & L2CAP_CONF_HINT;
2973 type &= L2CAP_CONF_MASK;
2976 case L2CAP_CONF_MTU:
2980 case L2CAP_CONF_FLUSH_TO:
2981 chan->flush_to = val;
2984 case L2CAP_CONF_QOS:
2987 case L2CAP_CONF_RFC:
2988 if (olen == sizeof(rfc))
2989 memcpy(&rfc, (void *) val, olen);
2992 case L2CAP_CONF_FCS:
2993 if (val == L2CAP_FCS_NONE)
2994 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2997 case L2CAP_CONF_EFS:
2999 if (olen == sizeof(efs))
3000 memcpy(&efs, (void *) val, olen);
3003 case L2CAP_CONF_EWS:
3005 return -ECONNREFUSED;
3007 set_bit(FLAG_EXT_CTRL, &chan->flags);
3008 set_bit(CONF_EWS_RECV, &chan->conf_state);
3009 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3010 chan->remote_tx_win = val;
3017 result = L2CAP_CONF_UNKNOWN;
3018 *((u8 *) ptr++) = type;
3023 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3026 switch (chan->mode) {
3027 case L2CAP_MODE_STREAMING:
3028 case L2CAP_MODE_ERTM:
3029 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3030 chan->mode = l2cap_select_mode(rfc.mode,
3031 chan->conn->feat_mask);
3036 if (__l2cap_efs_supported(chan))
3037 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3039 return -ECONNREFUSED;
3042 if (chan->mode != rfc.mode)
3043 return -ECONNREFUSED;
3049 if (chan->mode != rfc.mode) {
3050 result = L2CAP_CONF_UNACCEPT;
3051 rfc.mode = chan->mode;
3053 if (chan->num_conf_rsp == 1)
3054 return -ECONNREFUSED;
3056 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3057 sizeof(rfc), (unsigned long) &rfc);
3060 if (result == L2CAP_CONF_SUCCESS) {
3061 /* Configure output options and let the other side know
3062 * which ones we don't like. */
3064 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3065 result = L2CAP_CONF_UNACCEPT;
3068 set_bit(CONF_MTU_DONE, &chan->conf_state);
3070 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3073 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3074 efs.stype != L2CAP_SERV_NOTRAFIC &&
3075 efs.stype != chan->local_stype) {
3077 result = L2CAP_CONF_UNACCEPT;
3079 if (chan->num_conf_req >= 1)
3080 return -ECONNREFUSED;
3082 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3084 (unsigned long) &efs);
3086 /* Send PENDING Conf Rsp */
3087 result = L2CAP_CONF_PENDING;
3088 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3093 case L2CAP_MODE_BASIC:
3094 chan->fcs = L2CAP_FCS_NONE;
3095 set_bit(CONF_MODE_DONE, &chan->conf_state);
3098 case L2CAP_MODE_ERTM:
3099 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3100 chan->remote_tx_win = rfc.txwin_size;
3102 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3104 chan->remote_max_tx = rfc.max_transmit;
3106 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3108 L2CAP_EXT_HDR_SIZE -
3111 rfc.max_pdu_size = cpu_to_le16(size);
3112 chan->remote_mps = size;
3114 rfc.retrans_timeout =
3115 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3116 rfc.monitor_timeout =
3117 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3119 set_bit(CONF_MODE_DONE, &chan->conf_state);
3121 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3122 sizeof(rfc), (unsigned long) &rfc);
3124 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3125 chan->remote_id = efs.id;
3126 chan->remote_stype = efs.stype;
3127 chan->remote_msdu = le16_to_cpu(efs.msdu);
3128 chan->remote_flush_to =
3129 le32_to_cpu(efs.flush_to);
3130 chan->remote_acc_lat =
3131 le32_to_cpu(efs.acc_lat);
3132 chan->remote_sdu_itime =
3133 le32_to_cpu(efs.sdu_itime);
3134 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3135 sizeof(efs), (unsigned long) &efs);
3139 case L2CAP_MODE_STREAMING:
3140 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3142 L2CAP_EXT_HDR_SIZE -
3145 rfc.max_pdu_size = cpu_to_le16(size);
3146 chan->remote_mps = size;
3148 set_bit(CONF_MODE_DONE, &chan->conf_state);
3150 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3151 sizeof(rfc), (unsigned long) &rfc);
3156 result = L2CAP_CONF_UNACCEPT;
3158 memset(&rfc, 0, sizeof(rfc));
3159 rfc.mode = chan->mode;
3162 if (result == L2CAP_CONF_SUCCESS)
3163 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3165 rsp->scid = cpu_to_le16(chan->dcid);
3166 rsp->result = cpu_to_le16(result);
3167 rsp->flags = cpu_to_le16(0x0000);
3172 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3174 struct l2cap_conf_req *req = data;
3175 void *ptr = req->data;
3178 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3179 struct l2cap_conf_efs efs;
3181 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3183 while (len >= L2CAP_CONF_OPT_SIZE) {
3184 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3187 case L2CAP_CONF_MTU:
3188 if (val < L2CAP_DEFAULT_MIN_MTU) {
3189 *result = L2CAP_CONF_UNACCEPT;
3190 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3193 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3196 case L2CAP_CONF_FLUSH_TO:
3197 chan->flush_to = val;
3198 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3202 case L2CAP_CONF_RFC:
3203 if (olen == sizeof(rfc))
3204 memcpy(&rfc, (void *)val, olen);
3206 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3207 rfc.mode != chan->mode)
3208 return -ECONNREFUSED;
3212 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3213 sizeof(rfc), (unsigned long) &rfc);
3216 case L2CAP_CONF_EWS:
3217 chan->tx_win = min_t(u16, val,
3218 L2CAP_DEFAULT_EXT_WINDOW);
3219 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3223 case L2CAP_CONF_EFS:
3224 if (olen == sizeof(efs))
3225 memcpy(&efs, (void *)val, olen);
3227 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3228 efs.stype != L2CAP_SERV_NOTRAFIC &&
3229 efs.stype != chan->local_stype)
3230 return -ECONNREFUSED;
3232 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3233 sizeof(efs), (unsigned long) &efs);
3238 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3239 return -ECONNREFUSED;
3241 chan->mode = rfc.mode;
3243 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3245 case L2CAP_MODE_ERTM:
3246 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3247 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3248 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3250 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3251 chan->local_msdu = le16_to_cpu(efs.msdu);
3252 chan->local_sdu_itime =
3253 le32_to_cpu(efs.sdu_itime);
3254 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3255 chan->local_flush_to =
3256 le32_to_cpu(efs.flush_to);
3260 case L2CAP_MODE_STREAMING:
3261 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3265 req->dcid = cpu_to_le16(chan->dcid);
3266 req->flags = cpu_to_le16(0x0000);
3271 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3273 struct l2cap_conf_rsp *rsp = data;
3274 void *ptr = rsp->data;
3276 BT_DBG("chan %p", chan);
3278 rsp->scid = cpu_to_le16(chan->dcid);
3279 rsp->result = cpu_to_le16(result);
3280 rsp->flags = cpu_to_le16(flags);
3285 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3287 struct l2cap_conn_rsp rsp;
3288 struct l2cap_conn *conn = chan->conn;
3291 rsp.scid = cpu_to_le16(chan->dcid);
3292 rsp.dcid = cpu_to_le16(chan->scid);
3293 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3294 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3295 l2cap_send_cmd(conn, chan->ident,
3296 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3298 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3301 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3302 l2cap_build_conf_req(chan, buf), buf);
3303 chan->num_conf_req++;
3306 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3310 struct l2cap_conf_rfc rfc;
3312 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3314 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3317 while (len >= L2CAP_CONF_OPT_SIZE) {
3318 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3321 case L2CAP_CONF_RFC:
3322 if (olen == sizeof(rfc))
3323 memcpy(&rfc, (void *)val, olen);
3328 /* Use sane default values in case a misbehaving remote device
3329 * did not send an RFC option.
3331 rfc.mode = chan->mode;
3332 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3333 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3334 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3336 BT_ERR("Expected RFC option was not found, using defaults");
3340 case L2CAP_MODE_ERTM:
3341 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3342 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3343 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3345 case L2CAP_MODE_STREAMING:
3346 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3350 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3352 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3354 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3357 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3358 cmd->ident == conn->info_ident) {
3359 cancel_delayed_work(&conn->info_timer);
3361 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3362 conn->info_ident = 0;
3364 l2cap_conn_start(conn);
3370 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3372 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3373 struct l2cap_conn_rsp rsp;
3374 struct l2cap_chan *chan = NULL, *pchan;
3375 struct sock *parent, *sk = NULL;
3376 int result, status = L2CAP_CS_NO_INFO;
3378 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3379 __le16 psm = req->psm;
3381 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3383 /* Check if we have socket listening on psm */
3384 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3386 result = L2CAP_CR_BAD_PSM;
3392 mutex_lock(&conn->chan_lock);
3395 /* Check if the ACL is secure enough (if not SDP) */
3396 if (psm != cpu_to_le16(0x0001) &&
3397 !hci_conn_check_link_mode(conn->hcon)) {
3398 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3399 result = L2CAP_CR_SEC_BLOCK;
3403 result = L2CAP_CR_NO_MEM;
3405 /* Check for backlog size */
3406 if (sk_acceptq_is_full(parent)) {
3407 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3411 chan = pchan->ops->new_connection(pchan->data);
3417 /* Check if we already have channel with that dcid */
3418 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3419 sock_set_flag(sk, SOCK_ZAPPED);
3420 chan->ops->close(chan->data);
3424 hci_conn_hold(conn->hcon);
3426 bacpy(&bt_sk(sk)->src, conn->src);
3427 bacpy(&bt_sk(sk)->dst, conn->dst);
3431 bt_accept_enqueue(parent, sk);
3433 __l2cap_chan_add(conn, chan);
3437 __set_chan_timer(chan, sk->sk_sndtimeo);
3439 chan->ident = cmd->ident;
3441 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3442 if (l2cap_chan_check_security(chan)) {
3443 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3444 __l2cap_state_change(chan, BT_CONNECT2);
3445 result = L2CAP_CR_PEND;
3446 status = L2CAP_CS_AUTHOR_PEND;
3447 parent->sk_data_ready(parent, 0);
3449 __l2cap_state_change(chan, BT_CONFIG);
3450 result = L2CAP_CR_SUCCESS;
3451 status = L2CAP_CS_NO_INFO;
3454 __l2cap_state_change(chan, BT_CONNECT2);
3455 result = L2CAP_CR_PEND;
3456 status = L2CAP_CS_AUTHEN_PEND;
3459 __l2cap_state_change(chan, BT_CONNECT2);
3460 result = L2CAP_CR_PEND;
3461 status = L2CAP_CS_NO_INFO;
3465 release_sock(parent);
3466 mutex_unlock(&conn->chan_lock);
3469 rsp.scid = cpu_to_le16(scid);
3470 rsp.dcid = cpu_to_le16(dcid);
3471 rsp.result = cpu_to_le16(result);
3472 rsp.status = cpu_to_le16(status);
3473 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3475 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3476 struct l2cap_info_req info;
3477 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3479 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3480 conn->info_ident = l2cap_get_ident(conn);
3482 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3484 l2cap_send_cmd(conn, conn->info_ident,
3485 L2CAP_INFO_REQ, sizeof(info), &info);
3488 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3489 result == L2CAP_CR_SUCCESS) {
3491 set_bit(CONF_REQ_SENT, &chan->conf_state);
3492 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3493 l2cap_build_conf_req(chan, buf), buf);
3494 chan->num_conf_req++;
3500 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3502 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3503 u16 scid, dcid, result, status;
3504 struct l2cap_chan *chan;
3508 scid = __le16_to_cpu(rsp->scid);
3509 dcid = __le16_to_cpu(rsp->dcid);
3510 result = __le16_to_cpu(rsp->result);
3511 status = __le16_to_cpu(rsp->status);
3513 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3514 dcid, scid, result, status);
3516 mutex_lock(&conn->chan_lock);
3519 chan = __l2cap_get_chan_by_scid(conn, scid);
3525 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3534 l2cap_chan_lock(chan);
3537 case L2CAP_CR_SUCCESS:
3538 l2cap_state_change(chan, BT_CONFIG);
3541 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3543 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3546 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3547 l2cap_build_conf_req(chan, req), req);
3548 chan->num_conf_req++;
3552 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3556 l2cap_chan_del(chan, ECONNREFUSED);
3560 l2cap_chan_unlock(chan);
3563 mutex_unlock(&conn->chan_lock);
3568 static inline void set_default_fcs(struct l2cap_chan *chan)
3570 /* FCS is enabled only in ERTM or streaming mode, if one or both
3573 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3574 chan->fcs = L2CAP_FCS_NONE;
3575 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3576 chan->fcs = L2CAP_FCS_CRC16;
3579 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3581 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3584 struct l2cap_chan *chan;
3587 dcid = __le16_to_cpu(req->dcid);
3588 flags = __le16_to_cpu(req->flags);
3590 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3592 chan = l2cap_get_chan_by_scid(conn, dcid);
3596 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3597 struct l2cap_cmd_rej_cid rej;
3599 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3600 rej.scid = cpu_to_le16(chan->scid);
3601 rej.dcid = cpu_to_le16(chan->dcid);
3603 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3608 /* Reject if config buffer is too small. */
3609 len = cmd_len - sizeof(*req);
3610 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3611 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3612 l2cap_build_conf_rsp(chan, rsp,
3613 L2CAP_CONF_REJECT, flags), rsp);
3618 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3619 chan->conf_len += len;
3621 if (flags & 0x0001) {
3622 /* Incomplete config. Send empty response. */
3623 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3624 l2cap_build_conf_rsp(chan, rsp,
3625 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3629 /* Complete config. */
3630 len = l2cap_parse_conf_req(chan, rsp);
3632 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3636 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3637 chan->num_conf_rsp++;
3639 /* Reset config buffer. */
3642 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3645 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3646 set_default_fcs(chan);
3648 l2cap_state_change(chan, BT_CONNECTED);
3650 if (chan->mode == L2CAP_MODE_ERTM ||
3651 chan->mode == L2CAP_MODE_STREAMING)
3652 err = l2cap_ertm_init(chan);
3655 l2cap_send_disconn_req(chan->conn, chan, -err);
3657 l2cap_chan_ready(chan);
3662 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3664 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3665 l2cap_build_conf_req(chan, buf), buf);
3666 chan->num_conf_req++;
3669 /* Got Conf Rsp PENDING from remote side and asume we sent
3670 Conf Rsp PENDING in the code above */
3671 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3672 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3674 /* check compatibility */
3676 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3677 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3679 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3680 l2cap_build_conf_rsp(chan, rsp,
3681 L2CAP_CONF_SUCCESS, 0x0000), rsp);
3685 l2cap_chan_unlock(chan);
3689 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3691 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3692 u16 scid, flags, result;
3693 struct l2cap_chan *chan;
3694 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3697 scid = __le16_to_cpu(rsp->scid);
3698 flags = __le16_to_cpu(rsp->flags);
3699 result = __le16_to_cpu(rsp->result);
3701 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3704 chan = l2cap_get_chan_by_scid(conn, scid);
3709 case L2CAP_CONF_SUCCESS:
3710 l2cap_conf_rfc_get(chan, rsp->data, len);
3711 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3714 case L2CAP_CONF_PENDING:
3715 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3717 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3720 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3723 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3727 /* check compatibility */
3729 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3730 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3732 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3733 l2cap_build_conf_rsp(chan, buf,
3734 L2CAP_CONF_SUCCESS, 0x0000), buf);
3738 case L2CAP_CONF_UNACCEPT:
3739 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3742 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3743 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3747 /* throw out any old stored conf requests */
3748 result = L2CAP_CONF_SUCCESS;
3749 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3752 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3756 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3757 L2CAP_CONF_REQ, len, req);
3758 chan->num_conf_req++;
3759 if (result != L2CAP_CONF_SUCCESS)
3765 l2cap_chan_set_err(chan, ECONNRESET);
3767 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3768 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3775 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3777 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3778 set_default_fcs(chan);
3780 l2cap_state_change(chan, BT_CONNECTED);
3781 if (chan->mode == L2CAP_MODE_ERTM ||
3782 chan->mode == L2CAP_MODE_STREAMING)
3783 err = l2cap_ertm_init(chan);
3786 l2cap_send_disconn_req(chan->conn, chan, -err);
3788 l2cap_chan_ready(chan);
3792 l2cap_chan_unlock(chan);
3796 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3798 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3799 struct l2cap_disconn_rsp rsp;
3801 struct l2cap_chan *chan;
3804 scid = __le16_to_cpu(req->scid);
3805 dcid = __le16_to_cpu(req->dcid);
3807 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3809 mutex_lock(&conn->chan_lock);
3811 chan = __l2cap_get_chan_by_scid(conn, dcid);
3813 mutex_unlock(&conn->chan_lock);
3817 l2cap_chan_lock(chan);
3821 rsp.dcid = cpu_to_le16(chan->scid);
3822 rsp.scid = cpu_to_le16(chan->dcid);
3823 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3826 sk->sk_shutdown = SHUTDOWN_MASK;
3829 l2cap_chan_hold(chan);
3830 l2cap_chan_del(chan, ECONNRESET);
3832 l2cap_chan_unlock(chan);
3834 chan->ops->close(chan->data);
3835 l2cap_chan_put(chan);
3837 mutex_unlock(&conn->chan_lock);
3842 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3844 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3846 struct l2cap_chan *chan;
3848 scid = __le16_to_cpu(rsp->scid);
3849 dcid = __le16_to_cpu(rsp->dcid);
3851 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3853 mutex_lock(&conn->chan_lock);
3855 chan = __l2cap_get_chan_by_scid(conn, scid);
3857 mutex_unlock(&conn->chan_lock);
3861 l2cap_chan_lock(chan);
3863 l2cap_chan_hold(chan);
3864 l2cap_chan_del(chan, 0);
3866 l2cap_chan_unlock(chan);
3868 chan->ops->close(chan->data);
3869 l2cap_chan_put(chan);
3871 mutex_unlock(&conn->chan_lock);
3876 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3878 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3881 type = __le16_to_cpu(req->type);
3883 BT_DBG("type 0x%4.4x", type);
3885 if (type == L2CAP_IT_FEAT_MASK) {
3887 u32 feat_mask = l2cap_feat_mask;
3888 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3889 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3890 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3892 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3895 feat_mask |= L2CAP_FEAT_EXT_FLOW
3896 | L2CAP_FEAT_EXT_WINDOW;
3898 put_unaligned_le32(feat_mask, rsp->data);
3899 l2cap_send_cmd(conn, cmd->ident,
3900 L2CAP_INFO_RSP, sizeof(buf), buf);
3901 } else if (type == L2CAP_IT_FIXED_CHAN) {
3903 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3906 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3908 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3910 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3911 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3912 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3913 l2cap_send_cmd(conn, cmd->ident,
3914 L2CAP_INFO_RSP, sizeof(buf), buf);
3916 struct l2cap_info_rsp rsp;
3917 rsp.type = cpu_to_le16(type);
3918 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3919 l2cap_send_cmd(conn, cmd->ident,
3920 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3926 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3928 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3931 type = __le16_to_cpu(rsp->type);
3932 result = __le16_to_cpu(rsp->result);
3934 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3936 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3937 if (cmd->ident != conn->info_ident ||
3938 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3941 cancel_delayed_work(&conn->info_timer);
3943 if (result != L2CAP_IR_SUCCESS) {
3944 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3945 conn->info_ident = 0;
3947 l2cap_conn_start(conn);
3953 case L2CAP_IT_FEAT_MASK:
3954 conn->feat_mask = get_unaligned_le32(rsp->data);
3956 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3957 struct l2cap_info_req req;
3958 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3960 conn->info_ident = l2cap_get_ident(conn);
3962 l2cap_send_cmd(conn, conn->info_ident,
3963 L2CAP_INFO_REQ, sizeof(req), &req);
3965 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3966 conn->info_ident = 0;
3968 l2cap_conn_start(conn);
3972 case L2CAP_IT_FIXED_CHAN:
3973 conn->fixed_chan_mask = rsp->data[0];
3974 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3975 conn->info_ident = 0;
3977 l2cap_conn_start(conn);
3984 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3985 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3988 struct l2cap_create_chan_req *req = data;
3989 struct l2cap_create_chan_rsp rsp;
3992 if (cmd_len != sizeof(*req))
3998 psm = le16_to_cpu(req->psm);
3999 scid = le16_to_cpu(req->scid);
4001 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
4003 /* Placeholder: Always reject */
4005 rsp.scid = cpu_to_le16(scid);
4006 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4007 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4009 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4015 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
4016 struct l2cap_cmd_hdr *cmd, void *data)
4018 BT_DBG("conn %p", conn);
4020 return l2cap_connect_rsp(conn, cmd, data);
4023 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4024 u16 icid, u16 result)
4026 struct l2cap_move_chan_rsp rsp;
4028 BT_DBG("icid %d, result %d", icid, result);
4030 rsp.icid = cpu_to_le16(icid);
4031 rsp.result = cpu_to_le16(result);
4033 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4036 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4037 struct l2cap_chan *chan, u16 icid, u16 result)
4039 struct l2cap_move_chan_cfm cfm;
4042 BT_DBG("icid %d, result %d", icid, result);
4044 ident = l2cap_get_ident(conn);
4046 chan->ident = ident;
4048 cfm.icid = cpu_to_le16(icid);
4049 cfm.result = cpu_to_le16(result);
4051 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4054 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4057 struct l2cap_move_chan_cfm_rsp rsp;
4059 BT_DBG("icid %d", icid);
4061 rsp.icid = cpu_to_le16(icid);
4062 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4065 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4066 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4068 struct l2cap_move_chan_req *req = data;
4070 u16 result = L2CAP_MR_NOT_ALLOWED;
4072 if (cmd_len != sizeof(*req))
4075 icid = le16_to_cpu(req->icid);
4077 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4082 /* Placeholder: Always refuse */
4083 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4088 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4089 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4091 struct l2cap_move_chan_rsp *rsp = data;
4094 if (cmd_len != sizeof(*rsp))
4097 icid = le16_to_cpu(rsp->icid);
4098 result = le16_to_cpu(rsp->result);
4100 BT_DBG("icid %d, result %d", icid, result);
4102 /* Placeholder: Always unconfirmed */
4103 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4108 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4109 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4111 struct l2cap_move_chan_cfm *cfm = data;
4114 if (cmd_len != sizeof(*cfm))
4117 icid = le16_to_cpu(cfm->icid);
4118 result = le16_to_cpu(cfm->result);
4120 BT_DBG("icid %d, result %d", icid, result);
4122 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4127 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4128 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4130 struct l2cap_move_chan_cfm_rsp *rsp = data;
4133 if (cmd_len != sizeof(*rsp))
4136 icid = le16_to_cpu(rsp->icid);
4138 BT_DBG("icid %d", icid);
4143 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4148 if (min > max || min < 6 || max > 3200)
4151 if (to_multiplier < 10 || to_multiplier > 3200)
4154 if (max >= to_multiplier * 8)
4157 max_latency = (to_multiplier * 8 / max) - 1;
4158 if (latency > 499 || latency > max_latency)
4164 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4165 struct l2cap_cmd_hdr *cmd, u8 *data)
4167 struct hci_conn *hcon = conn->hcon;
4168 struct l2cap_conn_param_update_req *req;
4169 struct l2cap_conn_param_update_rsp rsp;
4170 u16 min, max, latency, to_multiplier, cmd_len;
4173 if (!(hcon->link_mode & HCI_LM_MASTER))
4176 cmd_len = __le16_to_cpu(cmd->len);
4177 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4180 req = (struct l2cap_conn_param_update_req *) data;
4181 min = __le16_to_cpu(req->min);
4182 max = __le16_to_cpu(req->max);
4183 latency = __le16_to_cpu(req->latency);
4184 to_multiplier = __le16_to_cpu(req->to_multiplier);
4186 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4187 min, max, latency, to_multiplier);
4189 memset(&rsp, 0, sizeof(rsp));
4191 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4193 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4195 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4197 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4201 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
4206 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4207 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4211 switch (cmd->code) {
4212 case L2CAP_COMMAND_REJ:
4213 l2cap_command_rej(conn, cmd, data);
4216 case L2CAP_CONN_REQ:
4217 err = l2cap_connect_req(conn, cmd, data);
4220 case L2CAP_CONN_RSP:
4221 err = l2cap_connect_rsp(conn, cmd, data);
4224 case L2CAP_CONF_REQ:
4225 err = l2cap_config_req(conn, cmd, cmd_len, data);
4228 case L2CAP_CONF_RSP:
4229 err = l2cap_config_rsp(conn, cmd, data);
4232 case L2CAP_DISCONN_REQ:
4233 err = l2cap_disconnect_req(conn, cmd, data);
4236 case L2CAP_DISCONN_RSP:
4237 err = l2cap_disconnect_rsp(conn, cmd, data);
4240 case L2CAP_ECHO_REQ:
4241 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4244 case L2CAP_ECHO_RSP:
4247 case L2CAP_INFO_REQ:
4248 err = l2cap_information_req(conn, cmd, data);
4251 case L2CAP_INFO_RSP:
4252 err = l2cap_information_rsp(conn, cmd, data);
4255 case L2CAP_CREATE_CHAN_REQ:
4256 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4259 case L2CAP_CREATE_CHAN_RSP:
4260 err = l2cap_create_channel_rsp(conn, cmd, data);
4263 case L2CAP_MOVE_CHAN_REQ:
4264 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4267 case L2CAP_MOVE_CHAN_RSP:
4268 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4271 case L2CAP_MOVE_CHAN_CFM:
4272 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4275 case L2CAP_MOVE_CHAN_CFM_RSP:
4276 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4280 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4288 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4289 struct l2cap_cmd_hdr *cmd, u8 *data)
4291 switch (cmd->code) {
4292 case L2CAP_COMMAND_REJ:
4295 case L2CAP_CONN_PARAM_UPDATE_REQ:
4296 return l2cap_conn_param_update_req(conn, cmd, data);
4298 case L2CAP_CONN_PARAM_UPDATE_RSP:
4302 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4307 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4308 struct sk_buff *skb)
4310 u8 *data = skb->data;
4312 struct l2cap_cmd_hdr cmd;
4315 l2cap_raw_recv(conn, skb);
4317 while (len >= L2CAP_CMD_HDR_SIZE) {
4319 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4320 data += L2CAP_CMD_HDR_SIZE;
4321 len -= L2CAP_CMD_HDR_SIZE;
4323 cmd_len = le16_to_cpu(cmd.len);
4325 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
4327 if (cmd_len > len || !cmd.ident) {
4328 BT_DBG("corrupted command");
4332 if (conn->hcon->type == LE_LINK)
4333 err = l2cap_le_sig_cmd(conn, &cmd, data);
4335 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4338 struct l2cap_cmd_rej_unk rej;
4340 BT_ERR("Wrong link type (%d)", err);
4342 /* FIXME: Map err to a valid reason */
4343 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4344 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4354 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4356 u16 our_fcs, rcv_fcs;
4359 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4360 hdr_size = L2CAP_EXT_HDR_SIZE;
4362 hdr_size = L2CAP_ENH_HDR_SIZE;
4364 if (chan->fcs == L2CAP_FCS_CRC16) {
4365 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4366 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4367 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4369 if (our_fcs != rcv_fcs)
4375 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4377 struct l2cap_ctrl control;
4379 BT_DBG("chan %p", chan);
4381 memset(&control, 0, sizeof(control));
4384 control.reqseq = chan->buffer_seq;
4385 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4387 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4388 control.super = L2CAP_SUPER_RNR;
4389 l2cap_send_sframe(chan, &control);
4392 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4393 chan->unacked_frames > 0)
4394 __set_retrans_timer(chan);
4396 /* Send pending iframes */
4397 l2cap_ertm_send(chan);
4399 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4400 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4401 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4404 control.super = L2CAP_SUPER_RR;
4405 l2cap_send_sframe(chan, &control);
4409 static void append_skb_frag(struct sk_buff *skb,
4410 struct sk_buff *new_frag, struct sk_buff **last_frag)
4412 /* skb->len reflects data in skb as well as all fragments
4413 * skb->data_len reflects only data in fragments
4415 if (!skb_has_frag_list(skb))
4416 skb_shinfo(skb)->frag_list = new_frag;
4418 new_frag->next = NULL;
4420 (*last_frag)->next = new_frag;
4421 *last_frag = new_frag;
4423 skb->len += new_frag->len;
4424 skb->data_len += new_frag->len;
4425 skb->truesize += new_frag->truesize;
4428 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4429 struct l2cap_ctrl *control)
4433 switch (control->sar) {
4434 case L2CAP_SAR_UNSEGMENTED:
4438 err = chan->ops->recv(chan->data, skb);
4441 case L2CAP_SAR_START:
4445 chan->sdu_len = get_unaligned_le16(skb->data);
4446 skb_pull(skb, L2CAP_SDULEN_SIZE);
4448 if (chan->sdu_len > chan->imtu) {
4453 if (skb->len >= chan->sdu_len)
4457 chan->sdu_last_frag = skb;
4463 case L2CAP_SAR_CONTINUE:
4467 append_skb_frag(chan->sdu, skb,
4468 &chan->sdu_last_frag);
4471 if (chan->sdu->len >= chan->sdu_len)
4481 append_skb_frag(chan->sdu, skb,
4482 &chan->sdu_last_frag);
4485 if (chan->sdu->len != chan->sdu_len)
4488 err = chan->ops->recv(chan->data, chan->sdu);
4491 /* Reassembly complete */
4493 chan->sdu_last_frag = NULL;
4501 kfree_skb(chan->sdu);
4503 chan->sdu_last_frag = NULL;
4510 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4514 if (chan->mode != L2CAP_MODE_ERTM)
4517 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4518 l2cap_tx(chan, 0, 0, event);
4521 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4524 /* Pass sequential frames to l2cap_reassemble_sdu()
4525 * until a gap is encountered.
4528 BT_DBG("chan %p", chan);
4530 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4531 struct sk_buff *skb;
4532 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4533 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4535 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4540 skb_unlink(skb, &chan->srej_q);
4541 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4542 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4547 if (skb_queue_empty(&chan->srej_q)) {
4548 chan->rx_state = L2CAP_RX_STATE_RECV;
4549 l2cap_send_ack(chan);
4555 static void l2cap_handle_srej(struct l2cap_chan *chan,
4556 struct l2cap_ctrl *control)
4558 struct sk_buff *skb;
4560 BT_DBG("chan %p, control %p", chan, control);
4562 if (control->reqseq == chan->next_tx_seq) {
4563 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4564 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4568 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4571 BT_DBG("Seq %d not available for retransmission",
4576 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4577 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4578 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4582 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4584 if (control->poll) {
4585 l2cap_pass_to_tx(chan, control);
4587 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4588 l2cap_retransmit(chan, control);
4589 l2cap_ertm_send(chan);
4591 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4592 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4593 chan->srej_save_reqseq = control->reqseq;
4596 l2cap_pass_to_tx_fbit(chan, control);
4598 if (control->final) {
4599 if (chan->srej_save_reqseq != control->reqseq ||
4600 !test_and_clear_bit(CONN_SREJ_ACT,
4602 l2cap_retransmit(chan, control);
4604 l2cap_retransmit(chan, control);
4605 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4606 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4607 chan->srej_save_reqseq = control->reqseq;
4613 static void l2cap_handle_rej(struct l2cap_chan *chan,
4614 struct l2cap_ctrl *control)
4616 struct sk_buff *skb;
4618 BT_DBG("chan %p, control %p", chan, control);
4620 if (control->reqseq == chan->next_tx_seq) {
4621 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4622 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4626 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4628 if (chan->max_tx && skb &&
4629 bt_cb(skb)->control.retries >= chan->max_tx) {
4630 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4631 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4635 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4637 l2cap_pass_to_tx(chan, control);
4639 if (control->final) {
4640 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4641 l2cap_retransmit_all(chan, control);
4643 l2cap_retransmit_all(chan, control);
4644 l2cap_ertm_send(chan);
4645 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4646 set_bit(CONN_REJ_ACT, &chan->conn_state);
4650 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4652 BT_DBG("chan %p, txseq %d", chan, txseq);
4654 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4655 chan->expected_tx_seq);
4657 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4658 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4660 /* See notes below regarding "double poll" and
4663 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4664 BT_DBG("Invalid/Ignore - after SREJ");
4665 return L2CAP_TXSEQ_INVALID_IGNORE;
4667 BT_DBG("Invalid - in window after SREJ sent");
4668 return L2CAP_TXSEQ_INVALID;
4672 if (chan->srej_list.head == txseq) {
4673 BT_DBG("Expected SREJ");
4674 return L2CAP_TXSEQ_EXPECTED_SREJ;
4677 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4678 BT_DBG("Duplicate SREJ - txseq already stored");
4679 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4682 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4683 BT_DBG("Unexpected SREJ - not requested");
4684 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4688 if (chan->expected_tx_seq == txseq) {
4689 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4691 BT_DBG("Invalid - txseq outside tx window");
4692 return L2CAP_TXSEQ_INVALID;
4695 return L2CAP_TXSEQ_EXPECTED;
4699 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4700 __seq_offset(chan, chan->expected_tx_seq,
4701 chan->last_acked_seq)){
4702 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4703 return L2CAP_TXSEQ_DUPLICATE;
4706 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4707 /* A source of invalid packets is a "double poll" condition,
4708 * where delays cause us to send multiple poll packets. If
4709 * the remote stack receives and processes both polls,
4710 * sequence numbers can wrap around in such a way that a
4711 * resent frame has a sequence number that looks like new data
4712 * with a sequence gap. This would trigger an erroneous SREJ
4715 * Fortunately, this is impossible with a tx window that's
4716 * less than half of the maximum sequence number, which allows
4717 * invalid frames to be safely ignored.
4719 * With tx window sizes greater than half of the tx window
4720 * maximum, the frame is invalid and cannot be ignored. This
4721 * causes a disconnect.
4724 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4725 BT_DBG("Invalid/Ignore - txseq outside tx window");
4726 return L2CAP_TXSEQ_INVALID_IGNORE;
4728 BT_DBG("Invalid - txseq outside tx window");
4729 return L2CAP_TXSEQ_INVALID;
4732 BT_DBG("Unexpected - txseq indicates missing frames");
4733 return L2CAP_TXSEQ_UNEXPECTED;
4737 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4738 struct l2cap_ctrl *control,
4739 struct sk_buff *skb, u8 event)
4742 bool skb_in_use = 0;
4744 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4748 case L2CAP_EV_RECV_IFRAME:
4749 switch (l2cap_classify_txseq(chan, control->txseq)) {
4750 case L2CAP_TXSEQ_EXPECTED:
4751 l2cap_pass_to_tx(chan, control);
4753 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4754 BT_DBG("Busy, discarding expected seq %d",
4759 chan->expected_tx_seq = __next_seq(chan,
4762 chan->buffer_seq = chan->expected_tx_seq;
4765 err = l2cap_reassemble_sdu(chan, skb, control);
4769 if (control->final) {
4770 if (!test_and_clear_bit(CONN_REJ_ACT,
4771 &chan->conn_state)) {
4773 l2cap_retransmit_all(chan, control);
4774 l2cap_ertm_send(chan);
4778 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4779 l2cap_send_ack(chan);
4781 case L2CAP_TXSEQ_UNEXPECTED:
4782 l2cap_pass_to_tx(chan, control);
4784 /* Can't issue SREJ frames in the local busy state.
4785 * Drop this frame, it will be seen as missing
4786 * when local busy is exited.
4788 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4789 BT_DBG("Busy, discarding unexpected seq %d",
4794 /* There was a gap in the sequence, so an SREJ
4795 * must be sent for each missing frame. The
4796 * current frame is stored for later use.
4798 skb_queue_tail(&chan->srej_q, skb);
4800 BT_DBG("Queued %p (queue len %d)", skb,
4801 skb_queue_len(&chan->srej_q));
4803 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4804 l2cap_seq_list_clear(&chan->srej_list);
4805 l2cap_send_srej(chan, control->txseq);
4807 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4809 case L2CAP_TXSEQ_DUPLICATE:
4810 l2cap_pass_to_tx(chan, control);
4812 case L2CAP_TXSEQ_INVALID_IGNORE:
4814 case L2CAP_TXSEQ_INVALID:
4816 l2cap_send_disconn_req(chan->conn, chan,
4821 case L2CAP_EV_RECV_RR:
4822 l2cap_pass_to_tx(chan, control);
4823 if (control->final) {
4824 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4826 if (!test_and_clear_bit(CONN_REJ_ACT,
4827 &chan->conn_state)) {
4829 l2cap_retransmit_all(chan, control);
4832 l2cap_ertm_send(chan);
4833 } else if (control->poll) {
4834 l2cap_send_i_or_rr_or_rnr(chan);
4836 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4837 &chan->conn_state) &&
4838 chan->unacked_frames)
4839 __set_retrans_timer(chan);
4841 l2cap_ertm_send(chan);
4844 case L2CAP_EV_RECV_RNR:
4845 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4846 l2cap_pass_to_tx(chan, control);
4847 if (control && control->poll) {
4848 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4849 l2cap_send_rr_or_rnr(chan, 0);
4851 __clear_retrans_timer(chan);
4852 l2cap_seq_list_clear(&chan->retrans_list);
4854 case L2CAP_EV_RECV_REJ:
4855 l2cap_handle_rej(chan, control);
4857 case L2CAP_EV_RECV_SREJ:
4858 l2cap_handle_srej(chan, control);
4864 if (skb && !skb_in_use) {
4865 BT_DBG("Freeing %p", skb);
4872 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4873 struct l2cap_ctrl *control,
4874 struct sk_buff *skb, u8 event)
4877 u16 txseq = control->txseq;
4878 bool skb_in_use = 0;
4880 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4884 case L2CAP_EV_RECV_IFRAME:
4885 switch (l2cap_classify_txseq(chan, txseq)) {
4886 case L2CAP_TXSEQ_EXPECTED:
4887 /* Keep frame for reassembly later */
4888 l2cap_pass_to_tx(chan, control);
4889 skb_queue_tail(&chan->srej_q, skb);
4891 BT_DBG("Queued %p (queue len %d)", skb,
4892 skb_queue_len(&chan->srej_q));
4894 chan->expected_tx_seq = __next_seq(chan, txseq);
4896 case L2CAP_TXSEQ_EXPECTED_SREJ:
4897 l2cap_seq_list_pop(&chan->srej_list);
4899 l2cap_pass_to_tx(chan, control);
4900 skb_queue_tail(&chan->srej_q, skb);
4902 BT_DBG("Queued %p (queue len %d)", skb,
4903 skb_queue_len(&chan->srej_q));
4905 err = l2cap_rx_queued_iframes(chan);
4910 case L2CAP_TXSEQ_UNEXPECTED:
4911 /* Got a frame that can't be reassembled yet.
4912 * Save it for later, and send SREJs to cover
4913 * the missing frames.
4915 skb_queue_tail(&chan->srej_q, skb);
4917 BT_DBG("Queued %p (queue len %d)", skb,
4918 skb_queue_len(&chan->srej_q));
4920 l2cap_pass_to_tx(chan, control);
4921 l2cap_send_srej(chan, control->txseq);
4923 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4924 /* This frame was requested with an SREJ, but
4925 * some expected retransmitted frames are
4926 * missing. Request retransmission of missing
4929 skb_queue_tail(&chan->srej_q, skb);
4931 BT_DBG("Queued %p (queue len %d)", skb,
4932 skb_queue_len(&chan->srej_q));
4934 l2cap_pass_to_tx(chan, control);
4935 l2cap_send_srej_list(chan, control->txseq);
4937 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4938 /* We've already queued this frame. Drop this copy. */
4939 l2cap_pass_to_tx(chan, control);
4941 case L2CAP_TXSEQ_DUPLICATE:
4942 /* Expecting a later sequence number, so this frame
4943 * was already received. Ignore it completely.
4946 case L2CAP_TXSEQ_INVALID_IGNORE:
4948 case L2CAP_TXSEQ_INVALID:
4950 l2cap_send_disconn_req(chan->conn, chan,
4955 case L2CAP_EV_RECV_RR:
4956 l2cap_pass_to_tx(chan, control);
4957 if (control->final) {
4958 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4960 if (!test_and_clear_bit(CONN_REJ_ACT,
4961 &chan->conn_state)) {
4963 l2cap_retransmit_all(chan, control);
4966 l2cap_ertm_send(chan);
4967 } else if (control->poll) {
4968 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4969 &chan->conn_state) &&
4970 chan->unacked_frames) {
4971 __set_retrans_timer(chan);
4974 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4975 l2cap_send_srej_tail(chan);
4977 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4978 &chan->conn_state) &&
4979 chan->unacked_frames)
4980 __set_retrans_timer(chan);
4982 l2cap_send_ack(chan);
4985 case L2CAP_EV_RECV_RNR:
4986 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4987 l2cap_pass_to_tx(chan, control);
4988 if (control->poll) {
4989 l2cap_send_srej_tail(chan);
4991 struct l2cap_ctrl rr_control;
4992 memset(&rr_control, 0, sizeof(rr_control));
4993 rr_control.sframe = 1;
4994 rr_control.super = L2CAP_SUPER_RR;
4995 rr_control.reqseq = chan->buffer_seq;
4996 l2cap_send_sframe(chan, &rr_control);
5000 case L2CAP_EV_RECV_REJ:
5001 l2cap_handle_rej(chan, control);
5003 case L2CAP_EV_RECV_SREJ:
5004 l2cap_handle_srej(chan, control);
5008 if (skb && !skb_in_use) {
5009 BT_DBG("Freeing %p", skb);
5016 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5018 /* Make sure reqseq is for a packet that has been sent but not acked */
5021 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5022 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5025 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5026 struct sk_buff *skb, u8 event)
5030 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5031 control, skb, event, chan->rx_state);
5033 if (__valid_reqseq(chan, control->reqseq)) {
5034 switch (chan->rx_state) {
5035 case L2CAP_RX_STATE_RECV:
5036 err = l2cap_rx_state_recv(chan, control, skb, event);
5038 case L2CAP_RX_STATE_SREJ_SENT:
5039 err = l2cap_rx_state_srej_sent(chan, control, skb,
5047 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5048 control->reqseq, chan->next_tx_seq,
5049 chan->expected_ack_seq);
5050 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5056 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5057 struct sk_buff *skb)
5061 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5064 if (l2cap_classify_txseq(chan, control->txseq) ==
5065 L2CAP_TXSEQ_EXPECTED) {
5066 l2cap_pass_to_tx(chan, control);
5068 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5069 __next_seq(chan, chan->buffer_seq));
5071 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5073 l2cap_reassemble_sdu(chan, skb, control);
5076 kfree_skb(chan->sdu);
5079 chan->sdu_last_frag = NULL;
5083 BT_DBG("Freeing %p", skb);
5088 chan->last_acked_seq = control->txseq;
5089 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5094 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5096 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5100 __unpack_control(chan, skb);
5105 * We can just drop the corrupted I-frame here.
5106 * Receiver will miss it and start proper recovery
5107 * procedures and ask for retransmission.
5109 if (l2cap_check_fcs(chan, skb))
5112 if (!control->sframe && control->sar == L2CAP_SAR_START)
5113 len -= L2CAP_SDULEN_SIZE;
5115 if (chan->fcs == L2CAP_FCS_CRC16)
5116 len -= L2CAP_FCS_SIZE;
5118 if (len > chan->mps) {
5119 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5123 if (!control->sframe) {
5126 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5127 control->sar, control->reqseq, control->final,
5130 /* Validate F-bit - F=0 always valid, F=1 only
5131 * valid in TX WAIT_F
5133 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5136 if (chan->mode != L2CAP_MODE_STREAMING) {
5137 event = L2CAP_EV_RECV_IFRAME;
5138 err = l2cap_rx(chan, control, skb, event);
5140 err = l2cap_stream_rx(chan, control, skb);
5144 l2cap_send_disconn_req(chan->conn, chan,
5147 const u8 rx_func_to_event[4] = {
5148 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5149 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5152 /* Only I-frames are expected in streaming mode */
5153 if (chan->mode == L2CAP_MODE_STREAMING)
5156 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5157 control->reqseq, control->final, control->poll,
5162 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5166 /* Validate F and P bits */
5167 if (control->final && (control->poll ||
5168 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5171 event = rx_func_to_event[control->super];
5172 if (l2cap_rx(chan, control, skb, event))
5173 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5183 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
5185 struct l2cap_chan *chan;
5187 chan = l2cap_get_chan_by_scid(conn, cid);
5189 BT_DBG("unknown cid 0x%4.4x", cid);
5190 /* Drop packet and return */
5195 BT_DBG("chan %p, len %d", chan, skb->len);
5197 if (chan->state != BT_CONNECTED)
5200 switch (chan->mode) {
5201 case L2CAP_MODE_BASIC:
5202 /* If socket recv buffers overflows we drop data here
5203 * which is *bad* because L2CAP has to be reliable.
5204 * But we don't have any other choice. L2CAP doesn't
5205 * provide flow control mechanism. */
5207 if (chan->imtu < skb->len)
5210 if (!chan->ops->recv(chan->data, skb))
5214 case L2CAP_MODE_ERTM:
5215 case L2CAP_MODE_STREAMING:
5216 l2cap_data_rcv(chan, skb);
5220 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5228 l2cap_chan_unlock(chan);
5233 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5235 struct l2cap_chan *chan;
5237 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5241 BT_DBG("chan %p, len %d", chan, skb->len);
5243 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5246 if (chan->imtu < skb->len)
5249 if (!chan->ops->recv(chan->data, skb))
5258 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5259 struct sk_buff *skb)
5261 struct l2cap_chan *chan;
5263 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5267 BT_DBG("chan %p, len %d", chan, skb->len);
5269 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5272 if (chan->imtu < skb->len)
5275 if (!chan->ops->recv(chan->data, skb))
5284 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5286 struct l2cap_hdr *lh = (void *) skb->data;
5290 skb_pull(skb, L2CAP_HDR_SIZE);
5291 cid = __le16_to_cpu(lh->cid);
5292 len = __le16_to_cpu(lh->len);
5294 if (len != skb->len) {
5299 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5302 case L2CAP_CID_LE_SIGNALING:
5303 case L2CAP_CID_SIGNALING:
5304 l2cap_sig_channel(conn, skb);
5307 case L2CAP_CID_CONN_LESS:
5308 psm = get_unaligned((__le16 *) skb->data);
5310 l2cap_conless_channel(conn, psm, skb);
5313 case L2CAP_CID_LE_DATA:
5314 l2cap_att_channel(conn, cid, skb);
5318 if (smp_sig_channel(conn, skb))
5319 l2cap_conn_del(conn->hcon, EACCES);
5323 l2cap_data_channel(conn, cid, skb);
5328 /* ---- L2CAP interface with lower layer (HCI) ---- */
5330 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5332 int exact = 0, lm1 = 0, lm2 = 0;
5333 struct l2cap_chan *c;
5335 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5337 /* Find listening sockets and check their link_mode */
5338 read_lock(&chan_list_lock);
5339 list_for_each_entry(c, &chan_list, global_l) {
5340 struct sock *sk = c->sk;
5342 if (c->state != BT_LISTEN)
5345 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5346 lm1 |= HCI_LM_ACCEPT;
5347 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5348 lm1 |= HCI_LM_MASTER;
5350 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5351 lm2 |= HCI_LM_ACCEPT;
5352 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5353 lm2 |= HCI_LM_MASTER;
5356 read_unlock(&chan_list_lock);
5358 return exact ? lm1 : lm2;
5361 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5363 struct l2cap_conn *conn;
5365 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5368 conn = l2cap_conn_add(hcon, status);
5370 l2cap_conn_ready(conn);
5372 l2cap_conn_del(hcon, bt_to_errno(status));
5377 int l2cap_disconn_ind(struct hci_conn *hcon)
5379 struct l2cap_conn *conn = hcon->l2cap_data;
5381 BT_DBG("hcon %p", hcon);
5384 return HCI_ERROR_REMOTE_USER_TERM;
5385 return conn->disc_reason;
5388 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5390 BT_DBG("hcon %p reason %d", hcon, reason);
5392 l2cap_conn_del(hcon, bt_to_errno(reason));
5396 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5398 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5401 if (encrypt == 0x00) {
5402 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5403 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5404 } else if (chan->sec_level == BT_SECURITY_HIGH)
5405 l2cap_chan_close(chan, ECONNREFUSED);
5407 if (chan->sec_level == BT_SECURITY_MEDIUM)
5408 __clear_chan_timer(chan);
5412 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5414 struct l2cap_conn *conn = hcon->l2cap_data;
5415 struct l2cap_chan *chan;
5420 BT_DBG("conn %p", conn);
5422 if (hcon->type == LE_LINK) {
5423 if (!status && encrypt)
5424 smp_distribute_keys(conn, 0);
5425 cancel_delayed_work(&conn->security_timer);
5428 mutex_lock(&conn->chan_lock);
5430 list_for_each_entry(chan, &conn->chan_l, list) {
5431 l2cap_chan_lock(chan);
5433 BT_DBG("chan->scid %d", chan->scid);
5435 if (chan->scid == L2CAP_CID_LE_DATA) {
5436 if (!status && encrypt) {
5437 chan->sec_level = hcon->sec_level;
5438 l2cap_chan_ready(chan);
5441 l2cap_chan_unlock(chan);
5445 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5446 l2cap_chan_unlock(chan);
5450 if (!status && (chan->state == BT_CONNECTED ||
5451 chan->state == BT_CONFIG)) {
5452 struct sock *sk = chan->sk;
5454 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5455 sk->sk_state_change(sk);
5457 l2cap_check_encryption(chan, encrypt);
5458 l2cap_chan_unlock(chan);
5462 if (chan->state == BT_CONNECT) {
5464 l2cap_send_conn_req(chan);
5466 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5468 } else if (chan->state == BT_CONNECT2) {
5469 struct sock *sk = chan->sk;
5470 struct l2cap_conn_rsp rsp;
5476 if (test_bit(BT_SK_DEFER_SETUP,
5477 &bt_sk(sk)->flags)) {
5478 struct sock *parent = bt_sk(sk)->parent;
5479 res = L2CAP_CR_PEND;
5480 stat = L2CAP_CS_AUTHOR_PEND;
5482 parent->sk_data_ready(parent, 0);
5484 __l2cap_state_change(chan, BT_CONFIG);
5485 res = L2CAP_CR_SUCCESS;
5486 stat = L2CAP_CS_NO_INFO;
5489 __l2cap_state_change(chan, BT_DISCONN);
5490 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5491 res = L2CAP_CR_SEC_BLOCK;
5492 stat = L2CAP_CS_NO_INFO;
5497 rsp.scid = cpu_to_le16(chan->dcid);
5498 rsp.dcid = cpu_to_le16(chan->scid);
5499 rsp.result = cpu_to_le16(res);
5500 rsp.status = cpu_to_le16(stat);
5501 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5505 l2cap_chan_unlock(chan);
5508 mutex_unlock(&conn->chan_lock);
5513 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5515 struct l2cap_conn *conn = hcon->l2cap_data;
5518 conn = l2cap_conn_add(hcon, 0);
5523 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5525 if (!(flags & ACL_CONT)) {
5526 struct l2cap_hdr *hdr;
5530 BT_ERR("Unexpected start frame (len %d)", skb->len);
5531 kfree_skb(conn->rx_skb);
5532 conn->rx_skb = NULL;
5534 l2cap_conn_unreliable(conn, ECOMM);
5537 /* Start fragment always begin with Basic L2CAP header */
5538 if (skb->len < L2CAP_HDR_SIZE) {
5539 BT_ERR("Frame is too short (len %d)", skb->len);
5540 l2cap_conn_unreliable(conn, ECOMM);
5544 hdr = (struct l2cap_hdr *) skb->data;
5545 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5547 if (len == skb->len) {
5548 /* Complete frame received */
5549 l2cap_recv_frame(conn, skb);
5553 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5555 if (skb->len > len) {
5556 BT_ERR("Frame is too long (len %d, expected len %d)",
5558 l2cap_conn_unreliable(conn, ECOMM);
5562 /* Allocate skb for the complete frame (with header) */
5563 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5567 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5569 conn->rx_len = len - skb->len;
5571 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5573 if (!conn->rx_len) {
5574 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5575 l2cap_conn_unreliable(conn, ECOMM);
5579 if (skb->len > conn->rx_len) {
5580 BT_ERR("Fragment is too long (len %d, expected %d)",
5581 skb->len, conn->rx_len);
5582 kfree_skb(conn->rx_skb);
5583 conn->rx_skb = NULL;
5585 l2cap_conn_unreliable(conn, ECOMM);
5589 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5591 conn->rx_len -= skb->len;
5593 if (!conn->rx_len) {
5594 /* Complete frame received */
5595 l2cap_recv_frame(conn, conn->rx_skb);
5596 conn->rx_skb = NULL;
5605 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5607 struct l2cap_chan *c;
5609 read_lock(&chan_list_lock);
5611 list_for_each_entry(c, &chan_list, global_l) {
5612 struct sock *sk = c->sk;
5614 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5615 batostr(&bt_sk(sk)->src),
5616 batostr(&bt_sk(sk)->dst),
5617 c->state, __le16_to_cpu(c->psm),
5618 c->scid, c->dcid, c->imtu, c->omtu,
5619 c->sec_level, c->mode);
5622 read_unlock(&chan_list_lock);
5627 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5629 return single_open(file, l2cap_debugfs_show, inode->i_private);
5632 static const struct file_operations l2cap_debugfs_fops = {
5633 .open = l2cap_debugfs_open,
5635 .llseek = seq_lseek,
5636 .release = single_release,
5639 static struct dentry *l2cap_debugfs;
5641 int __init l2cap_init(void)
5645 err = l2cap_init_sockets();
5650 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5651 bt_debugfs, NULL, &l2cap_debugfs_fops);
5653 BT_ERR("Failed to create L2CAP debug file");
5659 void l2cap_exit(void)
5661 debugfs_remove(l2cap_debugfs);
5662 l2cap_cleanup_sockets();
5665 module_param(disable_ertm, bool, 0644);
5666 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");