2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
60 bool disable_ertm = 1;
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
77 struct sk_buff_head *skbs, u8 event);
79 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
85 list_for_each_entry(c, &conn->chan_l, list) {
92 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
96 list_for_each_entry(c, &conn->chan_l, list) {
103 /* Find channel with given SCID.
104 * Returns locked channel. */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 struct l2cap_chan *c;
109 mutex_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
113 mutex_unlock(&conn->chan_lock);
118 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
120 struct l2cap_chan *c;
122 list_for_each_entry(c, &conn->chan_l, list) {
123 if (c->ident == ident)
129 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
131 struct l2cap_chan *c;
133 list_for_each_entry(c, &chan_list, global_l) {
134 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
140 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
144 write_lock(&chan_list_lock);
146 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
159 for (p = 0x1001; p < 0x1100; p += 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
161 chan->psm = cpu_to_le16(p);
162 chan->sport = cpu_to_le16(p);
169 write_unlock(&chan_list_lock);
173 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
175 write_lock(&chan_list_lock);
179 write_unlock(&chan_list_lock);
184 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
186 u16 cid = L2CAP_CID_DYN_START;
188 for (; cid < L2CAP_CID_DYN_END; cid++) {
189 if (!__l2cap_get_chan_by_scid(conn, cid))
196 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
198 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
199 state_to_string(state));
202 chan->ops->state_change(chan->data, state);
205 static void l2cap_state_change(struct l2cap_chan *chan, int state)
207 struct sock *sk = chan->sk;
210 __l2cap_state_change(chan, state);
214 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
216 struct sock *sk = chan->sk;
221 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
223 struct sock *sk = chan->sk;
226 __l2cap_chan_set_err(chan, err);
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocations.
 */
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
256 size_t alloc_size, i;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size = roundup_pow_of_two(size);
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
279 kfree(seq_list->list);
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
285 /* Constant-time check for list membership */
286 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
289 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
291 u16 mask = seq_list->mask;
293 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR;
296 } else if (seq_list->head == seq) {
297 /* Head can be removed in constant time */
298 seq_list->head = seq_list->list[seq & mask];
299 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
301 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
302 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
303 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 /* Walk the list to find the sequence number */
307 u16 prev = seq_list->head;
308 while (seq_list->list[prev & mask] != seq) {
309 prev = seq_list->list[prev & mask];
310 if (prev == L2CAP_SEQ_LIST_TAIL)
311 return L2CAP_SEQ_LIST_CLEAR;
314 /* Unlink the number from the list and clear it */
315 seq_list->list[prev & mask] = seq_list->list[seq & mask];
316 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
317 if (seq_list->tail == seq)
318 seq_list->tail = prev;
323 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list, seq_list->head);
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
343 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
345 u16 mask = seq_list->mask;
347 /* All appends happen in constant time */
349 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
352 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
353 seq_list->head = seq;
355 seq_list->list[seq_list->tail & mask] = seq;
357 seq_list->tail = seq;
358 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler fired when a channel timer expires: close the
 * channel with a reason derived from its current state.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
					chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}

/* Allocate a channel, register it on the global list and take the
 * initial reference. Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	atomic_set(&chan->refcnt, 1);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}

/* Unregister from the global list and drop the creation reference. */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}

/* Install the default ERTM/security parameters on a fresh channel. */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
439 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
441 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
442 __le16_to_cpu(chan->psm), chan->dcid);
444 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
448 switch (chan->chan_type) {
449 case L2CAP_CHAN_CONN_ORIENTED:
450 if (conn->hcon->type == LE_LINK) {
452 chan->omtu = L2CAP_LE_DEFAULT_MTU;
453 chan->scid = L2CAP_CID_LE_DATA;
454 chan->dcid = L2CAP_CID_LE_DATA;
456 /* Alloc CID for connection-oriented socket */
457 chan->scid = l2cap_alloc_cid(conn);
458 chan->omtu = L2CAP_DEFAULT_MTU;
462 case L2CAP_CHAN_CONN_LESS:
463 /* Connectionless socket */
464 chan->scid = L2CAP_CID_CONN_LESS;
465 chan->dcid = L2CAP_CID_CONN_LESS;
466 chan->omtu = L2CAP_DEFAULT_MTU;
470 /* Raw socket can send/recv signalling messages only */
471 chan->scid = L2CAP_CID_SIGNALING;
472 chan->dcid = L2CAP_CID_SIGNALING;
473 chan->omtu = L2CAP_DEFAULT_MTU;
476 chan->local_id = L2CAP_BESTEFFORT_ID;
477 chan->local_stype = L2CAP_SERV_BESTEFFORT;
478 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
479 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
480 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
481 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
483 l2cap_chan_hold(chan);
485 list_add(&chan->list, &conn->chan_l);
488 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
490 mutex_lock(&conn->chan_lock);
491 __l2cap_chan_add(conn, chan);
492 mutex_unlock(&conn->chan_lock);
495 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
497 struct sock *sk = chan->sk;
498 struct l2cap_conn *conn = chan->conn;
499 struct sock *parent = bt_sk(sk)->parent;
501 __clear_chan_timer(chan);
503 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
506 /* Delete from channel list */
507 list_del(&chan->list);
509 l2cap_chan_put(chan);
512 hci_conn_put(conn->hcon);
517 __l2cap_state_change(chan, BT_CLOSED);
518 sock_set_flag(sk, SOCK_ZAPPED);
521 __l2cap_chan_set_err(chan, err);
524 bt_accept_unlink(sk);
525 parent->sk_data_ready(parent, 0);
527 sk->sk_state_change(sk);
531 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
534 skb_queue_purge(&chan->tx_q);
536 if (chan->mode == L2CAP_MODE_ERTM) {
537 __clear_retrans_timer(chan);
538 __clear_monitor_timer(chan);
539 __clear_ack_timer(chan);
541 skb_queue_purge(&chan->srej_q);
543 l2cap_seq_list_free(&chan->srej_list);
544 l2cap_seq_list_free(&chan->retrans_list);
548 static void l2cap_chan_cleanup_listen(struct sock *parent)
552 BT_DBG("parent %p", parent);
554 /* Close not yet accepted channels */
555 while ((sk = bt_accept_dequeue(parent, NULL))) {
556 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
558 l2cap_chan_lock(chan);
559 __clear_chan_timer(chan);
560 l2cap_chan_close(chan, ECONNRESET);
561 l2cap_chan_unlock(chan);
563 chan->ops->close(chan->data);
567 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
569 struct l2cap_conn *conn = chan->conn;
570 struct sock *sk = chan->sk;
572 BT_DBG("chan %p state %s sk %p", chan,
573 state_to_string(chan->state), sk);
575 switch (chan->state) {
578 l2cap_chan_cleanup_listen(sk);
580 __l2cap_state_change(chan, BT_CLOSED);
581 sock_set_flag(sk, SOCK_ZAPPED);
587 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
588 conn->hcon->type == ACL_LINK) {
589 __set_chan_timer(chan, sk->sk_sndtimeo);
590 l2cap_send_disconn_req(conn, chan, reason);
592 l2cap_chan_del(chan, reason);
596 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
597 conn->hcon->type == ACL_LINK) {
598 struct l2cap_conn_rsp rsp;
601 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
602 result = L2CAP_CR_SEC_BLOCK;
604 result = L2CAP_CR_BAD_PSM;
605 l2cap_state_change(chan, BT_DISCONN);
607 rsp.scid = cpu_to_le16(chan->dcid);
608 rsp.dcid = cpu_to_le16(chan->scid);
609 rsp.result = cpu_to_le16(result);
610 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
611 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
615 l2cap_chan_del(chan, reason);
620 l2cap_chan_del(chan, reason);
625 sock_set_flag(sk, SOCK_ZAPPED);
631 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
633 if (chan->chan_type == L2CAP_CHAN_RAW) {
634 switch (chan->sec_level) {
635 case BT_SECURITY_HIGH:
636 return HCI_AT_DEDICATED_BONDING_MITM;
637 case BT_SECURITY_MEDIUM:
638 return HCI_AT_DEDICATED_BONDING;
640 return HCI_AT_NO_BONDING;
642 } else if (chan->psm == cpu_to_le16(0x0001)) {
643 if (chan->sec_level == BT_SECURITY_LOW)
644 chan->sec_level = BT_SECURITY_SDP;
646 if (chan->sec_level == BT_SECURITY_HIGH)
647 return HCI_AT_NO_BONDING_MITM;
649 return HCI_AT_NO_BONDING;
651 switch (chan->sec_level) {
652 case BT_SECURITY_HIGH:
653 return HCI_AT_GENERAL_BONDING_MITM;
654 case BT_SECURITY_MEDIUM:
655 return HCI_AT_GENERAL_BONDING;
657 return HCI_AT_NO_BONDING;
662 /* Service level security */
663 int l2cap_chan_check_security(struct l2cap_chan *chan)
665 struct l2cap_conn *conn = chan->conn;
668 auth_type = l2cap_get_auth_type(chan);
670 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
673 static u8 l2cap_get_ident(struct l2cap_conn *conn)
677 /* Get next available identificator.
678 * 1 - 128 are used by kernel.
679 * 129 - 199 are reserved.
680 * 200 - 254 are used by utilities like l2ping, etc.
683 spin_lock(&conn->lock);
685 if (++conn->tx_ident > 128)
690 spin_unlock(&conn->lock);
695 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
697 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
700 BT_DBG("code 0x%2.2x", code);
705 if (lmp_no_flush_capable(conn->hcon->hdev))
706 flags = ACL_START_NO_FLUSH;
710 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
711 skb->priority = HCI_PRIO_MAX;
713 hci_send_acl(conn->hchan, skb, flags);
716 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
718 struct hci_conn *hcon = chan->conn->hcon;
721 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
724 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
725 lmp_no_flush_capable(hcon->hdev))
726 flags = ACL_START_NO_FLUSH;
730 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
731 hci_send_acl(chan->conn->hchan, skb, flags);
734 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
736 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
737 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
739 if (enh & L2CAP_CTRL_FRAME_TYPE) {
742 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
743 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
750 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
751 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
758 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
760 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
761 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
763 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
766 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
767 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
774 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
775 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
782 static inline void __unpack_control(struct l2cap_chan *chan,
785 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
786 __unpack_extended_control(get_unaligned_le32(skb->data),
787 &bt_cb(skb)->control);
788 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
790 __unpack_enhanced_control(get_unaligned_le16(skb->data),
791 &bt_cb(skb)->control);
792 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
796 static u32 __pack_extended_control(struct l2cap_ctrl *control)
800 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
801 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
803 if (control->sframe) {
804 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
805 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
806 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
808 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
809 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
815 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
819 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
820 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
822 if (control->sframe) {
823 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
824 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
825 packed |= L2CAP_CTRL_FRAME_TYPE;
827 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
828 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
834 static inline void __pack_control(struct l2cap_chan *chan,
835 struct l2cap_ctrl *control,
838 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
839 put_unaligned_le32(__pack_extended_control(control),
840 skb->data + L2CAP_HDR_SIZE);
842 put_unaligned_le16(__pack_enhanced_control(control),
843 skb->data + L2CAP_HDR_SIZE);
847 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
851 struct l2cap_hdr *lh;
854 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
855 hlen = L2CAP_EXT_HDR_SIZE;
857 hlen = L2CAP_ENH_HDR_SIZE;
859 if (chan->fcs == L2CAP_FCS_CRC16)
860 hlen += L2CAP_FCS_SIZE;
862 skb = bt_skb_alloc(hlen, GFP_KERNEL);
865 return ERR_PTR(-ENOMEM);
867 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
868 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
869 lh->cid = cpu_to_le16(chan->dcid);
871 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
872 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
874 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
876 if (chan->fcs == L2CAP_FCS_CRC16) {
877 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
878 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
881 skb->priority = HCI_PRIO_MAX;
885 static void l2cap_send_sframe(struct l2cap_chan *chan,
886 struct l2cap_ctrl *control)
891 BT_DBG("chan %p, control %p", chan, control);
893 if (!control->sframe)
896 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
900 if (control->super == L2CAP_SUPER_RR)
901 clear_bit(CONN_RNR_SENT, &chan->conn_state);
902 else if (control->super == L2CAP_SUPER_RNR)
903 set_bit(CONN_RNR_SENT, &chan->conn_state);
905 if (control->super != L2CAP_SUPER_SREJ) {
906 chan->last_acked_seq = control->reqseq;
907 __clear_ack_timer(chan);
910 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
911 control->final, control->poll, control->super);
913 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
914 control_field = __pack_extended_control(control);
916 control_field = __pack_enhanced_control(control);
918 skb = l2cap_create_sframe_pdu(chan, control_field);
920 l2cap_do_send(chan, skb);
/* Send an RR (receiver-ready) or RNR (receiver-not-ready) S-frame
 * acknowledging chan->buffer_seq. RNR is chosen while the local side is
 * busy, and CONN_RNR_SENT is set so the busy condition can be cleared
 * later.
 * NOTE(review): this helper still uses the legacy u32 control-word API
 * (__set_ctrl_super/__set_reqseq) while l2cap_send_sframe() above takes
 * a struct l2cap_ctrl — presumably mid-refactor; confirm how the packed
 * word reaches the wire in the full source.
 */
923 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
/* Local receive buffer is full/busy: advertise RNR and remember it. */
925 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
926 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
927 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* Otherwise a plain RR acknowledgement. */
929 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
/* Both variants carry the current expected sequence number. */
931 control |= __set_reqseq(chan, chan->buffer_seq);
934 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
936 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
939 static void l2cap_send_conn_req(struct l2cap_chan *chan)
941 struct l2cap_conn *conn = chan->conn;
942 struct l2cap_conn_req req;
944 req.scid = cpu_to_le16(chan->scid);
947 chan->ident = l2cap_get_ident(conn);
949 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
951 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
954 static void l2cap_chan_ready(struct l2cap_chan *chan)
956 struct sock *sk = chan->sk;
961 parent = bt_sk(sk)->parent;
963 BT_DBG("sk %p, parent %p", sk, parent);
965 /* This clears all conf flags, including CONF_NOT_COMPLETE */
966 chan->conf_state = 0;
967 __clear_chan_timer(chan);
969 __l2cap_state_change(chan, BT_CONNECTED);
970 sk->sk_state_change(sk);
973 parent->sk_data_ready(parent, 0);
978 static void l2cap_do_start(struct l2cap_chan *chan)
980 struct l2cap_conn *conn = chan->conn;
982 if (conn->hcon->type == LE_LINK) {
983 l2cap_chan_ready(chan);
987 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
988 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
991 if (l2cap_chan_check_security(chan) &&
992 __l2cap_no_conn_pending(chan))
993 l2cap_send_conn_req(chan);
995 struct l2cap_info_req req;
996 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
998 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
999 conn->info_ident = l2cap_get_ident(conn);
1001 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1003 l2cap_send_cmd(conn, conn->info_ident,
1004 L2CAP_INFO_REQ, sizeof(req), &req);
1008 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1010 u32 local_feat_mask = l2cap_feat_mask;
1012 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1015 case L2CAP_MODE_ERTM:
1016 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1017 case L2CAP_MODE_STREAMING:
1018 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1024 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1026 struct sock *sk = chan->sk;
1027 struct l2cap_disconn_req req;
1032 if (chan->mode == L2CAP_MODE_ERTM) {
1033 __clear_retrans_timer(chan);
1034 __clear_monitor_timer(chan);
1035 __clear_ack_timer(chan);
1038 req.dcid = cpu_to_le16(chan->dcid);
1039 req.scid = cpu_to_le16(chan->scid);
1040 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1041 L2CAP_DISCONN_REQ, sizeof(req), &req);
1044 __l2cap_state_change(chan, BT_DISCONN);
1045 __l2cap_chan_set_err(chan, err);
1049 /* ---- L2CAP connections ---- */
1050 static void l2cap_conn_start(struct l2cap_conn *conn)
1052 struct l2cap_chan *chan, *tmp;
1054 BT_DBG("conn %p", conn);
1056 mutex_lock(&conn->chan_lock);
1058 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1059 struct sock *sk = chan->sk;
1061 l2cap_chan_lock(chan);
1063 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1064 l2cap_chan_unlock(chan);
1068 if (chan->state == BT_CONNECT) {
1069 if (!l2cap_chan_check_security(chan) ||
1070 !__l2cap_no_conn_pending(chan)) {
1071 l2cap_chan_unlock(chan);
1075 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1076 && test_bit(CONF_STATE2_DEVICE,
1077 &chan->conf_state)) {
1078 l2cap_chan_close(chan, ECONNRESET);
1079 l2cap_chan_unlock(chan);
1083 l2cap_send_conn_req(chan);
1085 } else if (chan->state == BT_CONNECT2) {
1086 struct l2cap_conn_rsp rsp;
1088 rsp.scid = cpu_to_le16(chan->dcid);
1089 rsp.dcid = cpu_to_le16(chan->scid);
1091 if (l2cap_chan_check_security(chan)) {
1093 if (test_bit(BT_SK_DEFER_SETUP,
1094 &bt_sk(sk)->flags)) {
1095 struct sock *parent = bt_sk(sk)->parent;
1096 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1097 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1099 parent->sk_data_ready(parent, 0);
1102 __l2cap_state_change(chan, BT_CONFIG);
1103 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1104 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1108 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1109 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1112 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1115 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1116 rsp.result != L2CAP_CR_SUCCESS) {
1117 l2cap_chan_unlock(chan);
1121 set_bit(CONF_REQ_SENT, &chan->conf_state);
1122 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1123 l2cap_build_conf_req(chan, buf), buf);
1124 chan->num_conf_req++;
1127 l2cap_chan_unlock(chan);
1130 mutex_unlock(&conn->chan_lock);
1133 /* Find socket with cid and source/destination bdaddr.
1134 * Returns closest match, locked.
1136 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1140 struct l2cap_chan *c, *c1 = NULL;
1142 read_lock(&chan_list_lock);
1144 list_for_each_entry(c, &chan_list, global_l) {
1145 struct sock *sk = c->sk;
1147 if (state && c->state != state)
1150 if (c->scid == cid) {
1151 int src_match, dst_match;
1152 int src_any, dst_any;
1155 src_match = !bacmp(&bt_sk(sk)->src, src);
1156 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1157 if (src_match && dst_match) {
1158 read_unlock(&chan_list_lock);
1163 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1164 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1165 if ((src_match && dst_any) || (src_any && dst_match) ||
1166 (src_any && dst_any))
1171 read_unlock(&chan_list_lock);
1176 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1178 struct sock *parent, *sk;
1179 struct l2cap_chan *chan, *pchan;
1183 /* Check if we have socket listening on cid */
1184 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1185 conn->src, conn->dst);
1193 /* Check for backlog size */
1194 if (sk_acceptq_is_full(parent)) {
1195 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1199 chan = pchan->ops->new_connection(pchan->data);
1205 hci_conn_hold(conn->hcon);
1207 bacpy(&bt_sk(sk)->src, conn->src);
1208 bacpy(&bt_sk(sk)->dst, conn->dst);
1210 bt_accept_enqueue(parent, sk);
1212 l2cap_chan_add(conn, chan);
1214 __set_chan_timer(chan, sk->sk_sndtimeo);
1216 __l2cap_state_change(chan, BT_CONNECTED);
1217 parent->sk_data_ready(parent, 0);
1220 release_sock(parent);
1223 static void l2cap_conn_ready(struct l2cap_conn *conn)
1225 struct l2cap_chan *chan;
1227 BT_DBG("conn %p", conn);
1229 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1230 l2cap_le_conn_ready(conn);
1232 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1233 smp_conn_security(conn, conn->hcon->pending_sec_level);
1235 mutex_lock(&conn->chan_lock);
1237 list_for_each_entry(chan, &conn->chan_l, list) {
1239 l2cap_chan_lock(chan);
1241 if (conn->hcon->type == LE_LINK) {
1242 if (smp_conn_security(conn, chan->sec_level))
1243 l2cap_chan_ready(chan);
1245 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1246 struct sock *sk = chan->sk;
1247 __clear_chan_timer(chan);
1249 __l2cap_state_change(chan, BT_CONNECTED);
1250 sk->sk_state_change(sk);
1253 } else if (chan->state == BT_CONNECT)
1254 l2cap_do_start(chan);
1256 l2cap_chan_unlock(chan);
1259 mutex_unlock(&conn->chan_lock);
1262 /* Notify sockets that we cannot guaranty reliability anymore */
1263 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1265 struct l2cap_chan *chan;
1267 BT_DBG("conn %p", conn);
1269 mutex_lock(&conn->chan_lock);
1271 list_for_each_entry(chan, &conn->chan_l, list) {
1272 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1273 __l2cap_chan_set_err(chan, err);
1276 mutex_unlock(&conn->chan_lock);
1279 static void l2cap_info_timeout(struct work_struct *work)
1281 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1284 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1285 conn->info_ident = 0;
1287 l2cap_conn_start(conn);
1290 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1292 struct l2cap_conn *conn = hcon->l2cap_data;
1293 struct l2cap_chan *chan, *l;
1298 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1300 kfree_skb(conn->rx_skb);
1302 mutex_lock(&conn->chan_lock);
1305 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1306 l2cap_chan_hold(chan);
1307 l2cap_chan_lock(chan);
1309 l2cap_chan_del(chan, err);
1311 l2cap_chan_unlock(chan);
1313 chan->ops->close(chan->data);
1314 l2cap_chan_put(chan);
1317 mutex_unlock(&conn->chan_lock);
1319 hci_chan_del(conn->hchan);
1321 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1322 cancel_delayed_work_sync(&conn->info_timer);
1324 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1325 cancel_delayed_work_sync(&conn->security_timer);
1326 smp_chan_destroy(conn);
1329 hcon->l2cap_data = NULL;
1333 static void security_timeout(struct work_struct *work)
1335 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1336 security_timer.work);
1338 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1341 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1343 struct l2cap_conn *conn = hcon->l2cap_data;
1344 struct hci_chan *hchan;
1349 hchan = hci_chan_create(hcon);
1353 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1355 hci_chan_del(hchan);
1359 hcon->l2cap_data = conn;
1361 conn->hchan = hchan;
1363 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1365 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1366 conn->mtu = hcon->hdev->le_mtu;
1368 conn->mtu = hcon->hdev->acl_mtu;
1370 conn->src = &hcon->hdev->bdaddr;
1371 conn->dst = &hcon->dst;
1373 conn->feat_mask = 0;
1375 spin_lock_init(&conn->lock);
1376 mutex_init(&conn->chan_lock);
1378 INIT_LIST_HEAD(&conn->chan_l);
1380 if (hcon->type == LE_LINK)
1381 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1383 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1385 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1390 /* ---- Socket interface ---- */
1392 /* Find socket with psm and source / destination bdaddr.
1393 * Returns closest match.
1395 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1399 struct l2cap_chan *c, *c1 = NULL;
1401 read_lock(&chan_list_lock);
1403 list_for_each_entry(c, &chan_list, global_l) {
1404 struct sock *sk = c->sk;
1406 if (state && c->state != state)
1409 if (c->psm == psm) {
1410 int src_match, dst_match;
1411 int src_any, dst_any;
1414 src_match = !bacmp(&bt_sk(sk)->src, src);
1415 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1416 if (src_match && dst_match) {
1417 read_unlock(&chan_list_lock);
1422 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1423 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1424 if ((src_match && dst_any) || (src_any && dst_match) ||
1425 (src_any && dst_any))
1430 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection on @chan toward @dst.
 * Validates the PSM/CID, creates (or reuses) the HCI link, attaches the
 * channel to the connection and kicks off the connect state machine.
 * Returns 0 on success or a negative errno.  NOTE(review): error labels,
 * several guard branches and returns are elided in this extract.
 */
1435 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1436 bdaddr_t *dst, u8 dst_type)
1438 struct sock *sk = chan->sk;
1439 bdaddr_t *src = &bt_sk(sk)->src;
1440 struct l2cap_conn *conn;
1441 struct hci_conn *hcon;
1442 struct hci_dev *hdev;
1446 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1447 dst_type, __le16_to_cpu(chan->psm));
/* pick the local adapter that routes to @dst */
1449 hdev = hci_get_route(dst, src);
1451 return -EHOSTUNREACH;
1455 l2cap_chan_lock(chan);
1457 /* PSM must be odd and lsb of upper byte must be 0 */
1458 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1459 chan->chan_type != L2CAP_CHAN_RAW) {
/* connection-oriented channels need either a PSM or a fixed CID */
1464 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1469 switch (chan->mode) {
1470 case L2CAP_MODE_BASIC:
1472 case L2CAP_MODE_ERTM:
1473 case L2CAP_MODE_STREAMING:
/* reject a connect attempt in an incompatible socket state */
1484 switch (sk->sk_state) {
1488 /* Already connecting */
1494 /* Already connected */
1510 /* Set destination address and psm */
1511 bacpy(&bt_sk(sk)->dst, dst);
1518 auth_type = l2cap_get_auth_type(chan);
/* LE data channel -> LE link; everything else -> ACL link */
1520 if (chan->dcid == L2CAP_CID_LE_DATA)
1521 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1522 chan->sec_level, auth_type);
1524 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1525 chan->sec_level, auth_type);
1528 err = PTR_ERR(hcon);
1532 conn = l2cap_conn_add(hcon, 0);
/* LE allows only one channel per link in this code base */
1539 if (hcon->type == LE_LINK) {
1542 if (!list_empty(&conn->chan_l)) {
1551 /* Update source addr of the socket */
1552 bacpy(src, conn->src);
/* l2cap_chan_add() takes conn->chan_lock; drop the chan lock to
 * preserve lock ordering, then retake it */
1554 l2cap_chan_unlock(chan);
1555 l2cap_chan_add(conn, chan);
1556 l2cap_chan_lock(chan);
1558 l2cap_state_change(chan, BT_CONNECT);
1559 __set_chan_timer(chan, sk->sk_sndtimeo);
/* link already up: start channel setup immediately */
1561 if (hcon->state == BT_CONNECTED) {
1562 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1563 __clear_chan_timer(chan);
1564 if (l2cap_chan_check_security(chan))
1565 l2cap_state_change(chan, BT_CONNECTED);
1567 l2cap_do_start(chan);
1573 l2cap_chan_unlock(chan);
1574 hci_dev_unlock(hdev);
/* Block until every outstanding ERTM I-frame has been acknowledged, the
 * connection goes away, a signal arrives, or a socket error is raised.
 * Classic wait-queue loop; the timeout setup and break statements are
 * elided in this extract.
 */
1579 int __l2cap_wait_ack(struct sock *sk)
1581 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1582 DECLARE_WAITQUEUE(wait, current);
1586 add_wait_queue(sk_sleep(sk), &wait);
1587 set_current_state(TASK_INTERRUPTIBLE);
/* loop while unacked frames remain and the conn is still attached */
1588 while (chan->unacked_frames > 0 && chan->conn) {
/* interrupted by a signal: translate via the remaining timeout */
1592 if (signal_pending(current)) {
1593 err = sock_intr_errno(timeo);
1598 timeo = schedule_timeout(timeo);
/* re-arm the state before re-checking the loop condition */
1600 set_current_state(TASK_INTERRUPTIBLE);
1602 err = sock_error(sk);
1606 set_current_state(TASK_RUNNING);
1607 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor-timer expiry (delayed work): either give up and tear the
 * channel down after remote_max_tx retries, or poll the peer again with
 * an RR/RNR carrying the P-bit and re-arm the timer.
 */
1611 static void l2cap_monitor_timeout(struct work_struct *work)
1613 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1614 monitor_timer.work);
1616 BT_DBG("chan %p", chan);
1618 l2cap_chan_lock(chan);
/* retry budget exhausted: abort the connection */
1620 if (chan->retry_count >= chan->remote_max_tx) {
1621 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1622 l2cap_chan_unlock(chan);
/* drop the reference taken when the timer was armed */
1623 l2cap_chan_put(chan);
1627 chan->retry_count++;
1628 __set_monitor_timer(chan);
1630 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1631 l2cap_chan_unlock(chan);
1632 l2cap_chan_put(chan);
/* ERTM retransmission-timer expiry (delayed work): switch to the WAIT_F
 * phase by polling the peer (P-bit) and starting the monitor timer.
 */
1635 static void l2cap_retrans_timeout(struct work_struct *work)
1637 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1638 retrans_timer.work);
1640 BT_DBG("chan %p", chan);
1642 l2cap_chan_lock(chan);
/* first poll attempt; monitor timer now supervises the exchange */
1644 chan->retry_count = 1;
1645 __set_monitor_timer(chan);
/* remember that we owe the peer an F-bit response */
1647 set_bit(CONN_WAIT_F, &chan->conn_state);
1649 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1651 l2cap_chan_unlock(chan);
/* release the timer's channel reference */
1652 l2cap_chan_put(chan);
/* Transmit a queue of pre-segmented PDUs in streaming mode: stamp each
 * frame with the next tx sequence number, append the CRC16 FCS when
 * configured, and push it to the controller.  Streaming mode keeps no
 * retransmission copy.  Returns 0 on success (return elided here).
 */
1655 static int l2cap_streaming_send(struct l2cap_chan *chan,
1656 struct sk_buff_head *skbs)
1658 struct sk_buff *skb;
1659 struct l2cap_ctrl *control;
1661 BT_DBG("chan %p, skbs %p", chan, skbs);
1663 if (chan->state != BT_CONNECTED)
/* append caller's segments to the channel tx queue, then drain it */
1666 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1668 while (!skb_queue_empty(&chan->tx_q)) {
1670 skb = skb_dequeue(&chan->tx_q);
1672 bt_cb(skb)->control.retries = 1;
1673 control = &bt_cb(skb)->control;
/* streaming mode never acknowledges, so reqseq stays 0 */
1675 control->reqseq = 0;
1676 control->txseq = chan->next_tx_seq;
1678 __pack_control(chan, control, skb);
1680 if (chan->fcs == L2CAP_FCS_CRC16) {
1681 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1682 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1685 l2cap_do_send(chan, skb);
1687 BT_DBG("Sent txseq %d", (int)control->txseq);
/* modular sequence-number arithmetic via __next_seq() */
1689 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1690 chan->frames_sent++;
/* Transmit pending ERTM I-frames while the remote tx window has room and
 * the tx state machine is in XMIT.  Each frame is cloned before sending
 * so the original stays queued for possible retransmission.  Returns the
 * number of frames sent (declaration/return elided in this extract).
 */
1696 static int l2cap_ertm_send(struct l2cap_chan *chan)
1698 struct sk_buff *skb, *tx_skb;
1699 struct l2cap_ctrl *control;
1702 BT_DBG("chan %p", chan);
1704 if (chan->state != BT_CONNECTED)
/* peer signalled receiver-not-ready: hold transmission */
1707 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1710 while (chan->tx_send_head &&
1711 chan->unacked_frames < chan->remote_tx_win &&
1712 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1714 skb = chan->tx_send_head;
1716 bt_cb(skb)->control.retries = 1;
1717 control = &bt_cb(skb)->control;
/* piggy-back a pending F-bit if one is owed */
1719 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* every I-frame acknowledges everything up to buffer_seq */
1722 control->reqseq = chan->buffer_seq;
1723 chan->last_acked_seq = chan->buffer_seq;
1724 control->txseq = chan->next_tx_seq;
1726 __pack_control(chan, control, skb);
1728 if (chan->fcs == L2CAP_FCS_CRC16) {
1729 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1730 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1733 /* Clone after data has been modified. Data is assumed to be
1734 read-only (for locking purposes) on cloned sk_buffs.
1736 tx_skb = skb_clone(skb, GFP_KERNEL);
/* arm the retransmission timer for the now-outstanding frame */
1741 __set_retrans_timer(chan);
1743 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1744 chan->unacked_frames++;
1745 chan->frames_sent++;
/* advance tx_send_head; NULL when the queue is exhausted */
1748 if (skb_queue_is_last(&chan->tx_q, skb))
1749 chan->tx_send_head = NULL;
1751 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1753 l2cap_do_send(chan, tx_skb);
1754 BT_DBG("Sent txseq %d", (int)control->txseq);
1757 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
1758 (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.
 * For each frame: enforce the max_tx retry limit, refresh reqseq/F-bit
 * in a local control copy, take a writable copy (skb_copy for clones,
 * skb_clone otherwise), rewrite the control field and FCS in place, and
 * send.
 */
1763 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1765 struct l2cap_ctrl control;
1766 struct sk_buff *skb;
1767 struct sk_buff *tx_skb;
1770 BT_DBG("chan %p", chan);
1772 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
/* drain the retransmission sequence list */
1775 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1776 seq = l2cap_seq_list_pop(&chan->retrans_list);
1778 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
/* frame vanished from tx_q: nothing to resend for this seq */
1780 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1785 bt_cb(skb)->control.retries++;
1786 control = bt_cb(skb)->control;
/* give up once the configured retry limit is exceeded */
1788 if (chan->max_tx != 0 &&
1789 bt_cb(skb)->control.retries > chan->max_tx) {
1790 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1791 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1792 l2cap_seq_list_clear(&chan->retrans_list);
/* refresh the acknowledgement state for the retransmission */
1796 control.reqseq = chan->buffer_seq;
1797 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1802 if (skb_cloned(skb)) {
1803 /* Cloned sk_buffs are read-only, so we need a
/* deep copy so the control field can be rewritten */
1806 tx_skb = skb_copy(skb, GFP_ATOMIC);
1808 tx_skb = skb_clone(skb, GFP_ATOMIC);
/* allocation failed: abandon the whole retransmit batch */
1812 l2cap_seq_list_clear(&chan->retrans_list);
1816 /* Update skb contents */
1817 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1818 put_unaligned_le32(__pack_extended_control(&control),
1819 tx_skb->data + L2CAP_HDR_SIZE);
1821 put_unaligned_le16(__pack_enhanced_control(&control),
1822 tx_skb->data + L2CAP_HDR_SIZE);
/* recompute FCS because the control field just changed */
1825 if (chan->fcs == L2CAP_FCS_CRC16) {
1826 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1827 put_unaligned_le16(fcs, skb_put(tx_skb,
1831 l2cap_do_send(chan, tx_skb);
1833 BT_DBG("Resent txseq %d", control.txseq);
1835 chan->last_acked_seq = chan->buffer_seq;
/* Rebuild the retransmission list with every unacked frame starting at
 * control->reqseq (up to tx_send_head) and kick off the resend.
 * Used when the peer's REJ/poll indicates everything outstanding must be
 * sent again.
 */
1839 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1840 struct l2cap_ctrl *control)
1842 struct sk_buff *skb;
1844 BT_DBG("chan %p, control %p", chan, control);
1847 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* start from a clean list; we re-add below */
1849 l2cap_seq_list_clear(&chan->retrans_list);
1851 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1854 if (chan->unacked_frames) {
/* find the first frame at reqseq (or stop at the send head) */
1855 skb_queue_walk(&chan->tx_q, skb) {
1856 if (bt_cb(skb)->control.txseq == control->reqseq ||
1857 skb == chan->tx_send_head)
/* queue every frame from there up to the unsent region */
1861 skb_queue_walk_from(&chan->tx_q, skb) {
1862 if (skb == chan->tx_send_head)
1865 l2cap_seq_list_append(&chan->retrans_list,
1866 bt_cb(skb)->control.txseq);
1869 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  Sends RNR when locally busy, lets
 * pending I-frames carry the ack when possible, sends an explicit RR
 * once the ack backlog reaches 3/4 of the tx window, and otherwise just
 * (re)arms the ack timer.
 */
1873 static void l2cap_send_ack(struct l2cap_chan *chan)
1875 struct l2cap_ctrl control;
/* frames received but not yet acknowledged (modular distance) */
1876 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1877 chan->last_acked_seq);
1880 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1881 chan, chan->last_acked_seq, chan->buffer_seq);
1883 memset(&control, 0, sizeof(control));
/* locally busy: tell the peer receiver-not-ready */
1886 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1887 chan->rx_state == L2CAP_RX_STATE_RECV) {
1888 __clear_ack_timer(chan);
1889 control.super = L2CAP_SUPER_RNR;
1890 control.reqseq = chan->buffer_seq;
1891 l2cap_send_sframe(chan, &control);
1893 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1894 l2cap_ertm_send(chan);
1895 /* If any i-frames were sent, they included an ack */
1896 if (chan->buffer_seq == chan->last_acked_seq)
1900 /* Ack now if the tx window is 3/4ths full.
1901 * Calculate without mul or div
/* threshold = tx_win * 3, later shifted to get tx_win * 3/4 */
1903 threshold = chan->tx_win;
1904 threshold += threshold << 1;
1907 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
1910 if (frames_to_ack >= threshold) {
1911 __clear_ack_timer(chan);
1912 control.super = L2CAP_SUPER_RR;
1913 control.reqseq = chan->buffer_seq;
1914 l2cap_send_sframe(chan, &control);
/* nothing urgent: defer the ack to the timer */
1919 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the head skb, the remainder into a chain of fragment
 * skbs sized by the connection MTU (no L2CAP header on fragments).
 * Returns 0 on success or a negative errno (elided returns).
 */
1923 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1924 struct msghdr *msg, int len,
1925 int count, struct sk_buff *skb)
1927 struct l2cap_conn *conn = chan->conn;
1928 struct sk_buff **frag;
/* copy the first chunk directly into the head skb */
1931 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1937 /* Continuation fragments (no L2CAP header) */
1938 frag = &skb_shinfo(skb)->frag_list;
1940 struct sk_buff *tmp;
/* each fragment holds at most one HCI MTU worth of data */
1942 count = min_t(unsigned int, conn->mtu, len);
1944 tmp = chan->ops->alloc_skb(chan, count,
1945 msg->msg_flags & MSG_DONTWAIT);
1947 return PTR_ERR(tmp);
1951 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1954 (*frag)->priority = skb->priority;
/* account the fragment against the head skb's totals */
1959 skb->len += (*frag)->len;
1960 skb->data_len += (*frag)->len;
1962 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the PSM,
 * then the payload copied from the caller's iovec.  Returns the skb or
 * an ERR_PTR on failure.
 */
1968 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1969 struct msghdr *msg, size_t len,
1972 struct l2cap_conn *conn = chan->conn;
1973 struct sk_buff *skb;
/* header = basic L2CAP header + 2-byte PSM prefix */
1974 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1975 struct l2cap_hdr *lh;
1977 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
/* head skb carries at most one MTU; rest goes to fragments */
1979 count = min_t(unsigned int, (conn->mtu - hlen), len);
1981 skb = chan->ops->alloc_skb(chan, count + hlen,
1982 msg->msg_flags & MSG_DONTWAIT);
1986 skb->priority = priority;
1988 /* Create L2CAP header */
1989 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1990 lh->cid = cpu_to_le16(chan->dcid);
1991 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1992 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1994 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1995 if (unlikely(err < 0)) {
/* copy failed: free the skb (elided) and propagate the error */
1997 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * payload from the caller's iovec.  Returns the skb or an ERR_PTR.
 */
2002 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2003 struct msghdr *msg, size_t len,
2006 struct l2cap_conn *conn = chan->conn;
2007 struct sk_buff *skb;
2009 struct l2cap_hdr *lh;
2011 BT_DBG("chan %p len %d", chan, (int)len);
/* first chunk limited by the connection MTU minus the header */
2013 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2015 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2016 msg->msg_flags & MSG_DONTWAIT);
2020 skb->priority = priority;
2022 /* Create L2CAP header */
2023 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2024 lh->cid = cpu_to_le16(chan->dcid);
2025 lh->len = cpu_to_le16(len);
2027 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2028 if (unlikely(err < 0)) {
/* copy failed: free the skb (elided) and propagate the error */
2030 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zero placeholder
 * control field (filled in at send time), optional SDU-length field for
 * the first segment, payload, and room reserved for the FCS.  Returns
 * the skb or an ERR_PTR.
 */
2035 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2036 struct msghdr *msg, size_t len,
2039 struct l2cap_conn *conn = chan->conn;
2040 struct sk_buff *skb;
2041 int err, count, hlen;
2042 struct l2cap_hdr *lh;
2044 BT_DBG("chan %p len %d", chan, (int)len);
2047 return ERR_PTR(-ENOTCONN);
/* header size depends on extended vs enhanced control field */
2049 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2050 hlen = L2CAP_EXT_HDR_SIZE;
2052 hlen = L2CAP_ENH_HDR_SIZE;
/* SDU length present only on SAR_START segments (condition elided) */
2055 hlen += L2CAP_SDULEN_SIZE;
2057 if (chan->fcs == L2CAP_FCS_CRC16)
2058 hlen += L2CAP_FCS_SIZE;
2060 count = min_t(unsigned int, (conn->mtu - hlen), len);
2062 skb = chan->ops->alloc_skb(chan, count + hlen,
2063 msg->msg_flags & MSG_DONTWAIT);
2067 /* Create L2CAP header */
2068 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2069 lh->cid = cpu_to_le16(chan->dcid);
2070 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2072 /* Control header is populated later */
2073 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2074 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))
2076 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2079 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2081 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2082 if (unlikely(err < 0)) {
2084 return ERR_PTR(err);
/* stash per-frame metadata for the tx state machine */
2087 bt_cb(skb)->control.fcs = chan->fcs;
2088 bt_cb(skb)->control.retries = 0;
/* Split an SDU from @msg into I-frame PDUs sized for the HCI MTU and the
 * remote MPS, tagging each with the proper SAR value (UNSEGMENTED, or
 * START/CONTINUE/END), and queue them on @seg_queue.  Returns 0 or a
 * negative errno; the loop-over-remaining-length lines are elided.
 */
2092 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2093 struct sk_buff_head *seg_queue,
2094 struct msghdr *msg, size_t len)
2096 struct sk_buff *skb;
2102 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2104 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2105 * so fragmented skbs are not used. The HCI layer's handling
2106 * of fragmented skbs is not compatible with ERTM's queueing.
2109 /* PDU size is derived from the HCI MTU */
2110 pdu_len = chan->conn->mtu;
2112 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2114 /* Adjust for largest possible L2CAP overhead. */
2115 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
2117 /* Remote device may have requested smaller PDUs */
2118 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
/* whole SDU fits in one PDU -> no segmentation needed */
2120 if (len <= pdu_len) {
2121 sar = L2CAP_SAR_UNSEGMENTED;
2125 sar = L2CAP_SAR_START;
/* first segment carries the SDU length field */
2127 pdu_len -= L2CAP_SDULEN_SIZE;
2131 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
/* on failure drop everything queued so far */
2134 __skb_queue_purge(seg_queue);
2135 return PTR_ERR(skb);
2138 bt_cb(skb)->control.sar = sar;
2139 __skb_queue_tail(seg_queue, skb);
/* subsequent segments have no SDU length field */
2144 pdu_len += L2CAP_SDULEN_SIZE;
2147 if (len <= pdu_len) {
2148 sar = L2CAP_SAR_END;
2151 sar = L2CAP_SAR_CONTINUE;
/* Top-level send entry point for a channel.  Dispatches by channel type
 * and mode: connectionless PDU, single basic-mode PDU, or segmentation
 * plus the ERTM tx state machine / streaming transmit.  Returns bytes
 * sent or a negative errno (several returns elided in this extract).
 */
2158 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2161 struct sk_buff *skb;
2163 struct sk_buff_head seg_queue;
2165 /* Connectionless channel */
2166 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2167 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2169 return PTR_ERR(skb);
2171 l2cap_do_send(chan, skb);
2175 switch (chan->mode) {
2176 case L2CAP_MODE_BASIC:
2177 /* Check outgoing MTU */
2178 if (len > chan->omtu)
2181 /* Create a basic PDU */
2182 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2184 return PTR_ERR(skb);
2186 l2cap_do_send(chan, skb);
2190 case L2CAP_MODE_ERTM:
2191 case L2CAP_MODE_STREAMING:
2192 /* Check outgoing MTU */
2193 if (len > chan->omtu) {
2198 __skb_queue_head_init(&seg_queue);
2200 /* Do segmentation before calling in to the state machine,
2201 * since it's possible to block while waiting for memory
2204 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2206 /* The channel could have been closed while segmenting,
2207 * check that it is still connected.
2209 if (chan->state != BT_CONNECTED) {
2210 __skb_queue_purge(&seg_queue);
/* ERTM goes through the tx state machine; streaming sends directly */
2217 if (chan->mode == L2CAP_MODE_ERTM)
2218 err = l2cap_tx(chan, 0, &seg_queue,
2219 L2CAP_EV_DATA_REQUEST);
2221 err = l2cap_streaming_send(chan, &seg_queue);
2226 /* If the skbs were not queued for sending, they'll still be in
2227 * seg_queue and need to be purged.
2229 __skb_queue_purge(&seg_queue);
/* unknown mode: note the debug line prints chan->mode */
2233 BT_DBG("bad state %1.1x", chan->mode);
/* SREJ (selective-reject) S-frame senders.  NOTE(review): the bodies of
 * these three helpers are elided in this extract; only the signatures
 * remain visible.
 */
2240 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2245 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2250 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
/* Process an incoming acknowledgement (reqseq): drop every acked frame
 * from the tx queue, advance expected_ack_seq, and stop the retransmit
 * timer once nothing remains outstanding.
 */
2255 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2257 struct sk_buff *acked_skb;
2260 BT_DBG("chan %p, reqseq %d", chan, reqseq);
/* nothing outstanding, or ack repeats what we already know */
2262 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2265 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2266 chan->expected_ack_seq, chan->unacked_frames);
/* walk the modular sequence range [expected_ack_seq, reqseq) */
2268 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2269 ackseq = __next_seq(chan, ackseq)) {
2271 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2273 skb_unlink(acked_skb, &chan->tx_q);
2274 kfree_skb(acked_skb);
2275 chan->unacked_frames--;
2279 chan->expected_ack_seq = reqseq;
/* all frames acked: the retransmission timer is no longer needed */
2281 if (chan->unacked_frames == 0)
2282 __clear_retrans_timer(chan);
2284 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
/* Abort the SREJ_SENT receive state: discard the SREJ bookkeeping and
 * any out-of-order frames buffered in srej_q, and fall back to the
 * normal RECV state expecting buffer_seq next.
 */
2287 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2289 BT_DBG("chan %p", chan);
2291 chan->expected_tx_seq = chan->buffer_seq;
2292 l2cap_seq_list_clear(&chan->srej_list);
2293 skb_queue_purge(&chan->srej_q);
2294 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM tx state machine, XMIT state: handle data requests, local-busy
 * transitions, incoming acks, explicit polls and retransmit timeouts.
 * Events that poll the peer move the machine to WAIT_F.  Break
 * statements and the final return are elided in this extract.
 */
2297 static int l2cap_tx_state_xmit(struct l2cap_chan *chan,
2298 struct l2cap_ctrl *control,
2299 struct sk_buff_head *skbs, u8 event)
2303 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2307 case L2CAP_EV_DATA_REQUEST:
/* remember where unsent data starts before splicing */
2308 if (chan->tx_send_head == NULL)
2309 chan->tx_send_head = skb_peek(skbs);
2311 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2312 l2cap_ertm_send(chan);
2314 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2315 BT_DBG("Enter LOCAL_BUSY");
2316 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2318 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2319 /* The SREJ_SENT state must be aborted if we are to
2320 * enter the LOCAL_BUSY state.
2322 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack() will emit the RNR while locally busy */
2325 l2cap_send_ack(chan);
2328 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2329 BT_DBG("Exit LOCAL_BUSY");
2330 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* if we sent an RNR, poll with RR(P=1) and await the F-bit */
2332 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2333 struct l2cap_ctrl local_control;
2335 memset(&local_control, 0, sizeof(local_control));
2336 local_control.sframe = 1;
2337 local_control.super = L2CAP_SUPER_RR;
2338 local_control.poll = 1;
2339 local_control.reqseq = chan->buffer_seq;
2340 l2cap_send_sframe(chan, &local_control);
2342 chan->retry_count = 1;
2343 __set_monitor_timer(chan);
2344 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2347 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2348 l2cap_process_reqseq(chan, control->reqseq);
2350 case L2CAP_EV_EXPLICIT_POLL:
2351 l2cap_send_rr_or_rnr(chan, 1);
2352 chan->retry_count = 1;
2353 __set_monitor_timer(chan);
2354 __clear_ack_timer(chan);
2355 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2357 case L2CAP_EV_RETRANS_TO:
/* retransmission timer fired: poll the peer, enter WAIT_F */
2358 l2cap_send_rr_or_rnr(chan, 1);
2359 chan->retry_count = 1;
2360 __set_monitor_timer(chan);
2361 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2363 case L2CAP_EV_RECV_FBIT:
2364 /* Nothing to process */
/* ERTM tx state machine, WAIT_F state: we have polled the peer and are
 * waiting for an S/I-frame with the F-bit set.  Data is queued but not
 * sent; a received F-bit returns the machine to XMIT; monitor timeouts
 * re-poll until max_tx is exhausted.  Breaks/return elided.
 */
2373 static int l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2374 struct l2cap_ctrl *control,
2375 struct sk_buff_head *skbs, u8 event)
2379 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2383 case L2CAP_EV_DATA_REQUEST:
2384 if (chan->tx_send_head == NULL)
2385 chan->tx_send_head = skb_peek(skbs);
2386 /* Queue data, but don't send. */
2387 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2389 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2390 BT_DBG("Enter LOCAL_BUSY");
2391 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2393 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2394 /* The SREJ_SENT state must be aborted if we are to
2395 * enter the LOCAL_BUSY state.
2397 l2cap_abort_rx_srej_sent(chan);
2400 l2cap_send_ack(chan);
2403 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2404 BT_DBG("Exit LOCAL_BUSY");
2405 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* re-poll with RR(P=1) if an RNR had been sent */
2407 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2408 struct l2cap_ctrl local_control;
2409 memset(&local_control, 0, sizeof(local_control));
2410 local_control.sframe = 1;
2411 local_control.super = L2CAP_SUPER_RR;
2412 local_control.poll = 1;
2413 local_control.reqseq = chan->buffer_seq;
2414 l2cap_send_sframe(chan, &local_control);
2416 chan->retry_count = 1;
2417 __set_monitor_timer(chan);
2418 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2421 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2422 l2cap_process_reqseq(chan, control->reqseq);
2426 case L2CAP_EV_RECV_FBIT:
/* F-bit received: poll answered, go back to normal transmission */
2427 if (control && control->final) {
2428 __clear_monitor_timer(chan);
2429 if (chan->unacked_frames > 0)
2430 __set_retrans_timer(chan);
2431 chan->retry_count = 0;
2432 chan->tx_state = L2CAP_TX_STATE_XMIT;
2433 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2436 case L2CAP_EV_EXPLICIT_POLL:
/* already polling; nothing further to do */
2439 case L2CAP_EV_MONITOR_TO:
/* max_tx == 0 means unlimited retries */
2440 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2441 l2cap_send_rr_or_rnr(chan, 1);
2442 __set_monitor_timer(chan);
2443 chan->retry_count++;
2445 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Dispatch an event into the ERTM tx state machine based on the current
 * tx_state (XMIT or WAIT_F).  Returns the handler's result (default
 * branch and return elided in this extract).
 */
2455 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2456 struct sk_buff_head *skbs, u8 event)
2460 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2461 chan, control, skbs, event, chan->tx_state);
2463 switch (chan->tx_state) {
2464 case L2CAP_TX_STATE_XMIT:
2465 err = l2cap_tx_state_xmit(chan, control, skbs, event);
2467 case L2CAP_TX_STATE_WAIT_F:
2468 err = l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received frame's reqseq/F-bit into the tx state machine
 * (no data skbs, hence the NULL skb queue argument).
 */
2478 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2479 struct l2cap_ctrl *control)
2481 BT_DBG("chan %p, control %p", chan, control);
2482 l2cap_tx(chan, control, 0, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2485 /* Copy frame to all raw sockets on that connection */
/* Clone @skb once per RAW channel on @conn and deliver the clone via
 * the channel's recv callback, skipping the originating socket.
 */
2486 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2488 struct sk_buff *nskb;
2489 struct l2cap_chan *chan;
2491 BT_DBG("conn %p", conn);
2493 mutex_lock(&conn->chan_lock);
2495 list_for_each_entry(chan, &conn->chan_l, list) {
2496 struct sock *sk = chan->sk;
/* only RAW-type channels receive copies */
2497 if (chan->chan_type != L2CAP_CHAN_RAW)
2500 /* Don't send frame to the socket it came from */
2503 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv callback owns nskb on success; free on failure (elided) */
2507 if (chan->ops->recv(chan->data, nskb))
2511 mutex_unlock(&conn->chan_lock);
2514 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command PDU: L2CAP header (signalling CID,
 * LE or BR/EDR), command header (code/ident/len), then @dlen bytes of
 * @data, fragmented across MTU-sized skbs when needed.  Returns the skb
 * or NULL (error paths elided in this extract).
 */
2515 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2516 u8 code, u8 ident, u16 dlen, void *data)
2518 struct sk_buff *skb, **frag;
2519 struct l2cap_cmd_hdr *cmd;
2520 struct l2cap_hdr *lh;
2523 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2524 conn, code, ident, dlen);
2526 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2527 count = min_t(unsigned int, conn->mtu, len);
2529 skb = bt_skb_alloc(count, GFP_ATOMIC);
2533 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2534 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* signalling CID differs between LE and BR/EDR links */
2536 if (conn->hcon->type == LE_LINK)
2537 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2539 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2541 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2544 cmd->len = cpu_to_le16(dlen);
/* copy as much payload as fits in the head skb */
2547 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2548 memcpy(skb_put(skb, count), data, count);
2554 /* Continuation fragments (no L2CAP header) */
2555 frag = &skb_shinfo(skb)->frag_list;
2557 count = min_t(unsigned int, conn->mtu, len);
2559 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2563 memcpy(skb_put(*frag, count), data, count);
2568 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: return its total length and
 * write the option type/length/value to the out-parameters.  1/2/4-byte
 * values are decoded; anything else is returned as a pointer into the
 * buffer.  Pointer advance and switch scaffolding elided here.
 */
2578 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2580 struct l2cap_conf_opt *opt = *ptr;
2583 len = L2CAP_CONF_OPT_SIZE + opt->len;
2591 *val = *((u8 *) opt->val);
2595 *val = get_unaligned_le16(opt->val);
2599 *val = get_unaligned_le32(opt->val);
/* variable-length option: hand back a pointer to the raw bytes */
2603 *val = (unsigned long) opt->val;
2607 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type, length, value) at *ptr and
 * advance the pointer past it.  1/2/4-byte values are stored inline;
 * larger values are memcpy'd from the address in @val.
 */
2611 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2613 struct l2cap_conf_opt *opt = *ptr;
2615 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2622 *((u8 *) opt->val) = val;
2626 put_unaligned_le16(val, opt->val);
2630 put_unaligned_le32(val, opt->val);
/* for larger options @val is actually a pointer to the data */
2634 memcpy(opt->val, (void *) val, len);
2638 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters; field values depend on the channel
 * mode (ERTM uses the configured service type, streaming forces
 * best-effort).
 */
2641 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2643 struct l2cap_conf_efs efs;
2645 switch (chan->mode) {
2646 case L2CAP_MODE_ERTM:
2647 efs.id = chan->local_id;
2648 efs.stype = chan->local_stype;
2649 efs.msdu = cpu_to_le16(chan->local_msdu);
2650 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2651 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2652 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2655 case L2CAP_MODE_STREAMING:
/* streaming mode always advertises best-effort service */
2657 efs.stype = L2CAP_SERV_BESTEFFORT;
2658 efs.msdu = cpu_to_le16(chan->local_msdu);
2659 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2668 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2669 (unsigned long) &efs);
/* Ack-timer expiry (delayed work): send any pending acknowledgement and
 * drop the reference the timer held on the channel.
 */
2672 static void l2cap_ack_timeout(struct work_struct *work)
2674 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2677 BT_DBG("chan %p", chan);
2679 l2cap_chan_lock(chan);
2681 l2cap_send_ack(chan);
2683 l2cap_chan_unlock(chan);
2685 l2cap_chan_put(chan);
/* Reset all ERTM/streaming sequence state for a channel; for ERTM mode
 * also initialise the rx/tx state machines, the three delayed-work
 * timers and the SREJ/retransmission sequence lists.  Returns 0 or the
 * error from l2cap_seq_list_init().
 */
2688 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2692 chan->next_tx_seq = 0;
2693 chan->expected_tx_seq = 0;
2694 chan->expected_ack_seq = 0;
2695 chan->unacked_frames = 0;
2696 chan->buffer_seq = 0;
2697 chan->frames_sent = 0;
2698 chan->last_acked_seq = 0;
2700 chan->sdu_last_frag = NULL;
2703 skb_queue_head_init(&chan->tx_q);
/* streaming mode needs none of the ERTM machinery below */
2705 if (chan->mode != L2CAP_MODE_ERTM)
2708 chan->rx_state = L2CAP_RX_STATE_RECV;
2709 chan->tx_state = L2CAP_TX_STATE_XMIT;
2711 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2712 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2713 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2715 skb_queue_head_init(&chan->srej_q);
2717 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2721 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* second list failed: undo the first before returning the error */
2723 l2cap_seq_list_free(&chan->srej_list);
/* Pick an operating mode: keep ERTM/streaming only if the remote's
 * feature mask supports it, otherwise fall back to basic mode.
 */
2728 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2731 case L2CAP_MODE_STREAMING:
2732 case L2CAP_MODE_ERTM:
2733 if (l2cap_mode_supported(mode, remote_feat_mask))
2737 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled
 * and the peer advertising the EXT_WINDOW feature bit.
 */
2741 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2743 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed support
 * enabled and the peer advertising the EXT_FLOW feature bit.
 */
2746 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2748 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Configure the tx window: enable the extended control field (and its
 * larger window maximum) when the requested window exceeds the default
 * and the peer supports EWS; otherwise clamp to the default window.
 */
2751 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2753 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2754 __l2cap_ews_supported(chan)) {
2755 /* use extended control field */
2756 set_bit(FLAG_EXT_CTRL, &chan->flags);
2757 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2759 chan->tx_win = min_t(u16, chan->tx_win,
2760 L2CAP_DEFAULT_TX_WINDOW);
2761 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configuration Request into @data: MTU option when
 * non-default, then per-mode RFC/EFS/FCS/EWS options for basic, ERTM or
 * streaming mode.  Returns the total request length (computation
 * elided in this extract).
 */
2765 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2767 struct l2cap_conf_req *req = data;
2768 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2769 void *ptr = req->data;
2772 BT_DBG("chan %p", chan);
/* only negotiate the mode on the very first request */
2774 if (chan->num_conf_req || chan->num_conf_rsp)
2777 switch (chan->mode) {
2778 case L2CAP_MODE_STREAMING:
2779 case L2CAP_MODE_ERTM:
/* state-2 devices keep the mode the application asked for */
2780 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2783 if (__l2cap_efs_supported(chan))
2784 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* fall back if the remote does not support the desired mode */
2788 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2793 if (chan->imtu != L2CAP_DEFAULT_MTU)
2794 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2796 switch (chan->mode) {
2797 case L2CAP_MODE_BASIC:
/* if the peer knows neither ERTM nor streaming, basic mode is
 * implicit and no RFC option is needed */
2798 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2799 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2802 rfc.mode = L2CAP_MODE_BASIC;
2804 rfc.max_transmit = 0;
2805 rfc.retrans_timeout = 0;
2806 rfc.monitor_timeout = 0;
2807 rfc.max_pdu_size = 0;
2809 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2810 (unsigned long) &rfc);
2813 case L2CAP_MODE_ERTM:
2814 rfc.mode = L2CAP_MODE_ERTM;
2815 rfc.max_transmit = chan->max_tx;
/* timeouts are dictated by the responder, so request 0 */
2816 rfc.retrans_timeout = 0;
2817 rfc.monitor_timeout = 0;
/* cap the PDU size so a frame always fits one HCI fragment */
2819 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2820 L2CAP_EXT_HDR_SIZE -
2823 rfc.max_pdu_size = cpu_to_le16(size);
2825 l2cap_txwin_setup(chan);
2827 rfc.txwin_size = min_t(u16, chan->tx_win,
2828 L2CAP_DEFAULT_TX_WINDOW);
2830 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2831 (unsigned long) &rfc);
2833 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2834 l2cap_add_opt_efs(&ptr, chan);
2836 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* request no FCS when allowed; saves 2 bytes per frame */
2839 if (chan->fcs == L2CAP_FCS_NONE ||
2840 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2841 chan->fcs = L2CAP_FCS_NONE;
2842 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2845 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2846 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2850 case L2CAP_MODE_STREAMING:
2851 rfc.mode = L2CAP_MODE_STREAMING;
2853 rfc.max_transmit = 0;
2854 rfc.retrans_timeout = 0;
2855 rfc.monitor_timeout = 0;
2857 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2858 L2CAP_EXT_HDR_SIZE -
2861 rfc.max_pdu_size = cpu_to_le16(size);
2863 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2864 (unsigned long) &rfc);
2866 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2867 l2cap_add_opt_efs(&ptr, chan);
2869 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2872 if (chan->fcs == L2CAP_FCS_NONE ||
2873 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2874 chan->fcs = L2CAP_FCS_NONE;
2875 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2880 req->dcid = cpu_to_le16(chan->dcid);
2881 req->flags = cpu_to_le16(0);
2886 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2888 struct l2cap_conf_rsp *rsp = data;
2889 void *ptr = rsp->data;
2890 void *req = chan->conf_req;
2891 int len = chan->conf_len;
2892 int type, hint, olen;
2894 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2895 struct l2cap_conf_efs efs;
2897 u16 mtu = L2CAP_DEFAULT_MTU;
2898 u16 result = L2CAP_CONF_SUCCESS;
2901 BT_DBG("chan %p", chan);
2903 while (len >= L2CAP_CONF_OPT_SIZE) {
2904 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2906 hint = type & L2CAP_CONF_HINT;
2907 type &= L2CAP_CONF_MASK;
2910 case L2CAP_CONF_MTU:
2914 case L2CAP_CONF_FLUSH_TO:
2915 chan->flush_to = val;
2918 case L2CAP_CONF_QOS:
2921 case L2CAP_CONF_RFC:
2922 if (olen == sizeof(rfc))
2923 memcpy(&rfc, (void *) val, olen);
2926 case L2CAP_CONF_FCS:
2927 if (val == L2CAP_FCS_NONE)
2928 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2931 case L2CAP_CONF_EFS:
2933 if (olen == sizeof(efs))
2934 memcpy(&efs, (void *) val, olen);
2937 case L2CAP_CONF_EWS:
2939 return -ECONNREFUSED;
2941 set_bit(FLAG_EXT_CTRL, &chan->flags);
2942 set_bit(CONF_EWS_RECV, &chan->conf_state);
2943 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2944 chan->remote_tx_win = val;
2951 result = L2CAP_CONF_UNKNOWN;
2952 *((u8 *) ptr++) = type;
2957 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2960 switch (chan->mode) {
2961 case L2CAP_MODE_STREAMING:
2962 case L2CAP_MODE_ERTM:
2963 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2964 chan->mode = l2cap_select_mode(rfc.mode,
2965 chan->conn->feat_mask);
2970 if (__l2cap_efs_supported(chan))
2971 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2973 return -ECONNREFUSED;
2976 if (chan->mode != rfc.mode)
2977 return -ECONNREFUSED;
2983 if (chan->mode != rfc.mode) {
2984 result = L2CAP_CONF_UNACCEPT;
2985 rfc.mode = chan->mode;
2987 if (chan->num_conf_rsp == 1)
2988 return -ECONNREFUSED;
2990 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2991 sizeof(rfc), (unsigned long) &rfc);
2994 if (result == L2CAP_CONF_SUCCESS) {
2995 /* Configure output options and let the other side know
2996 * which ones we don't like. */
2998 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2999 result = L2CAP_CONF_UNACCEPT;
3002 set_bit(CONF_MTU_DONE, &chan->conf_state);
3004 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3007 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3008 efs.stype != L2CAP_SERV_NOTRAFIC &&
3009 efs.stype != chan->local_stype) {
3011 result = L2CAP_CONF_UNACCEPT;
3013 if (chan->num_conf_req >= 1)
3014 return -ECONNREFUSED;
3016 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3018 (unsigned long) &efs);
3020 /* Send PENDING Conf Rsp */
3021 result = L2CAP_CONF_PENDING;
3022 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3027 case L2CAP_MODE_BASIC:
3028 chan->fcs = L2CAP_FCS_NONE;
3029 set_bit(CONF_MODE_DONE, &chan->conf_state);
3032 case L2CAP_MODE_ERTM:
3033 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3034 chan->remote_tx_win = rfc.txwin_size;
3036 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3038 chan->remote_max_tx = rfc.max_transmit;
3040 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3042 L2CAP_EXT_HDR_SIZE -
3045 rfc.max_pdu_size = cpu_to_le16(size);
3046 chan->remote_mps = size;
3048 rfc.retrans_timeout =
3049 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3050 rfc.monitor_timeout =
3051 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3053 set_bit(CONF_MODE_DONE, &chan->conf_state);
3055 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3056 sizeof(rfc), (unsigned long) &rfc);
3058 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3059 chan->remote_id = efs.id;
3060 chan->remote_stype = efs.stype;
3061 chan->remote_msdu = le16_to_cpu(efs.msdu);
3062 chan->remote_flush_to =
3063 le32_to_cpu(efs.flush_to);
3064 chan->remote_acc_lat =
3065 le32_to_cpu(efs.acc_lat);
3066 chan->remote_sdu_itime =
3067 le32_to_cpu(efs.sdu_itime);
3068 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3069 sizeof(efs), (unsigned long) &efs);
3073 case L2CAP_MODE_STREAMING:
3074 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3076 L2CAP_EXT_HDR_SIZE -
3079 rfc.max_pdu_size = cpu_to_le16(size);
3080 chan->remote_mps = size;
3082 set_bit(CONF_MODE_DONE, &chan->conf_state);
3084 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3085 sizeof(rfc), (unsigned long) &rfc);
3090 result = L2CAP_CONF_UNACCEPT;
3092 memset(&rfc, 0, sizeof(rfc));
3093 rfc.mode = chan->mode;
3096 if (result == L2CAP_CONF_SUCCESS)
3097 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3099 rsp->scid = cpu_to_le16(chan->dcid);
3100 rsp->result = cpu_to_le16(result);
3101 rsp->flags = cpu_to_le16(0x0000);
3106 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3108 struct l2cap_conf_req *req = data;
3109 void *ptr = req->data;
3112 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3113 struct l2cap_conf_efs efs;
3115 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3117 while (len >= L2CAP_CONF_OPT_SIZE) {
3118 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3121 case L2CAP_CONF_MTU:
3122 if (val < L2CAP_DEFAULT_MIN_MTU) {
3123 *result = L2CAP_CONF_UNACCEPT;
3124 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3127 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3130 case L2CAP_CONF_FLUSH_TO:
3131 chan->flush_to = val;
3132 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3136 case L2CAP_CONF_RFC:
3137 if (olen == sizeof(rfc))
3138 memcpy(&rfc, (void *)val, olen);
3140 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3141 rfc.mode != chan->mode)
3142 return -ECONNREFUSED;
3146 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3147 sizeof(rfc), (unsigned long) &rfc);
3150 case L2CAP_CONF_EWS:
3151 chan->tx_win = min_t(u16, val,
3152 L2CAP_DEFAULT_EXT_WINDOW);
3153 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3157 case L2CAP_CONF_EFS:
3158 if (olen == sizeof(efs))
3159 memcpy(&efs, (void *)val, olen);
3161 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3162 efs.stype != L2CAP_SERV_NOTRAFIC &&
3163 efs.stype != chan->local_stype)
3164 return -ECONNREFUSED;
3166 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3167 sizeof(efs), (unsigned long) &efs);
3172 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3173 return -ECONNREFUSED;
3175 chan->mode = rfc.mode;
3177 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3179 case L2CAP_MODE_ERTM:
3180 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3181 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3182 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3184 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3185 chan->local_msdu = le16_to_cpu(efs.msdu);
3186 chan->local_sdu_itime =
3187 le32_to_cpu(efs.sdu_itime);
3188 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3189 chan->local_flush_to =
3190 le32_to_cpu(efs.flush_to);
3194 case L2CAP_MODE_STREAMING:
3195 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3199 req->dcid = cpu_to_le16(chan->dcid);
3200 req->flags = cpu_to_le16(0x0000);
/* Build a bare Configure Response header (no options) into 'data' with
 * the given result and flags; scid is the peer's channel id (our dcid). */
3205 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3207 struct l2cap_conf_rsp *rsp = data;
3208 void *ptr = rsp->data;
3210 BT_DBG("chan %p", chan);
3212 rsp->scid = cpu_to_le16(chan->dcid);
3213 rsp->result = cpu_to_le16(result);
3214 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connect Response (success) for a channel whose setup
 * was held back, then kick off configuration unless a Configure Request
 * was already sent (CONF_REQ_SENT test-and-set guards against doubles). */
3219 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3221 struct l2cap_conn_rsp rsp;
3222 struct l2cap_conn *conn = chan->conn;
3225 rsp.scid = cpu_to_le16(chan->dcid);
3226 rsp.dcid = cpu_to_le16(chan->scid);
3227 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3228 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3229 l2cap_send_cmd(conn, chan->ident,
3230 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3232 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3235 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3236 l2cap_build_conf_req(chan, buf), buf);
3237 chan->num_conf_req++;
/* Extract the RFC option from a successful Configure Response and apply
 * the timers/MPS to the channel.  Only relevant for ERTM/streaming mode;
 * falls back to sane defaults if the peer omitted the RFC option. */
3240 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3244 struct l2cap_conf_rfc rfc;
3246 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Nothing to do for BASIC (or LE) mode channels. */
3248 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3251 while (len >= L2CAP_CONF_OPT_SIZE) {
3252 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3255 case L2CAP_CONF_RFC:
3256 if (olen == sizeof(rfc))
3257 memcpy(&rfc, (void *)val, olen);
3262 /* Use sane default values in case a misbehaving remote device
3263 * did not send an RFC option.
3265 rfc.mode = chan->mode;
3266 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3267 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3268 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3270 BT_ERR("Expected RFC option was not found, using defaults");
/* Apply the (received or defaulted) RFC parameters per mode. */
3274 case L2CAP_MODE_ERTM:
3275 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3276 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3277 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3279 case L2CAP_MODE_STREAMING:
3280 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matching ident), treat feature discovery as done
 * and proceed with connection start. */
3284 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3286 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3288 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3291 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3292 cmd->ident == conn->info_ident) {
3293 cancel_delayed_work(&conn->info_timer);
3295 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3296 conn->info_ident = 0;
3298 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for the
 * PSM, run security/backlog checks, create and register the new channel,
 * then answer with a Connection Response (and, if features are already
 * known and security passed, start configuration).
 * NOTE(review): extraction gaps hide the error/`response` goto paths. */
3304 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3306 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3307 struct l2cap_conn_rsp rsp;
3308 struct l2cap_chan *chan = NULL, *pchan;
3309 struct sock *parent, *sk = NULL;
3310 int result, status = L2CAP_CS_NO_INFO;
3312 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3313 __le16 psm = req->psm;
3315 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3317 /* Check if we have socket listening on psm */
3318 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3320 result = L2CAP_CR_BAD_PSM;
3326 mutex_lock(&conn->chan_lock);
3329 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
3330 if (psm != cpu_to_le16(0x0001) &&
3331 !hci_conn_check_link_mode(conn->hcon)) {
3332 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3333 result = L2CAP_CR_SEC_BLOCK;
3337 result = L2CAP_CR_NO_MEM;
3339 /* Check for backlog size */
3340 if (sk_acceptq_is_full(parent)) {
3341 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3345 chan = pchan->ops->new_connection(pchan->data);
3351 /* Check if we already have channel with that dcid */
/* Duplicate source CID from the peer: zap the new socket and bail. */
3352 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3353 sock_set_flag(sk, SOCK_ZAPPED);
3354 chan->ops->close(chan->data);
3358 hci_conn_hold(conn->hcon);
3360 bacpy(&bt_sk(sk)->src, conn->src);
3361 bacpy(&bt_sk(sk)->dst, conn->dst);
3365 bt_accept_enqueue(parent, sk);
3367 __l2cap_chan_add(conn, chan);
3371 __set_chan_timer(chan, sk->sk_sndtimeo);
3373 chan->ident = cmd->ident;
/* Decide the response: if remote features are known and security
 * passes, either defer (AUTHOR_PEND) or go straight to BT_CONFIG;
 * otherwise report PENDING (authentication or info exchange). */
3375 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3376 if (l2cap_chan_check_security(chan)) {
3377 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3378 __l2cap_state_change(chan, BT_CONNECT2);
3379 result = L2CAP_CR_PEND;
3380 status = L2CAP_CS_AUTHOR_PEND;
3381 parent->sk_data_ready(parent, 0);
3383 __l2cap_state_change(chan, BT_CONFIG);
3384 result = L2CAP_CR_SUCCESS;
3385 status = L2CAP_CS_NO_INFO;
3388 __l2cap_state_change(chan, BT_CONNECT2);
3389 result = L2CAP_CR_PEND;
3390 status = L2CAP_CS_AUTHEN_PEND;
3393 __l2cap_state_change(chan, BT_CONNECT2);
3394 result = L2CAP_CR_PEND;
3395 status = L2CAP_CS_NO_INFO;
3399 release_sock(parent);
3400 mutex_unlock(&conn->chan_lock);
/* Always answer the request, even on failure paths. */
3403 rsp.scid = cpu_to_le16(scid);
3404 rsp.dcid = cpu_to_le16(dcid);
3405 rsp.result = cpu_to_le16(result);
3406 rsp.status = cpu_to_le16(status);
3407 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info yet: kick off the feature-mask exchange. */
3409 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3410 struct l2cap_info_req info;
3411 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3413 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3414 conn->info_ident = l2cap_get_ident(conn);
3416 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3418 l2cap_send_cmd(conn, conn->info_ident,
3419 L2CAP_INFO_REQ, sizeof(info), &info);
/* Success path: immediately send our first Configure Request. */
3422 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3423 result == L2CAP_CR_SUCCESS) {
3425 set_bit(CONF_REQ_SENT, &chan->conf_state);
3426 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3427 l2cap_build_conf_req(chan, buf), buf);
3428 chan->num_conf_req++;
/* Handle a Connection Response: look up the channel by scid (or by the
 * command ident for pending responses) and act on the result code —
 * success moves to BT_CONFIG and sends a Configure Request, refusal
 * tears the channel down. */
3434 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3436 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3437 u16 scid, dcid, result, status;
3438 struct l2cap_chan *chan;
3442 scid = __le16_to_cpu(rsp->scid);
3443 dcid = __le16_to_cpu(rsp->dcid);
3444 result = __le16_to_cpu(rsp->result);
3445 status = __le16_to_cpu(rsp->status);
3447 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3448 dcid, scid, result, status);
3450 mutex_lock(&conn->chan_lock);
/* Prefer lookup by our scid; fall back to the request's ident. */
3453 chan = __l2cap_get_chan_by_scid(conn, scid);
3459 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3468 l2cap_chan_lock(chan);
3471 case L2CAP_CR_SUCCESS:
3472 l2cap_state_change(chan, BT_CONFIG);
3475 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Avoid sending a second Configure Request. */
3477 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3480 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3481 l2cap_build_conf_req(chan, req), req);
3482 chan->num_conf_req++;
3486 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Refusal (or other failure): drop the channel. */
3490 l2cap_chan_del(chan, ECONNREFUSED);
3494 l2cap_chan_unlock(chan);
3497 mutex_unlock(&conn->chan_lock);
/* Choose the channel's FCS setting once configuration completes:
 * FCS only applies to ERTM/streaming; enable CRC16 unless the peer
 * asked for no FCS (CONF_NO_FCS_RECV). */
3502 static inline void set_default_fcs(struct l2cap_chan *chan)
3504 /* FCS is enabled only in ERTM or streaming mode, if one or both
3507 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3508 chan->fcs = L2CAP_FCS_NONE;
3509 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3510 chan->fcs = L2CAP_FCS_CRC16;
/* Handle a Configure Request: accumulate (possibly fragmented) options in
 * chan->conf_req, and on the final fragment parse them, send the response,
 * and advance the channel to BT_CONNECTED once both directions are done. */
3513 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3515 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3518 struct l2cap_chan *chan;
3521 dcid = __le16_to_cpu(req->dcid);
3522 flags = __le16_to_cpu(req->flags);
3524 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3526 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only makes sense in BT_CONFIG/BT_CONNECT2; otherwise the CID
 * is invalid from our point of view — send a Command Reject. */
3530 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3531 struct l2cap_cmd_rej_cid rej;
3533 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3534 rej.scid = cpu_to_le16(chan->scid);
3535 rej.dcid = cpu_to_le16(chan->dcid);
3537 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3542 /* Reject if config buffer is too small. */
3543 len = cmd_len - sizeof(*req);
3544 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3545 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3546 l2cap_build_conf_rsp(chan, rsp,
3547 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request buffer. */
3552 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3553 chan->conf_len += len;
/* Continuation flag (bit 0) set: more fragments follow. */
3555 if (flags & 0x0001) {
3556 /* Incomplete config. Send empty response. */
3557 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3558 l2cap_build_conf_rsp(chan, rsp,
3559 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3563 /* Complete config. */
3564 len = l2cap_parse_conf_req(chan, rsp);
3566 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3570 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3571 chan->num_conf_rsp++;
3573 /* Reset config buffer. */
3576 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finalize FCS, go connected, and
 * initialize ERTM state where required. */
3579 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3580 set_default_fcs(chan);
3582 l2cap_state_change(chan, BT_CONNECTED);
3584 if (chan->mode == L2CAP_MODE_ERTM ||
3585 chan->mode == L2CAP_MODE_STREAMING)
3586 err = l2cap_ertm_init(chan);
3589 l2cap_send_disconn_req(chan->conn, chan, -err);
3591 l2cap_chan_ready(chan);
3596 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3598 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3599 l2cap_build_conf_req(chan, buf), buf);
3600 chan->num_conf_req++;
3603 /* Got Conf Rsp PENDING from remote side and asume we sent
3604 Conf Rsp PENDING in the code above */
3605 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3606 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3608 /* check compatibility */
3610 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3611 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3613 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3614 l2cap_build_conf_rsp(chan, rsp,
3615 L2CAP_CONF_SUCCESS, 0x0000), rsp);
3619 l2cap_chan_unlock(chan);
/* Handle a Configure Response.  SUCCESS applies the negotiated RFC values;
 * PENDING parses the peer's counter-proposal; UNACCEPT rebuilds and resends
 * a Configure Request (bounded by L2CAP_CONF_MAX_CONF_RSP); anything else
 * disconnects.  When both sides are done the channel goes BT_CONNECTED. */
3623 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3625 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3626 u16 scid, flags, result;
3627 struct l2cap_chan *chan;
3628 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3631 scid = __le16_to_cpu(rsp->scid);
3632 flags = __le16_to_cpu(rsp->flags);
3633 result = __le16_to_cpu(rsp->result);
3635 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3638 chan = l2cap_get_chan_by_scid(conn, scid);
3643 case L2CAP_CONF_SUCCESS:
3644 l2cap_conf_rfc_get(chan, rsp->data, len);
3645 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3648 case L2CAP_CONF_PENDING:
3649 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3651 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3654 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
/* Parse failure on a pending response: reset the connection. */
3657 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3661 /* check compatibility */
3663 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3664 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3666 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3667 l2cap_build_conf_rsp(chan, buf,
3668 L2CAP_CONF_SUCCESS, 0x0000), buf);
3672 case L2CAP_CONF_UNACCEPT:
3673 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard against option data that would overflow the req buffer. */
3676 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3677 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3681 /* throw out any old stored conf requests */
3682 result = L2CAP_CONF_SUCCESS;
3683 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3686 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3690 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3691 L2CAP_CONF_REQ, len, req);
3692 chan->num_conf_req++;
3693 if (result != L2CAP_CONF_SUCCESS)
/* Default: unrecoverable result — flag the error and disconnect. */
3699 l2cap_chan_set_err(chan, ECONNRESET);
3701 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3702 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3709 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3711 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3712 set_default_fcs(chan);
3714 l2cap_state_change(chan, BT_CONNECTED);
3715 if (chan->mode == L2CAP_MODE_ERTM ||
3716 chan->mode == L2CAP_MODE_STREAMING)
3717 err = l2cap_ertm_init(chan);
3720 l2cap_send_disconn_req(chan->conn, chan, -err);
3722 l2cap_chan_ready(chan);
3726 l2cap_chan_unlock(chan);
/* Handle a Disconnection Request: acknowledge with a Disconnection
 * Response, shut the socket down, and delete the channel.  The extra
 * hold/put pair keeps the channel alive across the ops->close call. */
3730 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3732 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3733 struct l2cap_disconn_rsp rsp;
3735 struct l2cap_chan *chan;
3738 scid = __le16_to_cpu(req->scid);
3739 dcid = __le16_to_cpu(req->dcid);
3741 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3743 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid. */
3745 chan = __l2cap_get_chan_by_scid(conn, dcid);
3747 mutex_unlock(&conn->chan_lock);
3751 l2cap_chan_lock(chan);
3755 rsp.dcid = cpu_to_le16(chan->scid);
3756 rsp.scid = cpu_to_le16(chan->dcid);
3757 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3760 sk->sk_shutdown = SHUTDOWN_MASK;
3763 l2cap_chan_hold(chan);
3764 l2cap_chan_del(chan, ECONNRESET);
3766 l2cap_chan_unlock(chan);
3768 chan->ops->close(chan->data);
3769 l2cap_chan_put(chan);
3771 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnection Response to our earlier request: delete the
 * channel (err 0 = clean close), holding a reference across close. */
3776 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3778 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3780 struct l2cap_chan *chan;
3782 scid = __le16_to_cpu(rsp->scid);
3783 dcid = __le16_to_cpu(rsp->dcid);
3785 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3787 mutex_lock(&conn->chan_lock);
3789 chan = __l2cap_get_chan_by_scid(conn, scid);
3791 mutex_unlock(&conn->chan_lock);
3795 l2cap_chan_lock(chan);
3797 l2cap_chan_hold(chan);
3798 l2cap_chan_del(chan, 0);
3800 l2cap_chan_unlock(chan);
3802 chan->ops->close(chan->data);
3803 l2cap_chan_put(chan);
3805 mutex_unlock(&conn->chan_lock);
/* Handle an Information Request: answer feature-mask and fixed-channel
 * queries, and NOT_SUPPORTED for anything else. */
3810 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3812 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3815 type = __le16_to_cpu(req->type);
3817 BT_DBG("type 0x%4.4x", type);
3819 if (type == L2CAP_IT_FEAT_MASK) {
3821 u32 feat_mask = l2cap_feat_mask;
3822 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3823 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3824 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming (and, conditionally, extended flow
 * spec and extended window) on top of the base feature mask. */
3826 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3829 feat_mask |= L2CAP_FEAT_EXT_FLOW
3830 | L2CAP_FEAT_EXT_WINDOW;
3832 put_unaligned_le32(feat_mask, rsp->data);
3833 l2cap_send_cmd(conn, cmd->ident,
3834 L2CAP_INFO_RSP, sizeof(buf), buf);
3835 } else if (type == L2CAP_IT_FIXED_CHAN) {
3837 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Toggle the A2MP bit in the fixed-channel map as appropriate. */
3840 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3842 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3844 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3845 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3846 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3847 l2cap_send_cmd(conn, cmd->ident,
3848 L2CAP_INFO_RSP, sizeof(buf), buf);
3850 struct l2cap_info_rsp rsp;
3851 rsp.type = cpu_to_le16(type);
3852 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3853 l2cap_send_cmd(conn, cmd->ident,
3854 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response: record the peer's feature mask (and
 * chain a fixed-channel query if supported) or the fixed-channel map,
 * then mark info exchange done and start pending connections. */
3860 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3862 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3865 type = __le16_to_cpu(rsp->type);
3866 result = __le16_to_cpu(rsp->result);
3868 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3870 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3871 if (cmd->ident != conn->info_ident ||
3872 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3875 cancel_delayed_work(&conn->info_timer);
/* Failure still completes the exchange — proceed without features. */
3877 if (result != L2CAP_IR_SUCCESS) {
3878 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3879 conn->info_ident = 0;
3881 l2cap_conn_start(conn);
3887 case L2CAP_IT_FEAT_MASK:
3888 conn->feat_mask = get_unaligned_le32(rsp->data);
3890 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3891 struct l2cap_info_req req;
3892 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3894 conn->info_ident = l2cap_get_ident(conn);
3896 l2cap_send_cmd(conn, conn->info_ident,
3897 L2CAP_INFO_REQ, sizeof(req), &req);
3899 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3900 conn->info_ident = 0;
3902 l2cap_conn_start(conn);
3906 case L2CAP_IT_FIXED_CHAN:
3907 conn->fixed_chan_mask = rsp->data[0];
3908 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3909 conn->info_ident = 0;
3911 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  Currently a placeholder that
 * validates the length and always rejects with NO_MEM. */
3918 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3919 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3922 struct l2cap_create_chan_req *req = data;
3923 struct l2cap_create_chan_rsp rsp;
3926 if (cmd_len != sizeof(*req))
3932 psm = le16_to_cpu(req->psm);
3933 scid = le16_to_cpu(req->scid);
3935 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3937 /* Placeholder: Always reject */
3939 rsp.scid = cpu_to_le16(scid);
3940 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3941 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3943 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Create Channel Response shares its layout with Connection Response,
 * so delegate to the common handler. */
3949 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3950 struct l2cap_cmd_hdr *cmd, void *data)
3952 BT_DBG("conn %p", conn);
3954 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for 'icid' with the given result. */
3957 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3958 u16 icid, u16 result)
3960 struct l2cap_move_chan_rsp rsp;
3962 BT_DBG("icid %d, result %d", icid, result);
3964 rsp.icid = cpu_to_le16(icid);
3965 rsp.result = cpu_to_le16(result);
3967 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm with a fresh ident; the ident is stored on
 * the channel so the confirm response can be matched later.
 * NOTE(review): chan may be NULL at some call sites (see move_channel_rsp)
 * — the visible code dereferences it unconditionally; verify upstream. */
3970 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3971 struct l2cap_chan *chan, u16 icid, u16 result)
3973 struct l2cap_move_chan_cfm cfm;
3976 BT_DBG("icid %d, result %d", icid, result);
3978 ident = l2cap_get_ident(conn);
3980 chan->ident = ident;
3982 cfm.icid = cpu_to_le16(icid);
3983 cfm.result = cpu_to_le16(result);
3985 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirm for 'icid'. */
3988 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3991 struct l2cap_move_chan_cfm_rsp rsp;
3993 BT_DBG("icid %d", icid);
3995 rsp.icid = cpu_to_le16(icid);
3996 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request.  Placeholder: always refuses with
 * NOT_ALLOWED after a length check. */
3999 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4000 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4002 struct l2cap_move_chan_req *req = data;
4004 u16 result = L2CAP_MR_NOT_ALLOWED;
4006 if (cmd_len != sizeof(*req))
4009 icid = le16_to_cpu(req->icid);
4011 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4016 /* Placeholder: Always refuse */
4017 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  Placeholder: always answers with an
 * UNCONFIRMED Move Channel Confirm (passing a NULL chan). */
4022 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4023 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4025 struct l2cap_move_chan_rsp *rsp = data;
4028 if (cmd_len != sizeof(*rsp))
4031 icid = le16_to_cpu(rsp->icid);
4032 result = le16_to_cpu(rsp->result);
4034 BT_DBG("icid %d, result %d", icid, result);
4036 /* Placeholder: Always unconfirmed */
4037 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirm: validate length and acknowledge. */
4042 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4043 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4045 struct l2cap_move_chan_cfm *cfm = data;
4048 if (cmd_len != sizeof(*cfm))
4051 icid = le16_to_cpu(cfm->icid);
4052 result = le16_to_cpu(cfm->result);
4054 BT_DBG("icid %d, result %d", icid, result);
4056 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirm Response: length check and debug only. */
4061 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4062 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4064 struct l2cap_move_chan_cfm_rsp *rsp = data;
4067 if (cmd_len != sizeof(*rsp))
4070 icid = le16_to_cpu(rsp->icid);
4072 BT_DBG("icid %d", icid);
/* Validate LE connection parameters (units per the LE connection update
 * procedure): interval range 6..3200 with min <= max, supervision timeout
 * multiplier 10..3200, timeout must exceed 8*max interval, and latency
 * bounded by both 499 and the derived maximum. */
4077 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4082 if (min > max || min < 6 || max > 3200)
4085 if (to_multiplier < 10 || to_multiplier > 3200)
4088 if (max >= to_multiplier * 8)
4091 max_latency = (to_multiplier * 8 / max) - 1;
4092 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master side only):
 * validate the parameters, respond accepted/rejected, and apply accepted
 * values via hci_le_conn_update(). */
4098 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4099 struct l2cap_cmd_hdr *cmd, u8 *data)
4101 struct hci_conn *hcon = conn->hcon;
4102 struct l2cap_conn_param_update_req *req;
4103 struct l2cap_conn_param_update_rsp rsp;
4104 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may process parameter update requests. */
4107 if (!(hcon->link_mode & HCI_LM_MASTER))
4110 cmd_len = __le16_to_cpu(cmd->len);
4111 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4114 req = (struct l2cap_conn_param_update_req *) data;
4115 min = __le16_to_cpu(req->min);
4116 max = __le16_to_cpu(req->max);
4117 latency = __le16_to_cpu(req->latency);
4118 to_multiplier = __le16_to_cpu(req->to_multiplier);
4120 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4121 min, max, latency, to_multiplier);
4123 memset(&rsp, 0, sizeof(rsp));
4125 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4127 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4129 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4131 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the new parameters down to the controller. */
4135 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signaling command to its handler by opcode.
 * Echo requests are answered inline; unknown opcodes are logged. */
4140 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4141 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4145 switch (cmd->code) {
4146 case L2CAP_COMMAND_REJ:
4147 l2cap_command_rej(conn, cmd, data);
4150 case L2CAP_CONN_REQ:
4151 err = l2cap_connect_req(conn, cmd, data);
4154 case L2CAP_CONN_RSP:
4155 err = l2cap_connect_rsp(conn, cmd, data);
4158 case L2CAP_CONF_REQ:
4159 err = l2cap_config_req(conn, cmd, cmd_len, data);
4162 case L2CAP_CONF_RSP:
4163 err = l2cap_config_rsp(conn, cmd, data);
4166 case L2CAP_DISCONN_REQ:
4167 err = l2cap_disconnect_req(conn, cmd, data);
4170 case L2CAP_DISCONN_RSP:
4171 err = l2cap_disconnect_rsp(conn, cmd, data);
4174 case L2CAP_ECHO_REQ:
/* Echo: reflect the payload straight back. */
4175 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4178 case L2CAP_ECHO_RSP:
4181 case L2CAP_INFO_REQ:
4182 err = l2cap_information_req(conn, cmd, data);
4185 case L2CAP_INFO_RSP:
4186 err = l2cap_information_rsp(conn, cmd, data);
4189 case L2CAP_CREATE_CHAN_REQ:
4190 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4193 case L2CAP_CREATE_CHAN_RSP:
4194 err = l2cap_create_channel_rsp(conn, cmd, data);
4197 case L2CAP_MOVE_CHAN_REQ:
4198 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4201 case L2CAP_MOVE_CHAN_RSP:
4202 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4205 case L2CAP_MOVE_CHAN_CFM:
4206 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4209 case L2CAP_MOVE_CHAN_CFM_RSP:
4210 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4214 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signaling command; only the connection parameter
 * update request needs real handling here. */
4222 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4223 struct l2cap_cmd_hdr *cmd, u8 *data)
4225 switch (cmd->code) {
4226 case L2CAP_COMMAND_REJ:
4229 case L2CAP_CONN_PARAM_UPDATE_REQ:
4230 return l2cap_conn_param_update_req(conn, cmd, data);
4232 case L2CAP_CONN_PARAM_UPDATE_RSP:
4236 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process the signaling channel payload of an skb: iterate over the
 * packed command headers, dispatch each to the LE or BR/EDR handler,
 * and send a Command Reject for commands that fail. */
4241 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4242 struct sk_buff *skb)
4244 u8 *data = skb->data;
4246 struct l2cap_cmd_hdr cmd;
4249 l2cap_raw_recv(conn, skb);
4251 while (len >= L2CAP_CMD_HDR_SIZE) {
4253 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4254 data += L2CAP_CMD_HDR_SIZE;
4255 len -= L2CAP_CMD_HDR_SIZE;
4257 cmd_len = le16_to_cpu(cmd.len);
4259 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Reject truncated commands and the reserved ident 0. */
4261 if (cmd_len > len || !cmd.ident) {
4262 BT_DBG("corrupted command");
4266 if (conn->hcon->type == LE_LINK)
4267 err = l2cap_le_sig_cmd(conn, &cmd, data);
4269 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4272 struct l2cap_cmd_rej_unk rej;
4274 BT_ERR("Wrong link type (%d)", err);
4276 /* FIXME: Map err to a valid reason */
4277 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4278 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing CRC16 FCS of a received frame when the channel uses
 * FCS: strip the FCS from the skb, recompute over header+payload (header
 * size depends on extended vs enhanced control field) and compare. */
4288 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4290 u16 our_fcs, rcv_fcs;
4293 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4294 hdr_size = L2CAP_EXT_HDR_SIZE;
4296 hdr_size = L2CAP_ENH_HDR_SIZE;
4298 if (chan->fcs == L2CAP_FCS_CRC16) {
4299 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
/* After the trim, skb->data + skb->len points at the stored FCS. */
4300 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4301 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4303 if (our_fcs != rcv_fcs)
/* Acknowledge received frames with the F-bit set: send RNR when locally
 * busy, otherwise flush pending I-frames, and fall back to an RR s-frame
 * if nothing carried the F-bit. */
4309 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4311 struct l2cap_ctrl control;
4313 BT_DBG("chan %p", chan);
4315 memset(&control, 0, sizeof(control));
4318 control.reqseq = chan->buffer_seq;
4319 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4321 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4322 control.super = L2CAP_SUPER_RNR;
4323 l2cap_send_sframe(chan, &control);
/* Remote was busy and we still have unacked frames: restart the
 * retransmission timer. */
4326 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4327 chan->unacked_frames > 0)
4328 __set_retrans_timer(chan);
4330 /* Send pending iframes */
4331 l2cap_ertm_send(chan);
4333 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4334 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4335 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4338 control.super = L2CAP_SUPER_RR;
4339 l2cap_send_sframe(chan, &control);
/* Append 'new_frag' to skb's frag_list, tracking the tail via
 * '*last_frag', and account its length in the parent skb's counters. */
4343 static void append_skb_frag(struct sk_buff *skb,
4344 struct sk_buff *new_frag, struct sk_buff **last_frag)
4346 /* skb->len reflects data in skb as well as all fragments
4347 * skb->data_len reflects only data in fragments
/* First fragment starts the list; later ones chain off the tail. */
4349 if (!skb_has_frag_list(skb))
4350 skb_shinfo(skb)->frag_list = new_frag;
4352 new_frag->next = NULL;
4354 (*last_frag)->next = new_frag;
4355 *last_frag = new_frag;
4357 skb->len += new_frag->len;
4358 skb->data_len += new_frag->len;
4359 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM I-frames according to the SAR bits:
 * UNSEGMENTED is delivered directly; START records the SDU length and
 * begins a fragment chain; CONTINUE appends; END appends, checks the
 * total length and delivers the completed SDU.  On error the partial
 * SDU is freed and the chain reset. */
4362 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4363 struct l2cap_ctrl *control)
4367 switch (control->sar) {
4368 case L2CAP_SAR_UNSEGMENTED:
4372 err = chan->ops->recv(chan->data, skb);
4375 case L2CAP_SAR_START:
/* First two payload bytes carry the total SDU length. */
4379 chan->sdu_len = get_unaligned_le16(skb->data);
4380 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* SDU larger than our MTU: abort reassembly. */
4382 if (chan->sdu_len > chan->imtu) {
4387 if (skb->len >= chan->sdu_len)
4391 chan->sdu_last_frag = skb;
4397 case L2CAP_SAR_CONTINUE:
4401 append_skb_frag(chan->sdu, skb,
4402 &chan->sdu_last_frag);
4405 if (chan->sdu->len >= chan->sdu_len)
4415 append_skb_frag(chan->sdu, skb,
4416 &chan->sdu_last_frag);
/* END fragment: total must match the announced SDU length. */
4419 if (chan->sdu->len != chan->sdu_len)
4422 err = chan->ops->recv(chan->data, chan->sdu);
4425 /* Reassembly complete */
4427 chan->sdu_last_frag = NULL;
/* Error path: discard the partial SDU. */
4435 kfree_skb(chan->sdu);
4437 chan->sdu_last_frag = NULL;
/* Feed a local-busy state transition into the ERTM tx state machine;
 * no-op for non-ERTM channels. */
4444 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4448 if (chan->mode != L2CAP_MODE_ERTM)
4451 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4452 l2cap_tx(chan, 0, 0, event);
/* Process I-frames held in the SREJ queue.  NOTE(review): only the
 * declaration is visible in this extraction; body not shown. */
4455 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
/* Handle a received SREJ s-frame.  NOTE(review): only the declaration is
 * visible in this extraction; body not shown. */
4461 static void l2cap_handle_srej(struct l2cap_chan *chan,
4462 struct l2cap_ctrl *control)
/* Handle a received REJ s-frame.  NOTE(review): only the declaration is
 * visible in this extraction; body not shown. */
4467 static void l2cap_handle_rej(struct l2cap_chan *chan,
4468 struct l2cap_ctrl *control)
/* Classify a received I-frame sequence number relative to
 * chan->expected_tx_seq / chan->last_acked_seq and the current RX state.
 * Returns one of the L2CAP_TXSEQ_* codes: EXPECTED, UNEXPECTED (gap ->
 * caller sends SREJ), DUPLICATE, the *_SREJ variants while in
 * L2CAP_RX_STATE_SREJ_SENT, or INVALID / INVALID_IGNORE for frames
 * outside the TX window (see the "double poll" note inline).
 */
4473 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4475 BT_DBG("chan %p, txseq %d", chan, txseq);
4477 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4478 chan->expected_tx_seq);
4480 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4481 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4483 /* See notes below regarding "double poll" and
4486 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4487 BT_DBG("Invalid/Ignore - after SREJ");
4488 return L2CAP_TXSEQ_INVALID_IGNORE;
4490 BT_DBG("Invalid - in window after SREJ sent");
4491 return L2CAP_TXSEQ_INVALID;
4495 if (chan->srej_list.head == txseq) {
4496 BT_DBG("Expected SREJ");
4497 return L2CAP_TXSEQ_EXPECTED_SREJ;
4500 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4501 BT_DBG("Duplicate SREJ - txseq already stored");
4502 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4505 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4506 BT_DBG("Unexpected SREJ - not requested");
4507 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4511 if (chan->expected_tx_seq == txseq) {
4512 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4514 BT_DBG("Invalid - txseq outside tx window");
4515 return L2CAP_TXSEQ_INVALID;
4518 return L2CAP_TXSEQ_EXPECTED;
4522 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4523 __seq_offset(chan, chan->expected_tx_seq,
4524 chan->last_acked_seq)){
4525 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4526 return L2CAP_TXSEQ_DUPLICATE;
4529 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4530 /* A source of invalid packets is a "double poll" condition,
4531 * where delays cause us to send multiple poll packets. If
4532 * the remote stack receives and processes both polls,
4533 * sequence numbers can wrap around in such a way that a
4534 * resent frame has a sequence number that looks like new data
4535 * with a sequence gap. This would trigger an erroneous SREJ
4538 * Fortunately, this is impossible with a tx window that's
4539 * less than half of the maximum sequence number, which allows
4540 * invalid frames to be safely ignored.
4542 * With tx window sizes greater than half of the tx window
4543 * maximum, the frame is invalid and cannot be ignored. This
4544 * causes a disconnect.
4547 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4548 BT_DBG("Invalid/Ignore - txseq outside tx window");
4549 return L2CAP_TXSEQ_INVALID_IGNORE;
4551 BT_DBG("Invalid - txseq outside tx window");
4552 return L2CAP_TXSEQ_INVALID;
4555 BT_DBG("Unexpected - txseq indicates missing frames");
4556 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX handler for the default (RECV) state.  Dispatches on event:
 * I-frames are classified via l2cap_classify_txseq(); the expected ones
 * are reassembled, a gap queues the frame on srej_q, sends SREJ and
 * moves to L2CAP_RX_STATE_SREJ_SENT, invalid ones disconnect.  RR / RNR
 * / REJ / SREJ S-frames update the TX side (retransmit, timers, busy
 * bits).  An skb not consumed by the state machine is freed at the end
 * (skb_in_use tracking).
 */
4560 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4561 struct l2cap_ctrl *control,
4562 struct sk_buff *skb, u8 event)
4565 bool skb_in_use = 0;
4567 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4571 case L2CAP_EV_RECV_IFRAME:
4572 switch (l2cap_classify_txseq(chan, control->txseq)) {
4573 case L2CAP_TXSEQ_EXPECTED:
4574 l2cap_pass_to_tx(chan, control);
4576 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4577 BT_DBG("Busy, discarding expected seq %d",
4582 chan->expected_tx_seq = __next_seq(chan,
4585 chan->buffer_seq = chan->expected_tx_seq;
4588 err = l2cap_reassemble_sdu(chan, skb, control);
4592 if (control->final) {
4593 if (!test_and_clear_bit(CONN_REJ_ACT,
4594 &chan->conn_state)) {
4596 l2cap_retransmit_all(chan, control);
4597 l2cap_ertm_send(chan);
4601 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4602 l2cap_send_ack(chan);
4604 case L2CAP_TXSEQ_UNEXPECTED:
4605 l2cap_pass_to_tx(chan, control);
4607 /* Can't issue SREJ frames in the local busy state.
4608 * Drop this frame, it will be seen as missing
4609 * when local busy is exited.
4611 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4612 BT_DBG("Busy, discarding unexpected seq %d",
4617 /* There was a gap in the sequence, so an SREJ
4618 * must be sent for each missing frame. The
4619 * current frame is stored for later use.
4621 skb_queue_tail(&chan->srej_q, skb);
4623 BT_DBG("Queued %p (queue len %d)", skb,
4624 skb_queue_len(&chan->srej_q));
4626 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4627 l2cap_seq_list_clear(&chan->srej_list);
4628 l2cap_send_srej(chan, control->txseq);
4630 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4632 case L2CAP_TXSEQ_DUPLICATE:
4633 l2cap_pass_to_tx(chan, control);
4635 case L2CAP_TXSEQ_INVALID_IGNORE:
4637 case L2CAP_TXSEQ_INVALID:
4639 l2cap_send_disconn_req(chan->conn, chan,
4644 case L2CAP_EV_RECV_RR:
4645 l2cap_pass_to_tx(chan, control);
4646 if (control->final) {
4647 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4649 if (!test_and_clear_bit(CONN_REJ_ACT,
4650 &chan->conn_state)) {
4652 l2cap_retransmit_all(chan, control);
4655 l2cap_ertm_send(chan);
4656 } else if (control->poll) {
4657 l2cap_send_i_or_rr_or_rnr(chan);
4659 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4660 &chan->conn_state) &&
4661 chan->unacked_frames)
4662 __set_retrans_timer(chan);
4664 l2cap_ertm_send(chan);
4667 case L2CAP_EV_RECV_RNR:
4668 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4669 l2cap_pass_to_tx(chan, control);
4670 if (control && control->poll) {
4671 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4672 l2cap_send_rr_or_rnr(chan, 0);
4674 __clear_retrans_timer(chan);
4675 l2cap_seq_list_clear(&chan->retrans_list);
4677 case L2CAP_EV_RECV_REJ:
4678 l2cap_handle_rej(chan, control);
4680 case L2CAP_EV_RECV_SREJ:
4681 l2cap_handle_srej(chan, control);
4687 if (skb && !skb_in_use) {
4688 BT_DBG("Freeing %p", skb);
/* ERTM RX handler while one or more SREJs are outstanding.  All usable
 * I-frames are buffered on chan->srej_q until the sequence gap is
 * closed: the head-of-srej_list frame triggers l2cap_rx_queued_iframes()
 * to drain the queue; further gaps send more SREJs; duplicates are
 * dropped; invalid sequences disconnect.  RR/RNR/REJ/SREJ S-frames keep
 * the TX side (retransmission, timers, F-bit) in step.  An skb not
 * consumed is freed at the end (skb_in_use tracking).
 */
4695 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4696 struct l2cap_ctrl *control,
4697 struct sk_buff *skb, u8 event)
4700 u16 txseq = control->txseq;
4701 bool skb_in_use = 0;
4703 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4707 case L2CAP_EV_RECV_IFRAME:
4708 switch (l2cap_classify_txseq(chan, txseq)) {
4709 case L2CAP_TXSEQ_EXPECTED:
4710 /* Keep frame for reassembly later */
4711 l2cap_pass_to_tx(chan, control);
4712 skb_queue_tail(&chan->srej_q, skb);
4714 BT_DBG("Queued %p (queue len %d)", skb,
4715 skb_queue_len(&chan->srej_q));
4717 chan->expected_tx_seq = __next_seq(chan, txseq);
4719 case L2CAP_TXSEQ_EXPECTED_SREJ:
4720 l2cap_seq_list_pop(&chan->srej_list);
4722 l2cap_pass_to_tx(chan, control);
4723 skb_queue_tail(&chan->srej_q, skb);
4725 BT_DBG("Queued %p (queue len %d)", skb,
4726 skb_queue_len(&chan->srej_q));
4728 err = l2cap_rx_queued_iframes(chan);
4733 case L2CAP_TXSEQ_UNEXPECTED:
4734 /* Got a frame that can't be reassembled yet.
4735 * Save it for later, and send SREJs to cover
4736 * the missing frames.
4738 skb_queue_tail(&chan->srej_q, skb);
4740 BT_DBG("Queued %p (queue len %d)", skb,
4741 skb_queue_len(&chan->srej_q));
4743 l2cap_pass_to_tx(chan, control);
4744 l2cap_send_srej(chan, control->txseq);
4746 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4747 /* This frame was requested with an SREJ, but
4748 * some expected retransmitted frames are
4749 * missing. Request retransmission of missing
4752 skb_queue_tail(&chan->srej_q, skb);
4754 BT_DBG("Queued %p (queue len %d)", skb,
4755 skb_queue_len(&chan->srej_q));
4757 l2cap_pass_to_tx(chan, control);
4758 l2cap_send_srej_list(chan, control->txseq);
4760 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4761 /* We've already queued this frame. Drop this copy. */
4762 l2cap_pass_to_tx(chan, control);
4764 case L2CAP_TXSEQ_DUPLICATE:
4765 /* Expecting a later sequence number, so this frame
4766 * was already received. Ignore it completely.
4769 case L2CAP_TXSEQ_INVALID_IGNORE:
4771 case L2CAP_TXSEQ_INVALID:
4773 l2cap_send_disconn_req(chan->conn, chan,
4778 case L2CAP_EV_RECV_RR:
4779 l2cap_pass_to_tx(chan, control);
4780 if (control->final) {
4781 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4783 if (!test_and_clear_bit(CONN_REJ_ACT,
4784 &chan->conn_state)) {
4786 l2cap_retransmit_all(chan, control);
4789 l2cap_ertm_send(chan);
4790 } else if (control->poll) {
4791 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4792 &chan->conn_state) &&
4793 chan->unacked_frames) {
4794 __set_retrans_timer(chan);
4797 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4798 l2cap_send_srej_tail(chan);
4800 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4801 &chan->conn_state) &&
4802 chan->unacked_frames)
4803 __set_retrans_timer(chan);
4805 l2cap_send_ack(chan);
4808 case L2CAP_EV_RECV_RNR:
4809 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4810 l2cap_pass_to_tx(chan, control);
4811 if (control->poll) {
4812 l2cap_send_srej_tail(chan);
4814 struct l2cap_ctrl rr_control;
4815 memset(&rr_control, 0, sizeof(rr_control));
4816 rr_control.sframe = 1;
4817 rr_control.super = L2CAP_SUPER_RR;
4818 rr_control.reqseq = chan->buffer_seq;
4819 l2cap_send_sframe(chan, &rr_control);
4823 case L2CAP_EV_RECV_REJ:
4824 l2cap_handle_rej(chan, control);
4826 case L2CAP_EV_RECV_SREJ:
4827 l2cap_handle_srej(chan, control);
4831 if (skb && !skb_in_use) {
4832 BT_DBG("Freeing %p", skb);
/* Return true when reqseq acknowledges a frame in the currently unacked
 * window [expected_ack_seq, next_tx_seq], using modular sequence
 * arithmetic (__seq_offset).
 */
4839 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4841 /* Make sure reqseq is for a packet that has been sent but not acked */
4844 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4845 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM RX dispatch: after validating control->reqseq against
 * the unacked window, hand the event to the handler for the current
 * rx_state (RECV or SREJ_SENT).  An invalid reqseq is a protocol error
 * and triggers a disconnect request (ECONNRESET).
 */
4848 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4849 struct sk_buff *skb, u8 event)
4853 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4854 control, skb, event, chan->rx_state);
4856 if (__valid_reqseq(chan, control->reqseq)) {
4857 switch (chan->rx_state) {
4858 case L2CAP_RX_STATE_RECV:
4859 err = l2cap_rx_state_recv(chan, control, skb, event);
4861 case L2CAP_RX_STATE_SREJ_SENT:
4862 err = l2cap_rx_state_srej_sent(chan, control, skb,
4866 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
4867 control->reqseq, chan->next_tx_seq,
4868 chan->expected_ack_seq);
4869 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Streaming-mode receive: only a frame classified as EXPECTED is
 * reassembled; any partial SDU in progress is otherwise dropped
 * (kfree_skb(chan->sdu)) and the frame freed.  last_acked_seq /
 * expected_tx_seq always advance to follow the sender -- streaming mode
 * never retransmits.
 */
4879 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4880 struct sk_buff *skb)
4884 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
4887 if (l2cap_classify_txseq(chan, control->txseq) ==
4888 L2CAP_TXSEQ_EXPECTED) {
4889 l2cap_pass_to_tx(chan, control);
4891 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
4892 __next_seq(chan, chan->buffer_seq));
4894 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4896 l2cap_reassemble_sdu(chan, skb, control);
4899 kfree_skb(chan->sdu);
4902 chan->sdu_last_frag = NULL;
4906 BT_DBG("Freeing %p", skb);
4911 chan->last_acked_seq = control->txseq;
4912 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for a single ERTM/streaming PDU.  Unpacks the control
 * field, drops frames with a bad FCS, computes the payload length
 * (minus SDU-length header on SAR_START and FCS when CRC16 is in use)
 * and disconnects if it exceeds the negotiated MPS.  I-frames are
 * validated (F bit only legal in TX WAIT_F) and routed to l2cap_rx() or
 * l2cap_stream_rx(); S-frames are mapped through rx_func_to_event[] --
 * S-frames in streaming mode, or inconsistent F/P bits, are rejected.
 */
4917 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4919 struct l2cap_ctrl *control = &bt_cb(skb)->control;
4923 __unpack_control(chan, skb);
4928 * We can just drop the corrupted I-frame here.
4929 * Receiver will miss it and start proper recovery
4930 * procedures and ask for retransmission.
4932 if (l2cap_check_fcs(chan, skb))
4935 if (!control->sframe && control->sar == L2CAP_SAR_START)
4936 len -= L2CAP_SDULEN_SIZE;
4938 if (chan->fcs == L2CAP_FCS_CRC16)
4939 len -= L2CAP_FCS_SIZE;
4941 if (len > chan->mps) {
4942 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4946 if (!control->sframe) {
4949 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
4950 control->sar, control->reqseq, control->final,
4953 /* Validate F-bit - F=0 always valid, F=1 only
4954 * valid in TX WAIT_F
4956 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
4959 if (chan->mode != L2CAP_MODE_STREAMING) {
4960 event = L2CAP_EV_RECV_IFRAME;
4961 err = l2cap_rx(chan, control, skb, event);
4963 err = l2cap_stream_rx(chan, control, skb);
4967 l2cap_send_disconn_req(chan->conn, chan,
4970 const u8 rx_func_to_event[4] = {
4971 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
4972 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
4975 /* Only I-frames are expected in streaming mode */
4976 if (chan->mode == L2CAP_MODE_STREAMING)
4979 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
4980 control->reqseq, control->final, control->poll,
4985 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4989 /* Validate F and P bits */
4990 if (control->final && (control->poll ||
4991 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
4994 event = rx_func_to_event[control->super];
4995 if (l2cap_rx(chan, control, skb, event))
4996 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Deliver an incoming data skb on a connection-oriented channel looked
 * up by source CID.  Basic mode enforces the incoming MTU and delivers
 * via chan->ops->recv (with the inline caveat below about lack of flow
 * control); ERTM and streaming go through l2cap_data_rcv().  Unknown
 * CIDs or non-connected channels drop the packet.
 */
5006 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
5008 struct l2cap_chan *chan;
5010 chan = l2cap_get_chan_by_scid(conn, cid);
5012 BT_DBG("unknown cid 0x%4.4x", cid);
5013 /* Drop packet and return */
5018 BT_DBG("chan %p, len %d", chan, skb->len);
5020 if (chan->state != BT_CONNECTED)
5023 switch (chan->mode) {
5024 case L2CAP_MODE_BASIC:
5025 /* If socket recv buffers overflows we drop data here
5026 * which is *bad* because L2CAP has to be reliable.
5027 * But we don't have any other choice. L2CAP doesn't
5028 * provide flow control mechanism. */
5030 if (chan->imtu < skb->len)
5033 if (!chan->ops->recv(chan->data, skb))
5037 case L2CAP_MODE_ERTM:
5038 case L2CAP_MODE_STREAMING:
5039 l2cap_data_rcv(chan, skb);
5043 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5051 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame to the global channel
 * registered for this PSM, subject to state (BOUND/CONNECTED) and
 * incoming-MTU checks.
 */
5056 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5058 struct l2cap_chan *chan;
5060 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5064 BT_DBG("chan %p, len %d", chan, skb->len);
5066 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5069 if (chan->imtu < skb->len)
5072 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE data) frame to the global channel registered for
 * this CID, subject to the same state and incoming-MTU checks as the
 * connectionless path.
 */
5081 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5082 struct sk_buff *skb)
5084 struct l2cap_chan *chan;
5086 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5090 BT_DBG("chan %p, len %d", chan, skb->len);
5092 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5095 if (chan->imtu < skb->len)
5098 if (!chan->ops->recv(chan->data, skb))
/* Demultiplex one complete L2CAP frame: strip the basic header, verify
 * the header length against skb->len, then route by destination CID --
 * (LE) signaling, connectionless (reads the leading PSM), LE data/ATT,
 * SMP (connection torn down with EACCES if smp_sig_channel() rejects),
 * or an ordinary data channel.
 */
5107 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5109 struct l2cap_hdr *lh = (void *) skb->data;
5113 skb_pull(skb, L2CAP_HDR_SIZE);
5114 cid = __le16_to_cpu(lh->cid);
5115 len = __le16_to_cpu(lh->len);
5117 if (len != skb->len) {
5122 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5125 case L2CAP_CID_LE_SIGNALING:
5126 case L2CAP_CID_SIGNALING:
5127 l2cap_sig_channel(conn, skb);
5130 case L2CAP_CID_CONN_LESS:
5131 psm = get_unaligned((__le16 *) skb->data);
5133 l2cap_conless_channel(conn, psm, skb);
5136 case L2CAP_CID_LE_DATA:
5137 l2cap_att_channel(conn, cid, skb);
5141 if (smp_sig_channel(conn, skb))
5142 l2cap_conn_del(conn->hcon, EACCES);
5146 l2cap_data_channel(conn, cid, skb);
5151 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI connect indication: scan the global channel list (under
 * chan_list_lock) for listening channels whose source address matches
 * this adapter (lm1) or is a wildcard (lm2); accumulate
 * HCI_LM_ACCEPT/HCI_LM_MASTER flags and prefer the exact-address result.
 */
5153 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5155 int exact = 0, lm1 = 0, lm2 = 0;
5156 struct l2cap_chan *c;
5158 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5160 /* Find listening sockets and check their link_mode */
5161 read_lock(&chan_list_lock);
5162 list_for_each_entry(c, &chan_list, global_l) {
5163 struct sock *sk = c->sk;
5165 if (c->state != BT_LISTEN)
5168 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5169 lm1 |= HCI_LM_ACCEPT;
5170 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5171 lm1 |= HCI_LM_MASTER;
5173 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5174 lm2 |= HCI_LM_ACCEPT;
5175 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5176 lm2 |= HCI_LM_MASTER;
5179 read_unlock(&chan_list_lock);
5181 return exact ? lm1 : lm2;
/* HCI connect confirmation: on success add the L2CAP connection and mark
 * it ready; on failure tear down with the translated errno.
 */
5184 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5186 struct l2cap_conn *conn;
5188 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5191 conn = l2cap_conn_add(hcon, status);
5193 l2cap_conn_ready(conn);
5195 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI disconnect indication: report the stored disconnect reason, or
 * HCI_ERROR_REMOTE_USER_TERM when no L2CAP connection state exists.
 */
5200 int l2cap_disconn_ind(struct hci_conn *hcon)
5202 struct l2cap_conn *conn = hcon->l2cap_data;
5204 BT_DBG("hcon %p", hcon);
5207 return HCI_ERROR_REMOTE_USER_TERM;
5208 return conn->disc_reason;
/* HCI disconnect confirmation: tear down the L2CAP connection with the
 * HCI reason translated to an errno.
 */
5211 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5213 BT_DBG("hcon %p reason %d", hcon, reason);
5215 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * when encryption drops, arm the encryption timeout for MEDIUM security
 * or close the channel outright for HIGH; when encryption comes up,
 * cancel the pending timer for MEDIUM.
 */
5219 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5221 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5224 if (encrypt == 0x00) {
5225 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5226 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5227 } else if (chan->sec_level == BT_SECURITY_HIGH)
5228 l2cap_chan_close(chan, ECONNREFUSED);
5230 if (chan->sec_level == BT_SECURITY_MEDIUM)
5231 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) confirmation.  For LE links,
 * kick off SMP key distribution on success and cancel the security
 * timer.  For each channel on the connection (under chan_lock, each
 * channel individually locked): LE data channels become ready on
 * success; already-configured channels are un-suspended and run
 * l2cap_check_encryption(); channels in BT_CONNECT (re)send the connect
 * request or arm the disconnect timer; channels in BT_CONNECT2 answer
 * the pending connect request (PEND/AUTHOR_PEND for deferred setup,
 * SUCCESS, or SEC_BLOCK on failure) via l2cap_send_cmd().
 */
5235 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5237 struct l2cap_conn *conn = hcon->l2cap_data;
5238 struct l2cap_chan *chan;
5243 BT_DBG("conn %p", conn);
5245 if (hcon->type == LE_LINK) {
5246 if (!status && encrypt)
5247 smp_distribute_keys(conn, 0);
5248 cancel_delayed_work(&conn->security_timer);
5251 mutex_lock(&conn->chan_lock);
5253 list_for_each_entry(chan, &conn->chan_l, list) {
5254 l2cap_chan_lock(chan);
5256 BT_DBG("chan->scid %d", chan->scid);
5258 if (chan->scid == L2CAP_CID_LE_DATA) {
5259 if (!status && encrypt) {
5260 chan->sec_level = hcon->sec_level;
5261 l2cap_chan_ready(chan);
5264 l2cap_chan_unlock(chan);
5268 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5269 l2cap_chan_unlock(chan);
5273 if (!status && (chan->state == BT_CONNECTED ||
5274 chan->state == BT_CONFIG)) {
5275 struct sock *sk = chan->sk;
5277 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5278 sk->sk_state_change(sk);
5280 l2cap_check_encryption(chan, encrypt);
5281 l2cap_chan_unlock(chan);
5285 if (chan->state == BT_CONNECT) {
5287 l2cap_send_conn_req(chan);
5289 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5291 } else if (chan->state == BT_CONNECT2) {
5292 struct sock *sk = chan->sk;
5293 struct l2cap_conn_rsp rsp;
5299 if (test_bit(BT_SK_DEFER_SETUP,
5300 &bt_sk(sk)->flags)) {
5301 struct sock *parent = bt_sk(sk)->parent;
5302 res = L2CAP_CR_PEND;
5303 stat = L2CAP_CS_AUTHOR_PEND;
5305 parent->sk_data_ready(parent, 0);
5307 __l2cap_state_change(chan, BT_CONFIG);
5308 res = L2CAP_CR_SUCCESS;
5309 stat = L2CAP_CS_NO_INFO;
5312 __l2cap_state_change(chan, BT_DISCONN);
5313 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5314 res = L2CAP_CR_SEC_BLOCK;
5315 stat = L2CAP_CS_NO_INFO;
5320 rsp.scid = cpu_to_le16(chan->dcid);
5321 rsp.dcid = cpu_to_le16(chan->scid);
5322 rsp.result = cpu_to_le16(res);
5323 rsp.status = cpu_to_le16(stat);
5324 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5328 l2cap_chan_unlock(chan);
5331 mutex_unlock(&conn->chan_lock);
/* Reassemble L2CAP frames from HCI ACL fragments.  A start fragment
 * (!ACL_CONT) must carry at least the basic L2CAP header; if it holds a
 * complete frame it is dispatched immediately, otherwise conn->rx_skb is
 * allocated for the full length and the fragment copied in.
 * Continuation fragments are appended until conn->rx_len reaches zero,
 * at which point the assembled frame goes to l2cap_recv_frame().
 * Over-long frames/fragments or stray continuations mark the connection
 * unreliable (ECOMM) and reset the partial buffer.
 */
5336 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5338 struct l2cap_conn *conn = hcon->l2cap_data;
5341 conn = l2cap_conn_add(hcon, 0);
5346 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5348 if (!(flags & ACL_CONT)) {
5349 struct l2cap_hdr *hdr;
5353 BT_ERR("Unexpected start frame (len %d)", skb->len);
5354 kfree_skb(conn->rx_skb);
5355 conn->rx_skb = NULL;
5357 l2cap_conn_unreliable(conn, ECOMM);
5360 /* Start fragment always begin with Basic L2CAP header */
5361 if (skb->len < L2CAP_HDR_SIZE) {
5362 BT_ERR("Frame is too short (len %d)", skb->len);
5363 l2cap_conn_unreliable(conn, ECOMM);
5367 hdr = (struct l2cap_hdr *) skb->data;
5368 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5370 if (len == skb->len) {
5371 /* Complete frame received */
5372 l2cap_recv_frame(conn, skb);
5376 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5378 if (skb->len > len) {
5379 BT_ERR("Frame is too long (len %d, expected len %d)",
5381 l2cap_conn_unreliable(conn, ECOMM);
5385 /* Allocate skb for the complete frame (with header) */
5386 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5390 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5392 conn->rx_len = len - skb->len;
5394 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5396 if (!conn->rx_len) {
5397 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5398 l2cap_conn_unreliable(conn, ECOMM);
5402 if (skb->len > conn->rx_len) {
5403 BT_ERR("Fragment is too long (len %d, expected %d)",
5404 skb->len, conn->rx_len);
5405 kfree_skb(conn->rx_skb);
5406 conn->rx_skb = NULL;
5408 l2cap_conn_unreliable(conn, ECOMM);
5412 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5414 conn->rx_len -= skb->len;
5416 if (!conn->rx_len) {
5417 /* Complete frame received */
5418 l2cap_recv_frame(conn, conn->rx_skb);
5419 conn->rx_skb = NULL;
/* seq_file show callback: dump one line per registered channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode) while
 * holding chan_list_lock for read.
 */
5428 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5430 struct l2cap_chan *c;
5432 read_lock(&chan_list_lock);
5434 list_for_each_entry(c, &chan_list, global_l) {
5435 struct sock *sk = c->sk;
5437 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5438 batostr(&bt_sk(sk)->src),
5439 batostr(&bt_sk(sk)->dst),
5440 c->state, __le16_to_cpu(c->psm),
5441 c->scid, c->dcid, c->imtu, c->omtu,
5442 c->sec_level, c->mode);
5445 read_unlock(&chan_list_lock);
/* debugfs open callback: standard single_open() wrapper for the show
 * function above.
 */
5450 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5452 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based). */
5455 static const struct file_operations l2cap_debugfs_fops = {
5456 .open = l2cap_debugfs_open,
5458 .llseek = seq_lseek,
5459 .release = single_release,
/* debugfs dentry created in l2cap_init(), removed in l2cap_exit(). */
5462 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer, then create the
 * read-only "l2cap" debugfs file (failure to create it only logs an
 * error).
 */
5464 int __init l2cap_init(void)
5468 err = l2cap_init_sockets();
5473 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5474 bt_debugfs, NULL, &l2cap_debugfs_fops);
5476 BT_ERR("Failed to create L2CAP debug file");
/* Module exit: remove the debugfs entry and unregister the socket layer. */
5482 void l2cap_exit(void)
5484 debugfs_remove(l2cap_debugfs)
5485 l2cap_cleanup_sockets();
/* Load-time knob (writable at runtime, 0644) to disable ERTM support. */
5488 module_param(disable_ertm, bool, 0644);
5489 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");