2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
/* Module-wide L2CAP state.
 *
 * disable_ertm: when true, Enhanced Retransmission / Streaming modes are
 * not offered.  Default must be false (ERTM enabled) — this file builds
 * and advertises the full ERTM machinery (seq lists, L2CAP_FEAT_ERTM),
 * and the upstream default is enabled; initialising it to 1 silently
 * disabled ERTM for every channel.  Also, a bool should not be set from
 * an integer literal.
 */
60 bool disable_ertm;

/* Locally supported feature mask and fixed-channel map advertised in
 * information responses. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

/* All registered channels, protected by chan_list_lock. */
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations of signalling/config helpers defined later in
 * the file.
 * NOTE(review): the l2cap_send_cmd prototype appears truncated in this
 * extract (trailing "void *data);" not visible) — confirm against the
 * full source. */
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
77 struct sk_buff_head *skbs, u8 event);
79 /* ---- L2CAP channels ---- */
/* Per-connection channel lookup helpers.  The double-underscore
 * variants walk conn->chan_l and must run with conn->chan_lock held.
 * NOTE(review): braces/returns are elided in this extract; comments
 * describe only the visible logic. */

/* Find a channel on this connection by destination CID. */
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
85 list_for_each_entry(c, &conn->chan_l, list) {

/* Find a channel on this connection by source CID. */
92 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
96 list_for_each_entry(c, &conn->chan_l, list) {

103 /* Find channel with given SCID.
104 * Returns locked channel. */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 struct l2cap_chan *c;

/* Serialize the lookup against channel add/remove. */
109 mutex_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
113 mutex_unlock(&conn->chan_lock);

/* Find the channel that issued the signalling command 'ident'. */
118 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
120 struct l2cap_chan *c;
122 list_for_each_entry(c, &conn->chan_l, list) {
123 if (c->ident == ident)
/* Search the global channel list for a channel bound to the given
 * source PSM and local bdaddr.  Caller holds chan_list_lock. */
129 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
131 struct l2cap_chan *c;
133 list_for_each_entry(c, &chan_list, global_l) {
134 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))

/* Bind 'chan' to a PSM.  A requested PSM fails if already taken on
 * this source address; with no PSM, an odd dynamic PSM in the range
 * 0x1001..0x10ff is auto-allocated.
 * NOTE(review): error-return lines are elided in this extract. */
140 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
144 write_lock(&chan_list_lock);
146 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Dynamic PSMs are odd; step by 2 keeps the low bit set. */
159 for (p = 0x1001; p < 0x1100; p += 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
161 chan->psm = cpu_to_le16(p);
162 chan->sport = cpu_to_le16(p);
169 write_unlock(&chan_list_lock);

/* Bind 'chan' to a fixed source CID (no PSM). */
173 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
175 write_lock(&chan_list_lock);
179 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic source CID on this connection by
 * linear scan of [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
184 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
186 u16 cid = L2CAP_CID_DYN_START;
188 for (; cid < L2CAP_CID_DYN_END; cid++) {
189 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move the channel to 'state' and notify the owner via the
 * state_change callback.  __ variant assumes the socket is locked. */
196 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
198 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
199 state_to_string(state));
202 chan->ops->state_change(chan->data, state);

/* Locked wrapper around __l2cap_state_change (takes the sock lock;
 * lock/unlock lines elided in this extract). */
205 static void l2cap_state_change(struct l2cap_chan *chan, int state)
207 struct sock *sk = chan->sk;
210 __l2cap_state_change(chan, state);

/* Record 'err' on the channel's socket; __ variant assumes the socket
 * is already locked. */
214 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
216 struct sock *sk = chan->sk;

221 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
223 struct sock *sk = chan->sk;
226 __l2cap_chan_set_err(chan, err);
/* Return the skb in 'head' whose ERTM control txseq equals 'seq'
 * (NULL-return path elided in this extract). */
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory

/* Allocate and clear the backing array; every slot starts as
 * L2CAP_SEQ_LIST_CLEAR (allocation-failure check elided here). */
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
256 size_t alloc_size, i;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size = roundup_pow_of_two(size);
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

/* Release the backing array (kfree(NULL) is a no-op). */
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
279 kfree(seq_list->list);

282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
285 /* Constant-time check for list membership */
286 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;

/* Remove 'seq' from the list: O(1) when it is the head, otherwise a
 * linear walk to find its predecessor.  Returns the removed value or
 * L2CAP_SEQ_LIST_CLEAR if absent/empty. */
289 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
291 u16 mask = seq_list->mask;
293 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR;
296 } else if (seq_list->head == seq) {
297 /* Head can be removed in constant time */
298 seq_list->head = seq_list->list[seq & mask];
299 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* Removing the last element empties the list entirely. */
301 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
302 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
303 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 /* Walk the list to find the sequence number */
307 u16 prev = seq_list->head;
308 while (seq_list->list[prev & mask] != seq) {
309 prev = seq_list->list[prev & mask];
310 if (prev == L2CAP_SEQ_LIST_TAIL)
311 return L2CAP_SEQ_LIST_CLEAR;
314 /* Unlink the number from the list and clear it */
315 seq_list->list[prev & mask] = seq_list->list[seq & mask];
316 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
317 if (seq_list->tail == seq)
318 seq_list->tail = prev;

323 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list, seq_list->head);

/* Reset the list to empty; skips the array sweep when already empty. */
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;

/* Append 'seq' at the tail, ignoring duplicates already present. */
343 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
345 u16 mask = seq_list->mask;
347 /* All appends happen in constant time */
349 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
/* First element becomes the head; otherwise chain from old tail. */
352 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
353 seq_list->head = seq;
355 seq_list->list[seq_list->tail & mask] = seq;
357 seq_list->tail = seq;
358 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with a
 * state-dependent reason, notify the owner, and drop the reference the
 * timer held.  Runs under conn->chan_lock + the channel lock. */
361 static void l2cap_chan_timeout(struct work_struct *work)
363 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
365 struct l2cap_conn *conn = chan->conn;
368 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
370 mutex_lock(&conn->chan_lock);
371 l2cap_chan_lock(chan);
/* Timed out while connected/configuring or mid-connect (above SDP
 * security level) -> report connection refused; other states elided. */
373 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
374 reason = ECONNREFUSED;
375 else if (chan->state == BT_CONNECT &&
376 chan->sec_level != BT_SECURITY_SDP)
377 reason = ECONNREFUSED;
381 l2cap_chan_close(chan, reason);
383 l2cap_chan_unlock(chan);
/* ops->close is called outside the channel lock but inside chan_lock. */
385 chan->ops->close(chan->data);
386 mutex_unlock(&conn->chan_lock);
388 l2cap_chan_put(chan);
/* Allocate a new channel, register it on the global list and return it
 * with an initial reference.  Returns NULL on allocation failure (the
 * check is elided in this extract). */
391 struct l2cap_chan *l2cap_chan_create(void)
393 struct l2cap_chan *chan;
395 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
399 mutex_init(&chan->lock);
401 write_lock(&chan_list_lock);
402 list_add(&chan->global_l, &chan_list);
403 write_unlock(&chan_list_lock);
405 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
407 chan->state = BT_OPEN;
409 atomic_set(&chan->refcnt, 1);
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
414 BT_DBG("chan %p", chan);
/* Unregister the channel from the global list and drop the creation
 * reference; the channel is freed when the last reference goes. */
419 void l2cap_chan_destroy(struct l2cap_chan *chan)
421 write_lock(&chan_list_lock);
422 list_del(&chan->global_l);
423 write_unlock(&chan_list_lock);
425 l2cap_chan_put(chan);
/* Install the default ERTM/security parameters on a fresh channel. */
428 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
430 chan->fcs = L2CAP_FCS_CRC16;
431 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
432 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
433 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
434 chan->sec_level = BT_SECURITY_LOW;
436 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach 'chan' to 'conn': assign CIDs and default MTU/QoS parameters
 * according to the channel type and link type, take a channel
 * reference, and link it on conn->chan_l.  Caller holds
 * conn->chan_lock (l2cap_chan_add below takes it). */
439 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
441 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
442 __le16_to_cpu(chan->psm), chan->dcid);
444 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
448 switch (chan->chan_type) {
449 case L2CAP_CHAN_CONN_ORIENTED:
/* LE links use the fixed LE data CID; ACL gets a dynamic CID. */
450 if (conn->hcon->type == LE_LINK) {
452 chan->omtu = L2CAP_LE_DEFAULT_MTU;
453 chan->scid = L2CAP_CID_LE_DATA;
454 chan->dcid = L2CAP_CID_LE_DATA;
456 /* Alloc CID for connection-oriented socket */
457 chan->scid = l2cap_alloc_cid(conn);
458 chan->omtu = L2CAP_DEFAULT_MTU;
462 case L2CAP_CHAN_CONN_LESS:
463 /* Connectionless socket */
464 chan->scid = L2CAP_CID_CONN_LESS;
465 chan->dcid = L2CAP_CID_CONN_LESS;
466 chan->omtu = L2CAP_DEFAULT_MTU;
470 /* Raw socket can send/recv signalling messages only */
471 chan->scid = L2CAP_CID_SIGNALING;
472 chan->dcid = L2CAP_CID_SIGNALING;
473 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default extended flow-spec values (best effort). */
476 chan->local_id = L2CAP_BESTEFFORT_ID;
477 chan->local_stype = L2CAP_SERV_BESTEFFORT;
478 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
479 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
480 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
481 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* The connection's channel list owns a reference. */
483 l2cap_chan_hold(chan);
485 list_add(&chan->list, &conn->chan_l);

/* Locked wrapper around __l2cap_chan_add. */
488 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
490 mutex_lock(&conn->chan_lock);
491 __l2cap_chan_add(conn, chan);
492 mutex_unlock(&conn->chan_lock);
/* Detach the channel from its connection and tear down its state:
 * stop timers, unlink, drop the list reference and hcon reference,
 * mark the socket closed/zapped, wake any accepting parent, and purge
 * ERTM queues.  Caller holds conn->chan_lock and the channel lock. */
495 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
497 struct sock *sk = chan->sk;
498 struct l2cap_conn *conn = chan->conn;
499 struct sock *parent = bt_sk(sk)->parent;
501 __clear_chan_timer(chan);
503 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
506 /* Delete from channel list */
507 list_del(&chan->list);
509 l2cap_chan_put(chan);
512 hci_conn_put(conn->hcon);
517 __l2cap_state_change(chan, BT_CLOSED);
518 sock_set_flag(sk, SOCK_ZAPPED);
521 __l2cap_chan_set_err(chan, err);
/* Pending-accept sockets are unlinked and the listener is woken;
 * otherwise the socket itself is notified (branching elided here). */
524 bt_accept_unlink(sk);
525 parent->sk_data_ready(parent, 0);
527 sk->sk_state_change(sk);
/* Nothing more to free if configuration never completed. */
531 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
534 skb_queue_purge(&chan->tx_q);
536 if (chan->mode == L2CAP_MODE_ERTM) {
537 struct srej_list *l, *tmp;
539 __clear_retrans_timer(chan);
540 __clear_monitor_timer(chan);
541 __clear_ack_timer(chan);
543 skb_queue_purge(&chan->srej_q);
545 l2cap_seq_list_free(&chan->srej_list);
546 l2cap_seq_list_free(&chan->retrans_list);
547 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel of a listening socket. */
554 static void l2cap_chan_cleanup_listen(struct sock *parent)
558 BT_DBG("parent %p", parent);
560 /* Close not yet accepted channels */
561 while ((sk = bt_accept_dequeue(parent, NULL))) {
562 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
564 l2cap_chan_lock(chan);
565 __clear_chan_timer(chan);
566 l2cap_chan_close(chan, ECONNRESET);
567 l2cap_chan_unlock(chan);
/* Owner close callback runs after the channel lock is released. */
569 chan->ops->close(chan->data);
/* State-machine driven channel shutdown.  Depending on chan->state it
 * cleans up a listener, sends a disconnect request, answers a pending
 * connect request with a reject, or simply deletes the channel.
 * NOTE(review): the switch-case labels themselves are elided in this
 * extract; comments mark the visible per-state actions. */
573 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
575 struct l2cap_conn *conn = chan->conn;
576 struct sock *sk = chan->sk;
578 BT_DBG("chan %p state %s sk %p", chan,
579 state_to_string(chan->state), sk);
581 switch (chan->state) {
/* Listening socket: tear down the pending-accept backlog. */
584 l2cap_chan_cleanup_listen(sk);
586 __l2cap_state_change(chan, BT_CLOSED);
587 sock_set_flag(sk, SOCK_ZAPPED);
/* Connected/configuring ACL channel: send a disconnect request and
 * re-arm the channel timer while waiting for the response. */
593 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
594 conn->hcon->type == ACL_LINK) {
595 __set_chan_timer(chan, sk->sk_sndtimeo);
596 l2cap_send_disconn_req(conn, chan, reason);
598 l2cap_chan_del(chan, reason);
/* Incoming connect pending: reject it before deleting the channel. */
602 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
603 conn->hcon->type == ACL_LINK) {
604 struct l2cap_conn_rsp rsp;
607 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
608 result = L2CAP_CR_SEC_BLOCK;
610 result = L2CAP_CR_BAD_PSM;
611 l2cap_state_change(chan, BT_DISCONN);
/* Response carries our dcid/scid swapped into the peer's view. */
613 rsp.scid = cpu_to_le16(chan->dcid);
614 rsp.dcid = cpu_to_le16(chan->scid);
615 rsp.result = cpu_to_le16(result);
616 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
617 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
621 l2cap_chan_del(chan, reason);
/* Early connect states: no signalling needed, just delete. */
626 l2cap_chan_del(chan, reason);
631 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel type/PSM and requested security level to an HCI
 * authentication requirement: raw channels use dedicated bonding,
 * SDP (PSM 0x0001) avoids bonding, everything else uses general
 * bonding. */
637 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
639 if (chan->chan_type == L2CAP_CHAN_RAW) {
640 switch (chan->sec_level) {
641 case BT_SECURITY_HIGH:
642 return HCI_AT_DEDICATED_BONDING_MITM;
643 case BT_SECURITY_MEDIUM:
644 return HCI_AT_DEDICATED_BONDING;
646 return HCI_AT_NO_BONDING;
/* SDP never bonds; LOW is downgraded to the dedicated SDP level. */
648 } else if (chan->psm == cpu_to_le16(0x0001)) {
649 if (chan->sec_level == BT_SECURITY_LOW)
650 chan->sec_level = BT_SECURITY_SDP;
652 if (chan->sec_level == BT_SECURITY_HIGH)
653 return HCI_AT_NO_BONDING_MITM;
655 return HCI_AT_NO_BONDING;
657 switch (chan->sec_level) {
658 case BT_SECURITY_HIGH:
659 return HCI_AT_GENERAL_BONDING_MITM;
660 case BT_SECURITY_MEDIUM:
661 return HCI_AT_GENERAL_BONDING;
663 return HCI_AT_NO_BONDING;

668 /* Service level security */
669 int l2cap_chan_check_security(struct l2cap_chan *chan)
671 struct l2cap_conn *conn = chan->conn;
674 auth_type = l2cap_get_auth_type(chan);
676 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for this connection,
 * wrapping within the kernel-reserved 1..128 range under conn->lock. */
679 static u8 l2cap_get_ident(struct l2cap_conn *conn)
683 /* Get next available identificator.
684 * 1 - 128 are used by kernel.
685 * 685 - 199 are reserved.
686 * 200 - 254 are used by utilities like l2ping, etc.
689 spin_lock(&conn->lock);
691 if (++conn->tx_ident > 128)
696 spin_unlock(&conn->lock);
/* Build a signalling command skb and push it on the HCI channel.
 * Non-flush-capable controllers get ACL_START_NO_FLUSH; signalling
 * always goes out at maximum priority with the radio forced active.
 * (NULL-skb early return elided in this extract.) */
701 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
703 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
706 BT_DBG("code 0x%2.2x", code);
711 if (lmp_no_flush_capable(conn->hcon->hdev))
712 flags = ACL_START_NO_FLUSH;
716 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
717 skb->priority = HCI_PRIO_MAX;
719 hci_send_acl(conn->hchan, skb, flags);

/* Transmit a data skb on the channel's HCI chan, honouring the
 * channel's flushable and force-active flags. */
722 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
724 struct hci_conn *hcon = chan->conn->hcon;
727 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
730 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
731 lmp_no_flush_capable(hcon->hdev))
732 flags = ACL_START_NO_FLUSH;
736 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
737 hci_send_acl(chan->conn->hchan, skb, flags);
/* ERTM control-field (de)serialisation.  The enhanced format is a
 * 16-bit field, the extended format a 32-bit field; an S-frame is
 * indicated by the FRAME_TYPE bit and carries poll/supervise bits,
 * while an I-frame carries SAR and txseq. */

/* Decode a 16-bit enhanced control field into 'control'. */
740 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
742 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
743 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
745 if (enh & L2CAP_CTRL_FRAME_TYPE) {
748 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
749 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
756 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
757 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

/* Decode a 32-bit extended control field into 'control'. */
764 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
766 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
767 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
769 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
772 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
773 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
780 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
781 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

/* Pull and decode the control field at the head of an incoming skb,
 * choosing the format from the channel's FLAG_EXT_CTRL flag. */
788 static inline void __unpack_control(struct l2cap_chan *chan,
791 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
792 __unpack_extended_control(get_unaligned_le32(skb->data),
793 &bt_cb(skb)->control);
794 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
796 __unpack_enhanced_control(get_unaligned_le16(skb->data),
797 &bt_cb(skb)->control);
798 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);

/* Encode 'control' as a 32-bit extended control field. */
802 static u32 __pack_extended_control(struct l2cap_ctrl *control)
806 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
807 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
809 if (control->sframe) {
810 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
811 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
812 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
814 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
815 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;

/* Encode 'control' as a 16-bit enhanced control field. */
821 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
825 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
826 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
828 if (control->sframe) {
829 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
830 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
831 packed |= L2CAP_CTRL_FRAME_TYPE;
833 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
834 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;

/* Write the encoded control field just past the basic L2CAP header. */
840 static inline void __pack_control(struct l2cap_chan *chan,
841 struct l2cap_ctrl *control,
844 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
845 put_unaligned_le32(__pack_extended_control(control),
846 skb->data + L2CAP_HDR_SIZE);
848 put_unaligned_le16(__pack_enhanced_control(control),
849 skb->data + L2CAP_HDR_SIZE);
/* Build an S-frame PDU: basic header + (enhanced|extended) control
 * field + optional CRC16 FCS over header and control.  Returns the skb
 * or ERR_PTR(-ENOMEM). */
853 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
857 struct l2cap_hdr *lh;
/* Header length depends on the negotiated control-field format. */
860 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
861 hlen = L2CAP_EXT_HDR_SIZE;
863 hlen = L2CAP_ENH_HDR_SIZE;
865 if (chan->fcs == L2CAP_FCS_CRC16)
866 hlen += L2CAP_FCS_SIZE;
868 skb = bt_skb_alloc(hlen, GFP_KERNEL);
871 return ERR_PTR(-ENOMEM);
873 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
874 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
875 lh->cid = cpu_to_le16(chan->dcid);
877 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
878 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
880 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* FCS covers everything written so far. */
882 if (chan->fcs == L2CAP_FCS_CRC16) {
883 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
884 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
887 skb->priority = HCI_PRIO_MAX;
/* Transmit a supervisory frame described by 'control': fold in a
 * pending F-bit, maintain the RNR-sent flag, record the acked reqseq,
 * then build and send the PDU. */
891 static void l2cap_send_sframe(struct l2cap_chan *chan,
892 struct l2cap_ctrl *control)
897 BT_DBG("chan %p, control %p", chan, control);
899 if (!control->sframe)
/* A queued final bit is consumed by this frame. */
902 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
906 if (control->super == L2CAP_SUPER_RR)
907 clear_bit(CONN_RNR_SENT, &chan->conn_state);
908 else if (control->super == L2CAP_SUPER_RNR)
909 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* RR/RNR acknowledge reqseq, so the delayed ack becomes redundant. */
911 if (control->super != L2CAP_SUPER_SREJ) {
912 chan->last_acked_seq = control->reqseq;
913 __clear_ack_timer(chan);
916 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
917 control->final, control->poll, control->super);
919 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
920 control_field = __pack_extended_control(control);
922 control_field = __pack_enhanced_control(control);
924 skb = l2cap_create_sframe_pdu(chan, control_field);
926 l2cap_do_send(chan, skb);

/* Send RNR while locally busy, otherwise RR, acking buffer_seq. */
929 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
931 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
932 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
933 set_bit(CONN_RNR_SENT, &chan->conn_state);
935 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
937 control |= __set_reqseq(chan, chan->buffer_seq);
/* True when no connect request from this channel is still pending. */
940 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
942 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);

/* Issue an L2CAP connect request, remembering the ident used so the
 * response can be matched, and marking the request pending. */
945 static void l2cap_send_conn_req(struct l2cap_chan *chan)
947 struct l2cap_conn *conn = chan->conn;
948 struct l2cap_conn_req req;
950 req.scid = cpu_to_le16(chan->scid);
953 chan->ident = l2cap_get_ident(conn);
955 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
957 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Mark a channel fully connected: clear all configuration flags and
 * the channel timer, transition to BT_CONNECTED, and wake the socket
 * (and the listening parent, if any). */
960 static void l2cap_chan_ready(struct l2cap_chan *chan)
962 struct sock *sk = chan->sk;
967 parent = bt_sk(sk)->parent;
969 BT_DBG("sk %p, parent %p", sk, parent);
971 /* This clears all conf flags, including CONF_NOT_COMPLETE */
972 chan->conf_state = 0;
973 __clear_chan_timer(chan);
975 __l2cap_state_change(chan, BT_CONNECTED);
976 sk->sk_state_change(sk);
979 parent->sk_data_ready(parent, 0);
/* Kick off channel establishment.  LE channels are ready immediately;
 * on BR/EDR we either send the connect request (once the feature-mask
 * exchange has finished and security passes) or first issue an
 * information request for the feature mask. */
984 static void l2cap_do_start(struct l2cap_chan *chan)
986 struct l2cap_conn *conn = chan->conn;
988 if (conn->hcon->type == LE_LINK) {
989 l2cap_chan_ready(chan);
/* Feature mask already requested: wait for it to complete, then
 * connect if security allows and nothing is pending. */
993 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
994 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
997 if (l2cap_chan_check_security(chan) &&
998 __l2cap_no_conn_pending(chan))
999 l2cap_send_conn_req(chan);
1001 struct l2cap_info_req req;
1002 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1004 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1005 conn->info_ident = l2cap_get_ident(conn);
/* Bound the info exchange with a timeout. */
1007 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1009 l2cap_send_cmd(conn, conn->info_ident,
1010 L2CAP_INFO_REQ, sizeof(req), &req);
/* Non-zero when 'mode' is supported by both the local feature mask and
 * the remote 'feat_mask'.  (The disable_ertm guard around adding
 * ERTM/STREAMING to the local mask is elided in this extract.) */
1014 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1016 u32 local_feat_mask = l2cap_feat_mask;
1018 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1021 case L2CAP_MODE_ERTM:
1022 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1023 case L2CAP_MODE_STREAMING:
1024 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a disconnect request for 'chan', stopping all ERTM timers
 * first, then move the channel to BT_DISCONN with the given error. */
1030 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1032 struct sock *sk = chan->sk;
1033 struct l2cap_disconn_req req;
1038 if (chan->mode == L2CAP_MODE_ERTM) {
1039 __clear_retrans_timer(chan);
1040 __clear_monitor_timer(chan);
1041 __clear_ack_timer(chan);
1044 req.dcid = cpu_to_le16(chan->dcid);
1045 req.scid = cpu_to_le16(chan->scid);
1046 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1047 L2CAP_DISCONN_REQ, sizeof(req), &req);
1050 __l2cap_state_change(chan, BT_DISCONN);
1051 __l2cap_chan_set_err(chan, err);
1055 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on 'conn' forward once the
 * link is usable: BT_CONNECT channels send their connect request (or
 * are closed if their mode is unsupported); BT_CONNECT2 channels
 * answer the peer's pending connect request according to security and
 * defer-setup state, then start configuration. */
1056 static void l2cap_conn_start(struct l2cap_conn *conn)
1058 struct l2cap_chan *chan, *tmp;
1060 BT_DBG("conn %p", conn);
1062 mutex_lock(&conn->chan_lock);
1064 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1065 struct sock *sk = chan->sk;
1067 l2cap_chan_lock(chan);
1069 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1070 l2cap_chan_unlock(chan);
1074 if (chan->state == BT_CONNECT) {
/* Hold off while security is unresolved or a request is in flight. */
1075 if (!l2cap_chan_check_security(chan) ||
1076 !__l2cap_no_conn_pending(chan)) {
1077 l2cap_chan_unlock(chan);
/* A state-2 device with an unsupported mode cannot proceed. */
1081 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1082 && test_bit(CONF_STATE2_DEVICE,
1083 &chan->conf_state)) {
1084 l2cap_chan_close(chan, ECONNRESET);
1085 l2cap_chan_unlock(chan);
1089 l2cap_send_conn_req(chan);
1091 } else if (chan->state == BT_CONNECT2) {
1092 struct l2cap_conn_rsp rsp;
1094 rsp.scid = cpu_to_le16(chan->dcid);
1095 rsp.dcid = cpu_to_le16(chan->scid);
1097 if (l2cap_chan_check_security(chan)) {
/* Defer-setup keeps the request pending for user-space authorisation;
 * otherwise accept and enter configuration. */
1099 if (test_bit(BT_SK_DEFER_SETUP,
1100 &bt_sk(sk)->flags)) {
1101 struct sock *parent = bt_sk(sk)->parent;
1102 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1103 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1105 parent->sk_data_ready(parent, 0);
1108 __l2cap_state_change(chan, BT_CONFIG);
1109 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1110 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1114 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1115 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1118 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only a fresh, successful accept triggers the first config request. */
1121 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1122 rsp.result != L2CAP_CR_SUCCESS) {
1123 l2cap_chan_unlock(chan);
1127 set_bit(CONF_REQ_SENT, &chan->conf_state);
1128 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1129 l2cap_build_conf_req(chan, buf), buf);
1130 chan->num_conf_req++;
1133 l2cap_chan_unlock(chan);
1136 mutex_unlock(&conn->chan_lock);
1139 /* Find socket with cid and source/destination bdaddr.
1140 * Returns closest match, locked.
/* Preference order: exact src+dst match wins immediately; otherwise
 * the best wildcard (BDADDR_ANY) match found is remembered in c1. */
1142 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1146 struct l2cap_chan *c, *c1 = NULL;
1148 read_lock(&chan_list_lock);
1150 list_for_each_entry(c, &chan_list, global_l) {
1151 struct sock *sk = c->sk;
1153 if (state && c->state != state)
1156 if (c->scid == cid) {
1157 int src_match, dst_match;
1158 int src_any, dst_any;
1161 src_match = !bacmp(&bt_sk(sk)->src, src);
1162 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1163 if (src_match && dst_match) {
1164 read_unlock(&chan_list_lock);
1169 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1170 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1171 if ((src_match && dst_any) || (src_any && dst_match) ||
1172 (src_any && dst_any))
1177 read_unlock(&chan_list_lock);
/* Incoming LE link: if a listener exists on the LE data CID, spawn a
 * child channel, enqueue it on the listener's accept queue, attach it
 * to the connection and mark it connected.  (Early-exit paths for no
 * listener / full backlog are elided in this extract.) */
1182 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1184 struct sock *parent, *sk;
1185 struct l2cap_chan *chan, *pchan;
1189 /* Check if we have socket listening on cid */
1190 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1191 conn->src, conn->dst);
1199 /* Check for backlog size */
1200 if (sk_acceptq_is_full(parent)) {
1201 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1205 chan = pchan->ops->new_connection(pchan->data);
/* Pin the hci_conn while the new channel references it. */
1211 hci_conn_hold(conn->hcon);
1213 bacpy(&bt_sk(sk)->src, conn->src);
1214 bacpy(&bt_sk(sk)->dst, conn->dst);
1216 bt_accept_enqueue(parent, sk);
1218 l2cap_chan_add(conn, chan);
1220 __set_chan_timer(chan, sk->sk_sndtimeo);
1222 __l2cap_state_change(chan, BT_CONNECTED);
1223 parent->sk_data_ready(parent, 0);
1226 release_sock(parent);
/* The underlying link is up: handle incoming LE listeners, start SMP
 * on outgoing LE links, then walk every channel — LE channels become
 * ready once security passes, non-connection-oriented channels go
 * straight to BT_CONNECTED, and BT_CONNECT channels are (re)started. */
1229 static void l2cap_conn_ready(struct l2cap_conn *conn)
1231 struct l2cap_chan *chan;
1233 BT_DBG("conn %p", conn);
1235 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1236 l2cap_le_conn_ready(conn);
1238 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1239 smp_conn_security(conn, conn->hcon->pending_sec_level);
1241 mutex_lock(&conn->chan_lock);
1243 list_for_each_entry(chan, &conn->chan_l, list) {
1245 l2cap_chan_lock(chan);
1247 if (conn->hcon->type == LE_LINK) {
1248 if (smp_conn_security(conn, chan->sec_level))
1249 l2cap_chan_ready(chan);
1251 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1252 struct sock *sk = chan->sk;
1253 __clear_chan_timer(chan);
1255 __l2cap_state_change(chan, BT_CONNECTED);
1256 sk->sk_state_change(sk);
1259 } else if (chan->state == BT_CONNECT)
1260 l2cap_do_start(chan);
1262 l2cap_chan_unlock(chan);
1265 mutex_unlock(&conn->chan_lock);
1268 /* Notify sockets that we cannot guaranty reliability anymore */
/* Only channels that asked for forced reliability get the error. */
1269 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1271 struct l2cap_chan *chan;
1273 BT_DBG("conn %p", conn);
1275 mutex_lock(&conn->chan_lock);
1277 list_for_each_entry(chan, &conn->chan_l, list) {
1278 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1279 __l2cap_chan_set_err(chan, err);
1282 mutex_unlock(&conn->chan_lock);

/* Feature-mask info exchange timed out: treat it as done (with no
 * features learned) and let pending channels proceed. */
1285 static void l2cap_info_timeout(struct work_struct *work)
1287 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1290 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1291 conn->info_ident = 0;
1293 l2cap_conn_start(conn);
/* Tear down the whole L2CAP connection on HCI disconnect: delete every
 * channel (holding a temporary ref across the callback), free the HCI
 * channel, cancel the info/security timers, and detach from the hcon. */
1296 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1298 struct l2cap_conn *conn = hcon->l2cap_data;
1299 struct l2cap_chan *chan, *l;
1304 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled frame. */
1306 kfree_skb(conn->rx_skb);
1308 mutex_lock(&conn->chan_lock);
1311 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
/* Extra ref keeps 'chan' alive across del + ops->close. */
1312 l2cap_chan_hold(chan);
1313 l2cap_chan_lock(chan);
1315 l2cap_chan_del(chan, err);
1317 l2cap_chan_unlock(chan);
1319 chan->ops->close(chan->data);
1320 l2cap_chan_put(chan);
1323 mutex_unlock(&conn->chan_lock);
1325 hci_chan_del(conn->hchan);
1327 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1328 cancel_delayed_work_sync(&conn->info_timer);
1330 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1331 cancel_delayed_work_sync(&conn->security_timer);
1332 smp_chan_destroy(conn);
1335 hcon->l2cap_data = NULL;

/* SMP security procedure timed out: drop the whole connection. */
1339 static void security_timeout(struct work_struct *work)
1341 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1342 security_timer.work);
1344 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for an hci_conn: allocate
 * the HCI channel and conn object, pick the MTU from the link type,
 * and arm the LE security timer or the BR/EDR info timer.  Returns
 * NULL on allocation failure (checks elided in this extract). */
1347 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1349 struct l2cap_conn *conn = hcon->l2cap_data;
1350 struct hci_chan *hchan;
1355 hchan = hci_chan_create(hcon);
1359 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
/* Unwind the hchan if the conn allocation failed. */
1361 hci_chan_del(hchan);
1365 hcon->l2cap_data = conn;
1367 conn->hchan = hchan;
1369 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* LE links use the controller's LE MTU when it advertises one. */
1371 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1372 conn->mtu = hcon->hdev->le_mtu;
1374 conn->mtu = hcon->hdev->acl_mtu;
1376 conn->src = &hcon->hdev->bdaddr;
1377 conn->dst = &hcon->dst;
1379 conn->feat_mask = 0;
1381 spin_lock_init(&conn->lock);
1382 mutex_init(&conn->chan_lock);
1384 INIT_LIST_HEAD(&conn->chan_l);
1386 if (hcon->type == LE_LINK)
1387 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1389 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1391 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1396 /* ---- Socket interface ---- */
1398 /* Find socket with psm and source / destination bdaddr.
1399 * Returns closest match.
/* Same closest-match policy as l2cap_global_chan_by_scid, keyed on
 * PSM: exact src+dst wins, otherwise best wildcard match in c1. */
1401 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1405 struct l2cap_chan *c, *c1 = NULL;
1407 read_lock(&chan_list_lock);
1409 list_for_each_entry(c, &chan_list, global_l) {
1410 struct sock *sk = c->sk;
1412 if (state && c->state != state)
1415 if (c->psm == psm) {
1416 int src_match, dst_match;
1417 int src_any, dst_any;
1420 src_match = !bacmp(&bt_sk(sk)->src, src);
1421 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1422 if (src_match && dst_match) {
1423 read_unlock(&chan_list_lock);
1428 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1429 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1430 if ((src_match && dst_any) || (src_any && dst_match) ||
1431 (src_any && dst_any))
1436 read_unlock(&chan_list_lock);
/* Establish an outgoing channel to 'dst': validate PSM/mode/state,
 * resolve the route, create the ACL or LE baseband connection, attach
 * the channel to the resulting l2cap_conn, and start the L2CAP
 * connect sequence if the link is already up.  Returns 0 or a
 * negative errno (several error paths are elided in this extract). */
1441 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1442 bdaddr_t *dst, u8 dst_type)
1444 struct sock *sk = chan->sk;
1445 bdaddr_t *src = &bt_sk(sk)->src;
1446 struct l2cap_conn *conn;
1447 struct hci_conn *hcon;
1448 struct hci_dev *hdev;
1452 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1453 dst_type, __le16_to_cpu(chan->psm));
1455 hdev = hci_get_route(dst, src);
1457 return -EHOSTUNREACH;
1461 l2cap_chan_lock(chan);
1463 /* PSM must be odd and lsb of upper byte must be 0 */
1464 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1465 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID. */
1470 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1475 switch (chan->mode) {
1476 case L2CAP_MODE_BASIC:
1478 case L2CAP_MODE_ERTM:
1479 case L2CAP_MODE_STREAMING:
1490 switch (sk->sk_state) {
1494 /* Already connecting */
1500 /* Already connected */
1516 /* Set destination address and psm */
1517 bacpy(&bt_sk(sk)->dst, dst);
1524 auth_type = l2cap_get_auth_type(chan);
/* LE data CID selects an LE baseband link, otherwise ACL. */
1526 if (chan->dcid == L2CAP_CID_LE_DATA)
1527 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1528 chan->sec_level, auth_type);
1530 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1531 chan->sec_level, auth_type);
1534 err = PTR_ERR(hcon);
1538 conn = l2cap_conn_add(hcon, 0);
/* Only one LE data channel may exist per connection. */
1545 if (hcon->type == LE_LINK) {
1548 if (!list_empty(&conn->chan_l)) {
1557 /* Update source addr of the socket */
1558 bacpy(src, conn->src);
/* chan_add takes conn->chan_lock, so drop the channel lock around it
 * to preserve the conn-lock -> chan-lock ordering. */
1560 l2cap_chan_unlock(chan);
1561 l2cap_chan_add(conn, chan);
1562 l2cap_chan_lock(chan);
1564 l2cap_state_change(chan, BT_CONNECT);
1565 __set_chan_timer(chan, sk->sk_sndtimeo);
1567 if (hcon->state == BT_CONNECTED) {
1568 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1569 __clear_chan_timer(chan);
1570 if (l2cap_chan_check_security(chan))
1571 l2cap_state_change(chan, BT_CONNECTED);
1573 l2cap_do_start(chan);
1579 l2cap_chan_unlock(chan);
1580 hci_dev_unlock(hdev);
/* Block (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged or the connection disappears.  Returns 0 on success or
 * a -errno from a pending signal / socket error.
 */
1585 int __l2cap_wait_ack(struct sock *sk)
1587 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1588 DECLARE_WAITQUEUE(wait, current);
1592 add_wait_queue(sk_sleep(sk), &wait);
1593 set_current_state(TASK_INTERRUPTIBLE);
/* Loop while unacked frames remain and the connection still exists. */
1594 while (chan->unacked_frames > 0 && chan->conn) {
/* Bail out if a signal arrived while waiting. */
1598 if (signal_pending(current)) {
1599 err = sock_intr_errno(timeo);
1604 timeo = schedule_timeout(timeo);
1606 set_current_state(TASK_INTERRUPTIBLE);
/* A socket error also terminates the wait. */
1608 err = sock_error(sk);
1612 set_current_state(TASK_RUNNING);
1613 remove_wait_queue(sk_sleep(sk), &wait);
/* Monitor timer expiry (delayed work): the remote has not answered our
 * poll.  Give up and disconnect after remote_max_tx retries, otherwise
 * re-arm the timer and poll again.  Drops the channel reference taken
 * when the timer was armed.
 */
1617 static void l2cap_monitor_timeout(struct work_struct *work)
1619 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1620 monitor_timer.work);
1622 BT_DBG("chan %p", chan);
1624 l2cap_chan_lock(chan);
/* Retry budget exhausted: tear the connection down. */
1626 if (chan->retry_count >= chan->remote_max_tx) {
1627 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1628 l2cap_chan_unlock(chan);
1629 l2cap_chan_put(chan);
1633 chan->retry_count++;
1634 __set_monitor_timer(chan);
/* Poll the peer again with an RR/RNR carrying the P bit. */
1636 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1637 l2cap_chan_unlock(chan);
1638 l2cap_chan_put(chan);
/* Retransmission timer expiry (delayed work): an I-frame went
 * unacknowledged.  Start the monitor/poll cycle: arm the monitor
 * timer, flag that we are waiting for the F bit, and poll the peer.
 */
1641 static void l2cap_retrans_timeout(struct work_struct *work)
1643 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1644 retrans_timer.work);
1646 BT_DBG("chan %p", chan);
1648 l2cap_chan_lock(chan);
/* First poll attempt; subsequent ones are counted by the monitor. */
1650 chan->retry_count = 1;
1651 __set_monitor_timer(chan);
1653 set_bit(CONN_WAIT_F, &chan->conn_state);
1655 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1657 l2cap_chan_unlock(chan);
1658 l2cap_chan_put(chan);
/* Free acknowledged I-frames from the head of the transmit queue and
 * stop the retransmission timer once nothing is left unacked.
 */
1661 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1663 struct sk_buff *skb;
1665 while ((skb = skb_peek(&chan->tx_q)) &&
1666 chan->unacked_frames) {
/* Stop once we reach the first frame that is still awaiting an ack. */
1667 if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
1670 skb = skb_dequeue(&chan->tx_q);
1673 chan->unacked_frames--;
/* No outstanding frames: the retransmission timer is no longer needed. */
1676 if (!chan->unacked_frames)
1677 __clear_retrans_timer(chan);
/* Streaming mode transmit: append @skbs to the tx queue and send every
 * queued PDU immediately (no acknowledgement tracking).  Each frame
 * gets its control field packed and an optional CRC16 FCS appended.
 */
1680 static int l2cap_streaming_send(struct l2cap_chan *chan,
1681 struct sk_buff_head *skbs)
1683 struct sk_buff *skb;
1684 struct l2cap_ctrl *control;
1686 BT_DBG("chan %p, skbs %p", chan, skbs);
1688 if (chan->state != BT_CONNECTED)
1691 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1693 while (!skb_queue_empty(&chan->tx_q)) {
1695 skb = skb_dequeue(&chan->tx_q);
1697 bt_cb(skb)->control.retries = 1;
1698 control = &bt_cb(skb)->control;
/* Streaming never acknowledges, so reqseq is always 0. */
1700 control->reqseq = 0;
1701 control->txseq = chan->next_tx_seq;
1703 __pack_control(chan, control, skb);
/* Append the frame check sequence when CRC16 FCS is negotiated. */
1705 if (chan->fcs == L2CAP_FCS_CRC16) {
1706 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1707 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1710 l2cap_do_send(chan, skb);
1712 BT_DBG("Sent txseq %d", (int)control->txseq);
1714 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1715 chan->frames_sent++;
/* Retransmit the single I-frame whose txseq equals @tx_seq.  The frame
 * is located in tx_q, cloned, its control field refreshed (current
 * reqseq, optional F bit) and its FCS recomputed before sending.
 */
1721 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1723 struct sk_buff *skb, *tx_skb;
1727 skb = skb_peek(&chan->tx_q);
/* Walk the queue until the frame with the requested txseq is found. */
1731 while (bt_cb(skb)->control.txseq != tx_seq) {
1732 if (skb_queue_is_last(&chan->tx_q, skb))
1735 skb = skb_queue_next(&chan->tx_q, skb);
/* Retransmission limit reached: abort the connection. */
1738 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1739 chan->remote_max_tx) {
1740 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Clone so the queued original stays intact for future retransmits. */
1744 tx_skb = skb_clone(skb, GFP_ATOMIC);
1745 bt_cb(skb)->control.retries++;
/* Rebuild the control field: keep SAR bits, refresh reqseq/txseq and
 * set the F bit if one is pending. */
1747 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1748 control &= __get_sar_mask(chan);
1750 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1751 control |= __set_ctrl_final(chan);
1753 control |= __set_reqseq(chan, chan->buffer_seq);
1754 control |= __set_txseq(chan, tx_seq);
1756 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* Control field changed, so the FCS must be recomputed in place. */
1758 if (chan->fcs == L2CAP_FCS_CRC16) {
1759 fcs = crc16(0, (u8 *)tx_skb->data,
1760 tx_skb->len - L2CAP_FCS_SIZE);
1761 put_unaligned_le16(fcs,
1762 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1765 l2cap_do_send(chan, tx_skb);
/* ERTM transmit pump: send frames from tx_send_head while the remote
 * transmit window has room and the tx state machine is in XMIT.
 * Returns the number of frames sent (per the final BT_DBG) or an
 * error; exact return paths are elided in this extraction.
 */
1768 static int l2cap_ertm_send(struct l2cap_chan *chan)
1770 struct sk_buff *skb, *tx_skb;
1771 struct l2cap_ctrl *control;
1774 BT_DBG("chan %p", chan);
1776 if (chan->state != BT_CONNECTED)
/* Peer signalled RNR: hold transmission until it clears. */
1779 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1782 while (chan->tx_send_head &&
1783 chan->unacked_frames < chan->remote_tx_win &&
1784 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1786 skb = chan->tx_send_head;
1788 bt_cb(skb)->control.retries = 1;
1789 control = &bt_cb(skb)->control;
/* Piggyback a pending F bit on the outgoing frame. */
1791 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1794 control->reqseq = chan->buffer_seq;
1795 chan->last_acked_seq = chan->buffer_seq;
1796 control->txseq = chan->next_tx_seq;
1798 __pack_control(chan, control, skb);
1800 if (chan->fcs == L2CAP_FCS_CRC16) {
1801 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1802 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1805 /* Clone after data has been modified. Data is assumed to be
1806 read-only (for locking purposes) on cloned sk_buffs.
1808 tx_skb = skb_clone(skb, GFP_KERNEL);
1813 __set_retrans_timer(chan);
1815 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1816 chan->unacked_frames++;
1817 chan->frames_sent++;
/* Advance tx_send_head; NULL once the queue tail has been sent. */
1820 if (skb_queue_is_last(&chan->tx_q, skb))
1821 chan->tx_send_head = NULL;
1823 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1825 l2cap_do_send(chan, tx_skb);
1826 BT_DBG("Sent txseq %d", (int)control->txseq);
1829 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
1830 (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Rewind the send pointer to the oldest unacknowledged frame and
 * retransmit from there via the normal ERTM send path.
 */
1835 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1839 if (!skb_queue_empty(&chan->tx_q))
1840 chan->tx_send_head = chan->tx_q.next;
1842 chan->next_tx_seq = chan->expected_ack_seq;
1843 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR when locally busy, otherwise
 * try to piggyback the ack on pending I-frames (l2cap_ertm_send) and
 * fall back to an explicit RR if nothing was sent.
 */
1847 static void __l2cap_send_ack(struct l2cap_chan *chan)
1851 control |= __set_reqseq(chan, chan->buffer_seq);
/* Locally busy: tell the peer to stop with an RNR S-frame. */
1853 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1854 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1855 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* If I-frames went out they already carried the ack. */
1859 if (l2cap_ertm_send(chan) > 0)
1862 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
/* Cancel the delayed ack timer and acknowledge immediately. */
1865 static void l2cap_send_ack(struct l2cap_chan *chan)
1867 __clear_ack_timer(chan);
1868 __l2cap_send_ack(chan);
/* Build an SREJ S-frame with the F bit set, requesting the sequence
 * number of the last entry on the SREJ list.
 */
1871 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1873 struct srej_list *tail;
1876 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1877 control |= __set_ctrl_final(chan);
/* Last element of the srej_l list == most recently missed frame. */
1879 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1880 control |= __set_reqseq(chan, tail->tx_seq);
/* Copy user data from @msg into @skb: the first @count bytes go into
 * the skb's linear area, the remainder of @len is split into
 * MTU-sized continuation fragments chained on skb's frag_list.
 */
1883 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1884 struct msghdr *msg, int len,
1885 int count, struct sk_buff *skb)
1887 struct l2cap_conn *conn = chan->conn;
1888 struct sk_buff **frag;
1891 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1897 /* Continuation fragments (no L2CAP header) */
1898 frag = &skb_shinfo(skb)->frag_list;
1900 struct sk_buff *tmp;
/* Each fragment is bounded by the connection MTU. */
1902 count = min_t(unsigned int, conn->mtu, len);
1904 tmp = chan->ops->alloc_skb(chan, count,
1905 msg->msg_flags & MSG_DONTWAIT);
1907 return PTR_ERR(tmp);
1911 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1914 (*frag)->priority = skb->priority;
/* Keep the parent skb's length accounting in sync with the chain. */
1919 skb->len += (*frag)->len;
1920 skb->data_len += (*frag)->len;
1922 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM
 * followed by the user payload copied from @msg.  Returns the skb or
 * an ERR_PTR.
 */
1928 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1929 struct msghdr *msg, size_t len,
1932 struct l2cap_conn *conn = chan->conn;
1933 struct sk_buff *skb;
/* Header = basic L2CAP header plus the PSM field. */
1934 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1935 struct l2cap_hdr *lh;
1937 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1939 count = min_t(unsigned int, (conn->mtu - hlen), len);
1941 skb = chan->ops->alloc_skb(chan, count + hlen,
1942 msg->msg_flags & MSG_DONTWAIT);
1946 skb->priority = priority;
1948 /* Create L2CAP header */
1949 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1950 lh->cid = cpu_to_le16(chan->dcid);
1951 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1952 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1954 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1955 if (unlikely(err < 0)) {
1957 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload copied from @msg.  Returns the skb or an ERR_PTR.
 */
1962 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1963 struct msghdr *msg, size_t len,
1966 struct l2cap_conn *conn = chan->conn;
1967 struct sk_buff *skb;
1969 struct l2cap_hdr *lh;
1971 BT_DBG("chan %p len %d", chan, (int)len);
1973 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1975 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1976 msg->msg_flags & MSG_DONTWAIT);
1980 skb->priority = priority;
1982 /* Create L2CAP header */
1983 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1984 lh->cid = cpu_to_le16(chan->dcid);
1985 lh->len = cpu_to_le16(len);
1987 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1988 if (unlikely(err < 0)) {
1990 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), optional SDU-length field for
 * SAR start frames, the payload, and space reserved for the FCS.
 * Returns the skb or an ERR_PTR.
 */
1995 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1996 struct msghdr *msg, size_t len,
1999 struct l2cap_conn *conn = chan->conn;
2000 struct sk_buff *skb;
2001 int err, count, hlen;
2002 struct l2cap_hdr *lh;
2004 BT_DBG("chan %p len %d", chan, (int)len);
2007 return ERR_PTR(-ENOTCONN);
/* Extended control field (high-speed) vs. enhanced control field. */
2009 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2010 hlen = L2CAP_EXT_HDR_SIZE;
2012 hlen = L2CAP_ENH_HDR_SIZE;
/* SAR start frames carry the total SDU length (sdulen != 0). */
2015 hlen += L2CAP_SDULEN_SIZE;
2017 if (chan->fcs == L2CAP_FCS_CRC16)
2018 hlen += L2CAP_FCS_SIZE;
2020 count = min_t(unsigned int, (conn->mtu - hlen), len);
2022 skb = chan->ops->alloc_skb(chan, count + hlen,
2023 msg->msg_flags & MSG_DONTWAIT);
2027 /* Create L2CAP header */
2028 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2029 lh->cid = cpu_to_le16(chan->dcid);
2030 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2032 /* Control header is populated later */
2033 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2034 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))2;
2036 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2039 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2041 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2042 if (unlikely(err < 0)) {
2044 return ERR_PTR(err);
/* Record FCS mode and reset the retry counter for this frame. */
2047 bt_cb(skb)->control.fcs = chan->fcs;
2048 bt_cb(skb)->control.retries = 0;
/* Segment an outgoing SDU from @msg into one or more I-frame PDUs on
 * @seg_queue, tagging each with the appropriate SAR value
 * (UNSEGMENTED, or START / CONTINUE / END).
 */
2052 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2053 struct sk_buff_head *seg_queue,
2054 struct msghdr *msg, size_t len)
2056 struct sk_buff *skb;
2062 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2064 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2065 * so fragmented skbs are not used. The HCI layer's handling
2066 * of fragmented skbs is not compatible with ERTM's queueing.
2069 /* PDU size is derived from the HCI MTU */
2070 pdu_len = chan->conn->mtu;
2072 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2074 /* Adjust for largest possible L2CAP overhead. */
2075 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
2077 /* Remote device may have requested smaller PDUs */
2078 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
/* Fits in one PDU: no segmentation required. */
2080 if (len <= pdu_len) {
2081 sar = L2CAP_SAR_UNSEGMENTED;
2085 sar = L2CAP_SAR_START;
/* The start frame also carries the SDU length field. */
2087 pdu_len -= L2CAP_SDULEN_SIZE;
2091 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
/* On failure discard everything queued so far. */
2094 __skb_queue_purge(seg_queue);
2095 return PTR_ERR(skb);
2098 bt_cb(skb)->control.sar = sar;
2099 __skb_queue_tail(seg_queue, skb);
/* After the start frame, subsequent PDUs regain the SDULEN space. */
2104 pdu_len += L2CAP_SDULEN_SIZE;
2107 if (len <= pdu_len) {
2108 sar = L2CAP_SAR_END;
2111 sar = L2CAP_SAR_CONTINUE;
/* Top-level channel send entry point.  Dispatches on channel type and
 * mode: connectionless and basic mode send a single PDU directly;
 * ERTM/streaming segment the SDU first and hand the queue to the
 * respective transmit path.  Returns bytes sent or -errno.
 */
2118 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2121 struct sk_buff *skb;
2123 struct sk_buff_head seg_queue;
2125 /* Connectionless channel */
2126 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2127 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2129 return PTR_ERR(skb);
2131 l2cap_do_send(chan, skb);
2135 switch (chan->mode) {
2136 case L2CAP_MODE_BASIC:
2137 /* Check outgoing MTU */
2138 if (len > chan->omtu)
2141 /* Create a basic PDU */
2142 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2144 return PTR_ERR(skb);
2146 l2cap_do_send(chan, skb);
2150 case L2CAP_MODE_ERTM:
2151 case L2CAP_MODE_STREAMING:
2152 /* Check outgoing MTU */
2153 if (len > chan->omtu) {
2158 __skb_queue_head_init(&seg_queue);
2160 /* Do segmentation before calling in to the state machine,
2161 * since it's possible to block while waiting for memory
2164 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2166 /* The channel could have been closed while segmenting,
2167 * check that it is still connected.
2169 if (chan->state != BT_CONNECTED) {
2170 __skb_queue_purge(&seg_queue);
/* ERTM goes through the tx state machine; streaming sends directly. */
2177 if (chan->mode == L2CAP_MODE_ERTM)
2178 err = l2cap_tx(chan, 0, &seg_queue,
2179 L2CAP_EV_DATA_REQUEST);
2181 err = l2cap_streaming_send(chan, &seg_queue);
2186 /* If the skbs were not queued for sending, they'll still be in
2187 * seg_queue and need to be purged.
2189 __skb_queue_purge(&seg_queue);
2193 BT_DBG("bad state %1.1x", chan->mode);
/* Process an incoming ReqSeq acknowledgement: free every tx_q frame
 * from expected_ack_seq up to (but excluding) @reqseq, then advance
 * expected_ack_seq and stop the retransmission timer if everything is
 * acknowledged.
 */
2200 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2202 struct sk_buff *acked_skb;
2205 BT_DBG("chan %p, reqseq %d", chan, reqseq);
/* Nothing outstanding, or a duplicate ack: nothing to do. */
2207 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2210 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2211 chan->expected_ack_seq, chan->unacked_frames);
2213 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2214 ackseq = __next_seq(chan, ackseq)) {
/* Remove and free each newly-acknowledged frame. */
2216 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2218 skb_unlink(acked_skb, &chan->tx_q);
2219 kfree_skb(acked_skb);
2220 chan->unacked_frames--;
2224 chan->expected_ack_seq = reqseq;
2226 if (chan->unacked_frames == 0)
2227 __clear_retrans_timer(chan);
2229 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
/* Abort the SREJ_SENT receive state: discard the out-of-order frames
 * held in srej_q, clear the SREJ list, and fall back to plain RECV.
 */
2232 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2234 BT_DBG("chan %p", chan);
2236 chan->expected_tx_seq = chan->buffer_seq;
2237 l2cap_seq_list_clear(&chan->srej_list);
2238 skb_queue_purge(&chan->srej_q);
2239 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM transmit state machine handler for the XMIT state.  Handles
 * data requests, local-busy transitions, incoming ReqSeq/F-bit
 * notifications, explicit polls and the retransmission timeout event.
 */
2242 static int l2cap_tx_state_xmit(struct l2cap_chan *chan,
2243 struct l2cap_ctrl *control,
2244 struct sk_buff_head *skbs, u8 event)
2248 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
/* New SDU fragments from the send path: queue them and transmit. */
2252 case L2CAP_EV_DATA_REQUEST:
2253 if (chan->tx_send_head == NULL)
2254 chan->tx_send_head = skb_peek(skbs);
2256 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2257 l2cap_ertm_send(chan);
2259 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2260 BT_DBG("Enter LOCAL_BUSY");
2261 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2263 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2264 /* The SREJ_SENT state must be aborted if we are to
2265 * enter the LOCAL_BUSY state.
2267 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack() will emit the RNR since LOCAL_BUSY is now set. */
2270 l2cap_send_ack(chan);
2273 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2274 BT_DBG("Exit LOCAL_BUSY");
2275 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we previously sent RNR, poll the peer with RR(P=1) and wait
 * for the F bit. */
2277 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2278 struct l2cap_ctrl local_control;
2280 memset(&local_control, 0, sizeof(local_control));
2281 local_control.sframe = 1;
2282 local_control.super = L2CAP_SUPER_RR;
2283 local_control.poll = 1;
2284 local_control.reqseq = chan->buffer_seq;
2285 l2cap_send_sframe(chan, &local_control);
2287 chan->retry_count = 1;
2288 __set_monitor_timer(chan);
2289 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2292 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2293 l2cap_process_reqseq(chan, control->reqseq);
/* Explicit poll / retrans timeout both poll and move to WAIT_F. */
2295 case L2CAP_EV_EXPLICIT_POLL:
2296 l2cap_send_rr_or_rnr(chan, 1);
2297 chan->retry_count = 1;
2298 __set_monitor_timer(chan);
2299 __clear_ack_timer(chan);
2300 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2302 case L2CAP_EV_RETRANS_TO:
2303 l2cap_send_rr_or_rnr(chan, 1);
2304 chan->retry_count = 1;
2305 __set_monitor_timer(chan);
2306 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2308 case L2CAP_EV_RECV_FBIT:
2309 /* Nothing to process */
/* ERTM transmit state machine handler for the WAIT_F state (a poll is
 * outstanding and we are waiting for the peer's F bit).  Data is
 * queued but not sent; receipt of the F bit returns the machine to
 * XMIT; monitor timeouts re-poll up to max_tx times before the
 * connection is aborted.
 *
 * Fix: the BT_DBG format string for the F-bit transition was
 * "0x2.2%x" (prints a literal "0x2.2" followed by the hex value);
 * the intended specifier is "0x%2.2x".
 */
2318 static int l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2319 struct l2cap_ctrl *control,
2320 struct sk_buff_head *skbs, u8 event)
2324 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2328 case L2CAP_EV_DATA_REQUEST:
2329 if (chan->tx_send_head == NULL)
2330 chan->tx_send_head = skb_peek(skbs);
2331 /* Queue data, but don't send. */
2332 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2334 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2335 BT_DBG("Enter LOCAL_BUSY");
2336 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2338 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2339 /* The SREJ_SENT state must be aborted if we are to
2340 * enter the LOCAL_BUSY state.
2342 l2cap_abort_rx_srej_sent(chan);
2345 l2cap_send_ack(chan);
2348 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2349 BT_DBG("Exit LOCAL_BUSY");
2350 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If RNR was sent, re-poll with RR(P=1) and keep waiting for F. */
2352 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2353 struct l2cap_ctrl local_control;
2354 memset(&local_control, 0, sizeof(local_control));
2355 local_control.sframe = 1;
2356 local_control.super = L2CAP_SUPER_RR;
2357 local_control.poll = 1;
2358 local_control.reqseq = chan->buffer_seq;
2359 l2cap_send_sframe(chan, &local_control);
2361 chan->retry_count = 1;
2362 __set_monitor_timer(chan);
2363 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2366 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2367 l2cap_process_reqseq(chan, control->reqseq);
/* The F bit answers our poll: return to XMIT and resume the
 * retransmission timer if frames are still outstanding. */
2371 case L2CAP_EV_RECV_FBIT:
2372 if (control && control->final) {
2373 __clear_monitor_timer(chan);
2374 if (chan->unacked_frames > 0)
2375 __set_retrans_timer(chan);
2376 chan->retry_count = 0;
2377 chan->tx_state = L2CAP_TX_STATE_XMIT;
2378 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2381 case L2CAP_EV_EXPLICIT_POLL:
/* Monitor timeout: re-poll until max_tx is exhausted, then abort. */
2384 case L2CAP_EV_MONITOR_TO:
2385 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2386 l2cap_send_rr_or_rnr(chan, 1);
2387 __set_monitor_timer(chan);
2388 chan->retry_count++;
2390 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Dispatch a transmit-side event to the handler for the current ERTM
 * tx state (XMIT or WAIT_F).
 */
2400 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2401 struct sk_buff_head *skbs, u8 event)
2405 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2406 chan, control, skbs, event, chan->tx_state);
2408 switch (chan->tx_state) {
2409 case L2CAP_TX_STATE_XMIT:
2410 err = l2cap_tx_state_xmit(chan, control, skbs, event);
2412 case L2CAP_TX_STATE_WAIT_F:
2413 err = l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received frame's ReqSeq/F-bit information into the transmit
 * state machine (no skbs accompany this event, hence the 0 argument).
 */
2423 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2424 struct l2cap_ctrl *control)
2426 BT_DBG("chan %p, control %p", chan, control);
2427 l2cap_tx(chan, control, 0, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2430 /* Copy frame to all raw sockets on that connection */
2431 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2433 struct sk_buff *nskb;
2434 struct l2cap_chan *chan;
2436 BT_DBG("conn %p", conn);
2438 mutex_lock(&conn->chan_lock);
2440 list_for_each_entry(chan, &conn->chan_l, list) {
2441 struct sock *sk = chan->sk;
2442 if (chan->chan_type != L2CAP_CHAN_RAW)
2445 /* Don't send frame to the socket it came from */
2448 nskb = skb_clone(skb, GFP_ATOMIC);
2452 if (chan->ops->recv(chan->data, nskb))
2456 mutex_unlock(&conn->chan_lock);
2459 /* ---- L2CAP signalling commands ---- */
2460 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2461 u8 code, u8 ident, u16 dlen, void *data)
2463 struct sk_buff *skb, **frag;
2464 struct l2cap_cmd_hdr *cmd;
2465 struct l2cap_hdr *lh;
2468 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2469 conn, code, ident, dlen);
2471 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2472 count = min_t(unsigned int, conn->mtu, len);
2474 skb = bt_skb_alloc(count, GFP_ATOMIC);
2478 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2479 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2481 if (conn->hcon->type == LE_LINK)
2482 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2484 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2486 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2489 cmd->len = cpu_to_le16(dlen);
2492 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2493 memcpy(skb_put(skb, count), data, count);
2499 /* Continuation fragments (no L2CAP header) */
2500 frag = &skb_shinfo(skb)->frag_list;
2502 count = min_t(unsigned int, conn->mtu, len);
2504 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2508 memcpy(skb_put(*frag, count), data, count);
2513 frag = &(*frag)->next;
/* Decode one configuration option at *ptr into (*type, *olen, *val),
 * advancing *ptr past the option.  1/2/4-byte options are read as
 * integers; anything longer is returned as a pointer to its bytes.
 * Returns the total option length consumed.
 */
2523 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2525 struct l2cap_conf_opt *opt = *ptr;
2528 len = L2CAP_CONF_OPT_SIZE + opt->len;
2536 *val = *((u8 *) opt->val);
2540 *val = get_unaligned_le16(opt->val);
2544 *val = get_unaligned_le32(opt->val);
/* Larger payloads are passed by reference, not by value. */
2548 *val = (unsigned long) opt->val;
2552 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr, advancing *ptr past it.
 * Mirrors l2cap_get_conf_opt(): 1/2/4-byte values are stored inline;
 * for larger lengths @val is treated as a pointer to the payload.
 */
2556 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2558 struct l2cap_conf_opt *opt = *ptr;
2560 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2567 *((u8 *) opt->val) = val;
2571 put_unaligned_le16(val, opt->val);
2575 put_unaligned_le32(val, opt->val);
2579 memcpy(opt->val, (void *) val, len);
2583 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Fill in an Extended Flow Specification from the channel's local
 * parameters and append it as a configuration option.  ERTM uses the
 * channel's service type; streaming forces best-effort.
 */
2586 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2588 struct l2cap_conf_efs efs;
2590 switch (chan->mode) {
2591 case L2CAP_MODE_ERTM:
2592 efs.id = chan->local_id;
2593 efs.stype = chan->local_stype;
2594 efs.msdu = cpu_to_le16(chan->local_msdu);
2595 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2596 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2597 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2600 case L2CAP_MODE_STREAMING:
/* Streaming is always best-effort. */
2602 efs.stype = L2CAP_SERV_BESTEFFORT;
2603 efs.msdu = cpu_to_le16(chan->local_msdu);
2604 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2613 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2614 (unsigned long) &efs);
/* Ack timer expiry (delayed work): send the pending acknowledgement
 * and drop the channel reference taken when the timer was armed.
 */
2617 static void l2cap_ack_timeout(struct work_struct *work)
2619 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2622 BT_DBG("chan %p", chan);
2624 l2cap_chan_lock(chan);
2626 __l2cap_send_ack(chan);
2628 l2cap_chan_unlock(chan);
2630 l2cap_chan_put(chan);
/* Reset all ERTM/streaming sequence state for a (re)configured
 * channel; for ERTM proper, also initialise the rx/tx state machines,
 * the three delayed-work timers and the SREJ/retransmit bookkeeping.
 * Returns 0 or a -errno from sequence-list allocation.
 */
2633 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2637 chan->next_tx_seq = 0;
2638 chan->expected_tx_seq = 0;
2639 chan->expected_ack_seq = 0;
2640 chan->unacked_frames = 0;
2641 chan->buffer_seq = 0;
2642 chan->num_acked = 0;
2643 chan->frames_sent = 0;
2644 chan->last_acked_seq = 0;
2646 chan->sdu_last_frag = NULL;
2649 skb_queue_head_init(&chan->tx_q);
/* Streaming mode needs only the reset above. */
2651 if (chan->mode != L2CAP_MODE_ERTM)
2654 chan->rx_state = L2CAP_RX_STATE_RECV;
2655 chan->tx_state = L2CAP_TX_STATE_XMIT;
2657 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2658 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2659 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2661 skb_queue_head_init(&chan->srej_q);
2663 INIT_LIST_HEAD(&chan->srej_l);
2664 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2668 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Second list failed: undo the first before returning the error. */
2670 l2cap_seq_list_free(&chan->srej_list);
/* Pick a usable channel mode: keep the requested ERTM/streaming mode
 * when the remote feature mask supports it, otherwise fall back to
 * basic mode.
 */
2675 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2678 case L2CAP_MODE_STREAMING:
2679 case L2CAP_MODE_ERTM:
2680 if (l2cap_mode_supported(mode, remote_feat_mask))
2684 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled
 * and the remote advertising the feature. */
2688 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2690 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed support
 * enabled and the remote advertising the feature. */
2693 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2695 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Choose the transmit window: switch to the extended control field
 * (and its larger window maximum) when a window above the default is
 * requested and EWS is supported; otherwise clamp to the default.
 */
2698 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2700 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2701 __l2cap_ews_supported(chan)) {
2702 /* use extended control field */
2703 set_bit(FLAG_EXT_CTRL, &chan->flags);
2704 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2706 chan->tx_win = min_t(u16, chan->tx_win,
2707 L2CAP_DEFAULT_TX_WINDOW);
2708 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request into @data: optional MTU option,
 * then mode-specific RFC / EFS / FCS / EWS options depending on
 * basic, ERTM or streaming mode.  Returns the request length (in an
 * elided line).
 */
2712 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2714 struct l2cap_conf_req *req = data;
2715 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2716 void *ptr = req->data;
2719 BT_DBG("chan %p", chan);
/* Mode selection only happens on the very first request/response. */
2721 if (chan->num_conf_req || chan->num_conf_rsp)
2724 switch (chan->mode) {
2725 case L2CAP_MODE_STREAMING:
2726 case L2CAP_MODE_ERTM:
2727 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2730 if (__l2cap_efs_supported(chan))
2731 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* Fall back according to what the remote supports. */
2735 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only send an MTU option when it differs from the default. */
2740 if (chan->imtu != L2CAP_DEFAULT_MTU)
2741 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2743 switch (chan->mode) {
2744 case L2CAP_MODE_BASIC:
/* If the remote knows neither ERTM nor streaming, the RFC option
 * can be omitted entirely. */
2745 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2746 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2749 rfc.mode = L2CAP_MODE_BASIC;
2751 rfc.max_transmit = 0;
2752 rfc.retrans_timeout = 0;
2753 rfc.monitor_timeout = 0;
2754 rfc.max_pdu_size = 0;
2756 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2757 (unsigned long) &rfc);
2760 case L2CAP_MODE_ERTM:
2761 rfc.mode = L2CAP_MODE_ERTM;
2762 rfc.max_transmit = chan->max_tx;
2763 rfc.retrans_timeout = 0;
2764 rfc.monitor_timeout = 0;
/* Cap the PDU size so a full frame fits the HCI MTU with headers. */
2766 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2767 L2CAP_EXT_HDR_SIZE -
2770 rfc.max_pdu_size = cpu_to_le16(size);
2772 l2cap_txwin_setup(chan);
2774 rfc.txwin_size = min_t(u16, chan->tx_win,
2775 L2CAP_DEFAULT_TX_WINDOW);
2777 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2778 (unsigned long) &rfc);
2780 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2781 l2cap_add_opt_efs(&ptr, chan);
2783 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Explicitly request no FCS when allowed and desired. */
2786 if (chan->fcs == L2CAP_FCS_NONE ||
2787 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2788 chan->fcs = L2CAP_FCS_NONE;
2789 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2792 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2793 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2797 case L2CAP_MODE_STREAMING:
2798 rfc.mode = L2CAP_MODE_STREAMING;
2800 rfc.max_transmit = 0;
2801 rfc.retrans_timeout = 0;
2802 rfc.monitor_timeout = 0;
2804 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2805 L2CAP_EXT_HDR_SIZE -
2808 rfc.max_pdu_size = cpu_to_le16(size);
2810 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2811 (unsigned long) &rfc);
2813 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2814 l2cap_add_opt_efs(&ptr, chan);
2816 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2819 if (chan->fcs == L2CAP_FCS_NONE ||
2820 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2821 chan->fcs = L2CAP_FCS_NONE;
2822 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2827 req->dcid = cpu_to_le16(chan->dcid);
2828 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request (chan->conf_req /
 * conf_len) and build our Configure Response into @data: first decode
 * every option (unknown non-hint options are echoed back with result
 * UNKNOWN), then resolve the channel mode, and finally emit the
 * accepted/adjusted output options.  Returns the response length (in
 * an elided line) or -ECONNREFUSED.
 */
2833 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2835 struct l2cap_conf_rsp *rsp = data;
2836 void *ptr = rsp->data;
2837 void *req = chan->conf_req;
2838 int len = chan->conf_len;
2839 int type, hint, olen;
2841 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2842 struct l2cap_conf_efs efs;
2844 u16 mtu = L2CAP_DEFAULT_MTU;
2845 u16 result = L2CAP_CONF_SUCCESS;
2848 BT_DBG("chan %p", chan);
/* First pass: decode all options sent by the peer. */
2850 while (len >= L2CAP_CONF_OPT_SIZE) {
2851 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; mandatory ones may not. */
2853 hint = type & L2CAP_CONF_HINT;
2854 type &= L2CAP_CONF_MASK;
2857 case L2CAP_CONF_MTU:
2861 case L2CAP_CONF_FLUSH_TO:
2862 chan->flush_to = val;
2865 case L2CAP_CONF_QOS:
2868 case L2CAP_CONF_RFC:
2869 if (olen == sizeof(rfc))
2870 memcpy(&rfc, (void *) val, olen);
2873 case L2CAP_CONF_FCS:
2874 if (val == L2CAP_FCS_NONE)
2875 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2878 case L2CAP_CONF_EFS:
2880 if (olen == sizeof(efs))
2881 memcpy(&efs, (void *) val, olen);
2884 case L2CAP_CONF_EWS:
/* EWS without high-speed support is refused (check elided). */
2886 return -ECONNREFUSED;
2888 set_bit(FLAG_EXT_CTRL, &chan->flags);
2889 set_bit(CONF_EWS_RECV, &chan->conf_state);
2890 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2891 chan->remote_tx_win = val;
/* Unknown mandatory option: reply UNKNOWN and list the type. */
2898 result = L2CAP_CONF_UNKNOWN;
2899 *((u8 *) ptr++) = type;
/* Mode resolution happens only while the first exchange is open. */
2904 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2907 switch (chan->mode) {
2908 case L2CAP_MODE_STREAMING:
2909 case L2CAP_MODE_ERTM:
2910 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2911 chan->mode = l2cap_select_mode(rfc.mode,
2912 chan->conn->feat_mask);
2917 if (__l2cap_efs_supported(chan))
2918 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2920 return -ECONNREFUSED;
2923 if (chan->mode != rfc.mode)
2924 return -ECONNREFUSED;
/* Peer proposed a different mode: push back with our own RFC. */
2930 if (chan->mode != rfc.mode) {
2931 result = L2CAP_CONF_UNACCEPT;
2932 rfc.mode = chan->mode;
2934 if (chan->num_conf_rsp == 1)
2935 return -ECONNREFUSED;
2937 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2938 sizeof(rfc), (unsigned long) &rfc);
2941 if (result == L2CAP_CONF_SUCCESS) {
2942 /* Configure output options and let the other side know
2943 * which ones we don't like. */
2945 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2946 result = L2CAP_CONF_UNACCEPT;
2949 set_bit(CONF_MTU_DONE, &chan->conf_state);
2951 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* The peer's EFS service type must match our local one. */
2954 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2955 efs.stype != L2CAP_SERV_NOTRAFIC &&
2956 efs.stype != chan->local_stype) {
2958 result = L2CAP_CONF_UNACCEPT;
2960 if (chan->num_conf_req >= 1)
2961 return -ECONNREFUSED;
2963 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2965 (unsigned long) &efs);
2967 /* Send PENDING Conf Rsp */
2968 result = L2CAP_CONF_PENDING;
2969 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2974 case L2CAP_MODE_BASIC:
2975 chan->fcs = L2CAP_FCS_NONE;
2976 set_bit(CONF_MODE_DONE, &chan->conf_state);
2979 case L2CAP_MODE_ERTM:
/* EWS overrides the RFC window size when present. */
2980 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2981 chan->remote_tx_win = rfc.txwin_size;
2983 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2985 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the remote's max PDU to what fits our HCI MTU. */
2987 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2989 L2CAP_EXT_HDR_SIZE -
2992 rfc.max_pdu_size = cpu_to_le16(size);
2993 chan->remote_mps = size;
2995 rfc.retrans_timeout =
2996 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2997 rfc.monitor_timeout =
2998 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3000 set_bit(CONF_MODE_DONE, &chan->conf_state);
3002 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3003 sizeof(rfc), (unsigned long) &rfc);
/* Record the peer's flow spec and echo the EFS back. */
3005 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3006 chan->remote_id = efs.id;
3007 chan->remote_stype = efs.stype;
3008 chan->remote_msdu = le16_to_cpu(efs.msdu);
3009 chan->remote_flush_to =
3010 le32_to_cpu(efs.flush_to);
3011 chan->remote_acc_lat =
3012 le32_to_cpu(efs.acc_lat);
3013 chan->remote_sdu_itime =
3014 le32_to_cpu(efs.sdu_itime);
3015 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3016 sizeof(efs), (unsigned long) &efs);
3020 case L2CAP_MODE_STREAMING:
3021 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3023 L2CAP_EXT_HDR_SIZE -
3026 rfc.max_pdu_size = cpu_to_le16(size);
3027 chan->remote_mps = size;
3029 set_bit(CONF_MODE_DONE, &chan->conf_state);
3031 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3032 sizeof(rfc), (unsigned long) &rfc);
/* Unsupported mode: reject with a zeroed RFC naming our mode. */
3037 result = L2CAP_CONF_UNACCEPT;
3039 memset(&rfc, 0, sizeof(rfc));
3040 rfc.mode = chan->mode;
3043 if (result == L2CAP_CONF_SUCCESS)
3044 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3046 rsp->scid = cpu_to_le16(chan->dcid);
3047 rsp->result = cpu_to_le16(result);
3048 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response options (@rsp, @len bytes) and build
 * the follow-up Configure Request into @data.  *result carries the response
 * result code and may be downgraded to L2CAP_CONF_UNACCEPT here (e.g. on a
 * too-small MTU).  Returns a negative errno (-ECONNREFUSED) when the peer's
 * options cannot be accepted at all.
 * NOTE(review): this listing is elided; some structural lines of the
 * original function are not shown.
 */
3053 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3055 struct l2cap_conf_req *req = data;
3056 void *ptr = req->data;
/* RFC defaults to Basic mode if the peer sends no RFC option. */
3059 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3060 struct l2cap_conf_efs efs;
3062 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
/* Walk the TLV option list in the response. */
3064 while (len >= L2CAP_CONF_OPT_SIZE) {
3065 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3068 case L2CAP_CONF_MTU:
/* Refuse MTUs below the spec minimum but echo back a sane value. */
3069 if (val < L2CAP_DEFAULT_MIN_MTU) {
3070 *result = L2CAP_CONF_UNACCEPT;
3071 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3074 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3077 case L2CAP_CONF_FLUSH_TO:
3078 chan->flush_to = val;
3079 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3083 case L2CAP_CONF_RFC:
3084 if (olen == sizeof(rfc))
3085 memcpy(&rfc, (void *)val, olen);
/* A "state 2" device must not change modes mid-negotiation. */
3087 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3088 rfc.mode != chan->mode)
3089 return -ECONNREFUSED;
3093 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3094 sizeof(rfc), (unsigned long) &rfc);
3097 case L2CAP_CONF_EWS:
/* Clamp the extended window size to our supported maximum. */
3098 chan->tx_win = min_t(u16, val,
3099 L2CAP_DEFAULT_EXT_WINDOW);
3100 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3104 case L2CAP_CONF_EFS:
3105 if (olen == sizeof(efs))
3106 memcpy(&efs, (void *)val, olen);
/* The EFS service type must match ours unless either side is No-Traffic. */
3108 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3109 efs.stype != L2CAP_SERV_NOTRAFIC &&
3110 efs.stype != chan->local_stype)
3111 return -ECONNREFUSED;
3113 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3114 sizeof(efs), (unsigned long) &efs);
/* A Basic-mode channel cannot be renegotiated into another mode. */
3119 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3120 return -ECONNREFUSED;
3122 chan->mode = rfc.mode;
/* On success/pending, latch the negotiated ERTM/streaming parameters. */
3124 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3126 case L2CAP_MODE_ERTM:
3127 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3128 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3129 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3131 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3132 chan->local_msdu = le16_to_cpu(efs.msdu);
3133 chan->local_sdu_itime =
3134 le32_to_cpu(efs.sdu_itime);
3135 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3136 chan->local_flush_to =
3137 le32_to_cpu(efs.flush_to);
3141 case L2CAP_MODE_STREAMING:
3142 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3146 req->dcid = cpu_to_le16(chan->dcid);
3147 req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal Configure Response header (scid/result/flags) for
 * @chan into @data.  Note rsp->scid is our view of the peer's channel,
 * i.e. chan->dcid.
 */
3152 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3154 struct l2cap_conf_rsp *rsp = data;
3155 void *ptr = rsp->data;
3157 BT_DBG("chan %p", chan);
3159 rsp->scid = cpu_to_le16(chan->dcid);
3160 rsp->result = cpu_to_le16(result);
3161 rsp->flags = cpu_to_le16(flags);
/* Complete a deferred connection setup: send the pending Connect Response
 * (success, no further info) for @chan, then — unless a Configure Request
 * was already sent — kick off configuration and count the request.
 */
3166 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3168 struct l2cap_conn_rsp rsp;
3169 struct l2cap_conn *conn = chan->conn;
3172 rsp.scid = cpu_to_le16(chan->dcid);
3173 rsp.dcid = cpu_to_le16(chan->scid);
3174 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3175 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3176 l2cap_send_cmd(conn, chan->ident,
3177 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only the first caller gets to send the Configure Request. */
3179 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3182 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3183 l2cap_build_conf_req(chan, buf), buf);
3184 chan->num_conf_req++;
/* Extract the RFC option from a successful Configure Response (@rsp, @len
 * bytes) and apply its timeouts/MPS to @chan.  Only meaningful for ERTM
 * and Streaming mode channels; if the peer omitted the RFC option, spec
 * defaults are used and an error is logged.
 */
3187 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3191 struct l2cap_conf_rfc rfc;
3193 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Basic mode carries no RFC parameters worth applying. */
3195 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3198 while (len >= L2CAP_CONF_OPT_SIZE) {
3199 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3202 case L2CAP_CONF_RFC:
3203 if (olen == sizeof(rfc))
3204 memcpy(&rfc, (void *)val, olen);
3209 /* Use sane default values in case a misbehaving remote device
3210 * did not send an RFC option.
3212 rfc.mode = chan->mode;
3213 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3214 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3215 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3217 BT_ERR("Expected RFC option was not found, using defaults");
3221 case L2CAP_MODE_ERTM:
3222 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3223 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3224 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3226 case L2CAP_MODE_STREAMING:
3227 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matching ident), stop the info timer, mark feature
 * discovery done, and resume starting queued channels.
 */
3231 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3233 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3235 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3238 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3239 cmd->ident == conn->info_ident) {
3240 cancel_delayed_work(&conn->info_timer);
3242 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3243 conn->info_ident = 0;
3245 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: look up a listening channel for
 * the PSM, enforce link security (SDP exempt), create and register the new
 * child channel, then answer with success, pending, or an error result.
 * May also trigger the initial Information Request / Configure Request
 * exchange depending on connection state.
 * NOTE(review): this listing is elided; some structural lines of the
 * original function are not shown.
 */
3251 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3253 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3254 struct l2cap_conn_rsp rsp;
3255 struct l2cap_chan *chan = NULL, *pchan;
3256 struct sock *parent, *sk = NULL;
3257 int result, status = L2CAP_CS_NO_INFO;
3259 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3260 __le16 psm = req->psm;
3262 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3264 /* Check if we have socket listening on psm */
3265 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3267 result = L2CAP_CR_BAD_PSM;
3273 mutex_lock(&conn->chan_lock);
3276 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
3277 if (psm != cpu_to_le16(0x0001) &&
3278 !hci_conn_check_link_mode(conn->hcon)) {
3279 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3280 result = L2CAP_CR_SEC_BLOCK;
3284 result = L2CAP_CR_NO_MEM;
3286 /* Check for backlog size */
3287 if (sk_acceptq_is_full(parent)) {
3288 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3292 chan = pchan->ops->new_connection(pchan->data);
3298 /* Check if we already have channel with that dcid */
3299 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3300 sock_set_flag(sk, SOCK_ZAPPED);
3301 chan->ops->close(chan->data);
3305 hci_conn_hold(conn->hcon);
3307 bacpy(&bt_sk(sk)->src, conn->src);
3308 bacpy(&bt_sk(sk)->dst, conn->dst);
3312 bt_accept_enqueue(parent, sk);
3314 __l2cap_chan_add(conn, chan);
3318 __set_chan_timer(chan, sk->sk_sndtimeo);
3320 chan->ident = cmd->ident;
/* Pick the response result/status from security and feature-mask state. */
3322 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3323 if (l2cap_chan_check_security(chan)) {
3324 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3325 __l2cap_state_change(chan, BT_CONNECT2);
3326 result = L2CAP_CR_PEND;
3327 status = L2CAP_CS_AUTHOR_PEND;
3328 parent->sk_data_ready(parent, 0);
3330 __l2cap_state_change(chan, BT_CONFIG);
3331 result = L2CAP_CR_SUCCESS;
3332 status = L2CAP_CS_NO_INFO;
3335 __l2cap_state_change(chan, BT_CONNECT2);
3336 result = L2CAP_CR_PEND;
3337 status = L2CAP_CS_AUTHEN_PEND;
3340 __l2cap_state_change(chan, BT_CONNECT2);
3341 result = L2CAP_CR_PEND;
3342 status = L2CAP_CS_NO_INFO;
3346 release_sock(parent);
3347 mutex_unlock(&conn->chan_lock);
3350 rsp.scid = cpu_to_le16(scid);
3351 rsp.dcid = cpu_to_le16(dcid);
3352 rsp.result = cpu_to_le16(result);
3353 rsp.status = cpu_to_le16(status);
3354 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask still unknown: query it before configuration proceeds. */
3356 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3357 struct l2cap_info_req info;
3358 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3360 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3361 conn->info_ident = l2cap_get_ident(conn);
3363 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3365 l2cap_send_cmd(conn, conn->info_ident,
3366 L2CAP_INFO_REQ, sizeof(info), &info);
/* On immediate success, start configuration right away. */
3369 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3370 result == L2CAP_CR_SUCCESS) {
3372 set_bit(CONF_REQ_SENT, &chan->conf_state);
3373 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3374 l2cap_build_conf_req(chan, buf), buf);
3375 chan->num_conf_req++;
/* Handle an incoming Connection Response.  Locate the channel by scid (or
 * by the pending command ident), then on success move to BT_CONFIG and
 * send our Configure Request; on a final refusal, tear the channel down
 * with ECONNREFUSED.
 */
3381 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3383 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3384 u16 scid, dcid, result, status;
3385 struct l2cap_chan *chan;
3389 scid = __le16_to_cpu(rsp->scid);
3390 dcid = __le16_to_cpu(rsp->dcid);
3391 result = __le16_to_cpu(rsp->result);
3392 status = __le16_to_cpu(rsp->status);
3394 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3395 dcid, scid, result, status);
3397 mutex_lock(&conn->chan_lock);
3400 chan = __l2cap_get_chan_by_scid(conn, scid);
/* Fall back to the ident used in our original request. */
3406 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3415 l2cap_chan_lock(chan);
3418 case L2CAP_CR_SUCCESS:
3419 l2cap_state_change(chan, BT_CONFIG);
3422 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send the Configure Request only once. */
3424 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3427 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3428 l2cap_build_conf_req(chan, req), req);
3429 chan->num_conf_req++;
3433 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3437 l2cap_chan_del(chan, ECONNREFUSED);
3441 l2cap_chan_unlock(chan);
3444 mutex_unlock(&conn->chan_lock);
/* Choose the FCS setting after configuration completes: CRC16 applies only
 * to ERTM/Streaming channels, and only if neither side requested "no FCS".
 */
3449 static inline void set_default_fcs(struct l2cap_chan *chan)
3451 /* FCS is enabled only in ERTM or streaming mode, if one or both
3454 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3455 chan->fcs = L2CAP_FCS_NONE;
3456 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3457 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request.  Accumulates option fragments in
 * chan->conf_req across continuation packets (flags bit 0), then parses
 * the completed request, replies, and — once both directions are
 * configured — brings the channel up (including ERTM init when needed).
 * NOTE(review): this listing is elided; some structural lines of the
 * original function are not shown.
 */
3460 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3462 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3465 struct l2cap_chan *chan;
3468 dcid = __le16_to_cpu(req->dcid);
3469 flags = __le16_to_cpu(req->flags);
3471 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3473 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject CID. */
3477 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3478 struct l2cap_cmd_rej_cid rej;
3480 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3481 rej.scid = cpu_to_le16(chan->scid);
3482 rej.dcid = cpu_to_le16(chan->dcid);
3484 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3489 /* Reject if config buffer is too small. */
3490 len = cmd_len - sizeof(*req);
3491 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3492 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3493 l2cap_build_conf_rsp(chan, rsp,
3494 L2CAP_CONF_REJECT, flags), rsp);
/* Buffer this fragment of the option list. */
3499 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3500 chan->conf_len += len;
3502 if (flags & 0x0001) {
3503 /* Incomplete config. Send empty response. */
3504 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3505 l2cap_build_conf_rsp(chan, rsp,
3506 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3510 /* Complete config. */
3511 len = l2cap_parse_conf_req(chan, rsp);
3513 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3517 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3518 chan->num_conf_rsp++;
3520 /* Reset config buffer. */
3523 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: the channel is ready for data. */
3526 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3527 set_default_fcs(chan);
3529 l2cap_state_change(chan, BT_CONNECTED);
3531 if (chan->mode == L2CAP_MODE_ERTM ||
3532 chan->mode == L2CAP_MODE_STREAMING)
3533 err = l2cap_ertm_init(chan);
3536 l2cap_send_disconn_req(chan->conn, chan, -err);
3538 l2cap_chan_ready(chan);
3543 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3545 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3546 l2cap_build_conf_req(chan, buf), buf);
3547 chan->num_conf_req++;
3550 /* Got Conf Rsp PENDING from remote side and asume we sent
3551 Conf Rsp PENDING in the code above */
3552 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3553 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3555 /* check compatibility */
3557 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3558 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3560 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3561 l2cap_build_conf_rsp(chan, rsp,
3562 L2CAP_CONF_SUCCESS, 0x0000), rsp);
3566 l2cap_chan_unlock(chan);
/* Handle an incoming Configure Response.  On SUCCESS apply the RFC
 * parameters; on PENDING resolve the two-sided pending handshake; on
 * UNACCEPT retry with adjusted options (up to L2CAP_CONF_MAX_CONF_RSP
 * attempts); otherwise disconnect.  Once both directions are done the
 * channel moves to BT_CONNECTED.
 * NOTE(review): this listing is elided; some structural lines of the
 * original function are not shown.
 */
3570 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3572 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3573 u16 scid, flags, result;
3574 struct l2cap_chan *chan;
3575 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3578 scid = __le16_to_cpu(rsp->scid);
3579 flags = __le16_to_cpu(rsp->flags);
3580 result = __le16_to_cpu(rsp->result);
3582 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3585 chan = l2cap_get_chan_by_scid(conn, scid);
3590 case L2CAP_CONF_SUCCESS:
3591 l2cap_conf_rfc_get(chan, rsp->data, len);
3592 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3595 case L2CAP_CONF_PENDING:
3596 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
/* If we were also pending locally, parse the peer's options now. */
3598 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3601 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3604 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3608 /* check compatibility */
3610 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3611 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3613 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3614 l2cap_build_conf_rsp(chan, buf,
3615 L2CAP_CONF_SUCCESS, 0x0000), buf);
3619 case L2CAP_CONF_UNACCEPT:
3620 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Response options must fit in our request buffer. */
3623 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3624 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3628 /* throw out any old stored conf requests */
3629 result = L2CAP_CONF_SUCCESS;
3630 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3633 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3637 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3638 L2CAP_CONF_REQ, len, req);
3639 chan->num_conf_req++;
3640 if (result != L2CAP_CONF_SUCCESS)
3646 l2cap_chan_set_err(chan, ECONNRESET);
3648 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3649 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3656 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* Both directions configured: bring the channel up. */
3658 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3659 set_default_fcs(chan);
3661 l2cap_state_change(chan, BT_CONNECTED);
3662 if (chan->mode == L2CAP_MODE_ERTM ||
3663 chan->mode == L2CAP_MODE_STREAMING)
3664 err = l2cap_ertm_init(chan);
3667 l2cap_send_disconn_req(chan->conn, chan, -err);
3669 l2cap_chan_ready(chan);
3673 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, shut the socket down, and remove the channel.
 * A reference is held across l2cap_chan_del() so the close callback can
 * still run safely after unlock.
 */
3677 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3679 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3680 struct l2cap_disconn_rsp rsp;
3682 struct l2cap_chan *chan;
3685 scid = __le16_to_cpu(req->scid);
3686 dcid = __le16_to_cpu(req->dcid);
3688 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3690 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid for this channel. */
3692 chan = __l2cap_get_chan_by_scid(conn, dcid);
3694 mutex_unlock(&conn->chan_lock);
3698 l2cap_chan_lock(chan);
3702 rsp.dcid = cpu_to_le16(chan->scid);
3703 rsp.scid = cpu_to_le16(chan->dcid);
3704 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3707 sk->sk_shutdown = SHUTDOWN_MASK;
/* Hold a ref so chan stays valid for ops->close() after deletion. */
3710 l2cap_chan_hold(chan);
3711 l2cap_chan_del(chan, ECONNRESET);
3713 l2cap_chan_unlock(chan);
3715 chan->ops->close(chan->data);
3716 l2cap_chan_put(chan);
3718 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnection Response to our earlier request:
 * remove the channel (no error) and invoke its close callback under a
 * temporary reference.
 */
3723 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3725 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3727 struct l2cap_chan *chan;
3729 scid = __le16_to_cpu(rsp->scid);
3730 dcid = __le16_to_cpu(rsp->dcid);
3732 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3734 mutex_lock(&conn->chan_lock);
3736 chan = __l2cap_get_chan_by_scid(conn, scid);
3738 mutex_unlock(&conn->chan_lock);
3742 l2cap_chan_lock(chan);
/* Hold a ref so chan stays valid for ops->close() after deletion. */
3744 l2cap_chan_hold(chan);
3745 l2cap_chan_del(chan, 0);
3747 l2cap_chan_unlock(chan);
3749 chan->ops->close(chan->data);
3750 l2cap_chan_put(chan);
3752 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request.  Answers FEAT_MASK with our
 * feature bits (ERTM/Streaming, and extended flow/window when enabled),
 * FIXED_CHAN with the fixed channel bitmap, and anything else with
 * NOTSUPP.
 */
3757 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3759 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3762 type = __le16_to_cpu(req->type);
3764 BT_DBG("type 0x%4.4x", type);
3766 if (type == L2CAP_IT_FEAT_MASK) {
3768 u32 feat_mask = l2cap_feat_mask;
3769 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3770 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3771 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3773 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3776 feat_mask |= L2CAP_FEAT_EXT_FLOW
3777 | L2CAP_FEAT_EXT_WINDOW;
/* Feature mask is serialized little-endian, unaligned in the buffer. */
3779 put_unaligned_le32(feat_mask, rsp->data);
3780 l2cap_send_cmd(conn, cmd->ident,
3781 L2CAP_INFO_RSP, sizeof(buf), buf);
3782 } else if (type == L2CAP_IT_FIXED_CHAN) {
3784 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when available. */
3787 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3789 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3791 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3792 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3793 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3794 l2cap_send_cmd(conn, cmd->ident,
3795 L2CAP_INFO_RSP, sizeof(buf), buf);
3797 struct l2cap_info_rsp rsp;
3798 rsp.type = cpu_to_le16(type);
3799 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3800 l2cap_send_cmd(conn, cmd->ident,
3801 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response matching our outstanding
 * request.  Records the peer's feature mask, chains a FIXED_CHAN query
 * when the peer advertises fixed channels, and otherwise marks discovery
 * done and starts the queued channels.
 */
3807 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3809 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3812 type = __le16_to_cpu(rsp->type);
3813 result = __le16_to_cpu(rsp->result);
3815 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3817 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3818 if (cmd->ident != conn->info_ident ||
3819 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3822 cancel_delayed_work(&conn->info_timer);
3824 if (result != L2CAP_IR_SUCCESS) {
3825 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3826 conn->info_ident = 0;
3828 l2cap_conn_start(conn);
3834 case L2CAP_IT_FEAT_MASK:
3835 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask which ones before finishing. */
3837 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3838 struct l2cap_info_req req;
3839 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3841 conn->info_ident = l2cap_get_ident(conn);
3843 l2cap_send_cmd(conn, conn->info_ident,
3844 L2CAP_INFO_REQ, sizeof(req), &req);
3846 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3847 conn->info_ident = 0;
3849 l2cap_conn_start(conn);
3853 case L2CAP_IT_FIXED_CHAN:
3854 conn->fixed_chan_mask = rsp->data[0];
3855 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3856 conn->info_ident = 0;
3858 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  Currently a placeholder that
 * always replies with L2CAP_CR_NO_MEM after validating the command length.
 */
3865 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3866 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3869 struct l2cap_create_chan_req *req = data;
3870 struct l2cap_create_chan_rsp rsp;
3873 if (cmd_len != sizeof(*req))
3879 psm = le16_to_cpu(req->psm);
3880 scid = le16_to_cpu(req->scid);
3882 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3884 /* Placeholder: Always reject */
3886 rsp.scid = cpu_to_le16(scid);
3887 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3888 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3890 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response shares the Connect Response wire format,
 * so delegate to the regular connect-response handler.
 */
3896 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3897 struct l2cap_cmd_hdr *cmd, void *data)
3899 BT_DBG("conn %p", conn);
3901 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for @icid with the given @result,
 * reusing the requester's @ident.
 */
3904 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3905 u16 icid, u16 result)
3907 struct l2cap_move_chan_rsp rsp;
3909 BT_DBG("icid %d, result %d", icid, result);
3911 rsp.icid = cpu_to_le16(icid);
3912 rsp.result = cpu_to_le16(result);
3914 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirmation for @icid with @result, allocating a
 * fresh ident which is also recorded on @chan so the confirm-response can
 * be matched later.
 * NOTE(review): chan may be NULL at one call site (l2cap_move_channel_rsp)
 * — confirm how the unshown lines guard the chan->ident store.
 */
3917 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3918 struct l2cap_chan *chan, u16 icid, u16 result)
3920 struct l2cap_move_chan_cfm cfm;
3923 BT_DBG("icid %d, result %d", icid, result);
3925 ident = l2cap_get_ident(conn);
3927 chan->ident = ident;
3929 cfm.icid = cpu_to_le16(icid);
3930 cfm.result = cpu_to_le16(result);
3932 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirmation for @icid, echoing the sender's
 * @ident.
 */
3935 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3938 struct l2cap_move_chan_cfm_rsp rsp;
3940 BT_DBG("icid %d", icid);
3942 rsp.icid = cpu_to_le16(icid);
3943 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle an AMP Move Channel Request.  Currently a placeholder that
 * always refuses with L2CAP_MR_NOT_ALLOWED after a length check.
 */
3946 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3947 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3949 struct l2cap_move_chan_req *req = data;
3951 u16 result = L2CAP_MR_NOT_ALLOWED;
3953 if (cmd_len != sizeof(*req))
3956 icid = le16_to_cpu(req->icid);
3958 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3963 /* Placeholder: Always refuse */
3964 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle an AMP Move Channel Response.  Currently a placeholder that
 * always confirms UNCONFIRMED (no channel context is resolved yet).
 */
3969 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3970 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3972 struct l2cap_move_chan_rsp *rsp = data;
3975 if (cmd_len != sizeof(*rsp))
3978 icid = le16_to_cpu(rsp->icid);
3979 result = le16_to_cpu(rsp->result);
3981 BT_DBG("icid %d, result %d", icid, result);
3983 /* Placeholder: Always unconfirmed */
3984 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle an AMP Move Channel Confirmation: validate the length and reply
 * with a Confirmation Response for the same icid.
 */
3989 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3990 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3992 struct l2cap_move_chan_cfm *cfm = data;
3995 if (cmd_len != sizeof(*cfm))
3998 icid = le16_to_cpu(cfm->icid);
3999 result = le16_to_cpu(cfm->result);
4001 BT_DBG("icid %d, result %d", icid, result);
4003 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle an AMP Move Channel Confirmation Response.  Only length-checks
 * and logs; no state change is performed here.
 */
4008 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4009 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4011 struct l2cap_move_chan_cfm_rsp *rsp = data;
4014 if (cmd_len != sizeof(*rsp))
4017 icid = le16_to_cpu(rsp->icid);
4019 BT_DBG("icid %d", icid);
/* Validate LE connection parameters against the allowed ranges:
 * interval min/max (units of 1.25 ms, 6..3200), supervision timeout
 * multiplier (units of 10 ms, 10..3200), and slave latency.  The
 * timeout must exceed the effective interval including latency.
 */
4024 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4029 if (min > max || min < 6 || max > 3200)
4032 if (to_multiplier < 10 || to_multiplier > 3200)
4035 if (max >= to_multiplier * 8)
/* Largest latency that still lets the link survive the timeout. */
4038 max_latency = (to_multiplier * 8 / max) - 1;
4039 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master role only).
 * Validates the requested parameters, replies accepted/rejected, and on
 * acceptance asks the controller to update the connection.
 */
4045 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4046 struct l2cap_cmd_hdr *cmd, u8 *data)
4048 struct hci_conn *hcon = conn->hcon;
4049 struct l2cap_conn_param_update_req *req;
4050 struct l2cap_conn_param_update_rsp rsp;
4051 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may act on this request. */
4054 if (!(hcon->link_mode & HCI_LM_MASTER))
4057 cmd_len = __le16_to_cpu(cmd->len);
4058 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4061 req = (struct l2cap_conn_param_update_req *) data;
4062 min = __le16_to_cpu(req->min);
4063 max = __le16_to_cpu(req->max);
4064 latency = __le16_to_cpu(req->latency);
4065 to_multiplier = __le16_to_cpu(req->to_multiplier);
4067 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4068 min, max, latency, to_multiplier);
4070 memset(&rsp, 0, sizeof(rsp));
4072 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4074 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4076 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4078 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Only push the new parameters to the controller when accepted. */
4082 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signaling command to its handler.  Echo
 * Requests are answered inline; unknown opcodes are logged and rejected
 * by the caller via the returned error.
 */
4087 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4088 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4092 switch (cmd->code) {
4093 case L2CAP_COMMAND_REJ:
4094 l2cap_command_rej(conn, cmd, data);
4097 case L2CAP_CONN_REQ:
4098 err = l2cap_connect_req(conn, cmd, data);
4101 case L2CAP_CONN_RSP:
4102 err = l2cap_connect_rsp(conn, cmd, data);
4105 case L2CAP_CONF_REQ:
4106 err = l2cap_config_req(conn, cmd, cmd_len, data);
4109 case L2CAP_CONF_RSP:
4110 err = l2cap_config_rsp(conn, cmd, data);
4113 case L2CAP_DISCONN_REQ:
4114 err = l2cap_disconnect_req(conn, cmd, data);
4117 case L2CAP_DISCONN_RSP:
4118 err = l2cap_disconnect_rsp(conn, cmd, data);
4121 case L2CAP_ECHO_REQ:
/* Echo the request payload straight back. */
4122 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4125 case L2CAP_ECHO_RSP:
4128 case L2CAP_INFO_REQ:
4129 err = l2cap_information_req(conn, cmd, data);
4132 case L2CAP_INFO_RSP:
4133 err = l2cap_information_rsp(conn, cmd, data);
4136 case L2CAP_CREATE_CHAN_REQ:
4137 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4140 case L2CAP_CREATE_CHAN_RSP:
4141 err = l2cap_create_channel_rsp(conn, cmd, data);
4144 case L2CAP_MOVE_CHAN_REQ:
4145 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4148 case L2CAP_MOVE_CHAN_RSP:
4149 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4152 case L2CAP_MOVE_CHAN_CFM:
4153 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4156 case L2CAP_MOVE_CHAN_CFM_RSP:
4157 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4161 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signaling command.  Only the connection parameter
 * update request is handled; rejects and update responses are ignored,
 * anything else is logged as unknown.
 */
4169 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4170 struct l2cap_cmd_hdr *cmd, u8 *data)
4172 switch (cmd->code) {
4173 case L2CAP_COMMAND_REJ:
4176 case L2CAP_CONN_PARAM_UPDATE_REQ:
4177 return l2cap_conn_param_update_req(conn, cmd, data);
4179 case L2CAP_CONN_PARAM_UPDATE_RSP:
4183 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the signaling channel: iterate over the
 * packed command headers, dispatch each to the LE or BR/EDR handler
 * depending on link type, and send a Command Reject when a handler
 * returns an error.
 */
4188 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4189 struct sk_buff *skb)
4191 u8 *data = skb->data;
4193 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic first. */
4196 l2cap_raw_recv(conn, skb);
4198 while (len >= L2CAP_CMD_HDR_SIZE) {
4200 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4201 data += L2CAP_CMD_HDR_SIZE;
4202 len -= L2CAP_CMD_HDR_SIZE;
4204 cmd_len = le16_to_cpu(cmd.len);
4206 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A declared length beyond the buffer, or ident 0, is malformed. */
4208 if (cmd_len > len || !cmd.ident) {
4209 BT_DBG("corrupted command");
4213 if (conn->hcon->type == LE_LINK)
4214 err = l2cap_le_sig_cmd(conn, &cmd, data);
4216 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4219 struct l2cap_cmd_rej_unk rej;
4221 BT_ERR("Wrong link type (%d)", err);
4223 /* FIXME: Map err to a valid reason */
4224 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4225 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the trailing CRC16 FCS of a received frame when FCS
 * is enabled on @chan.  The CRC covers the L2CAP header (enhanced or
 * extended, depending on FLAG_EXT_CTRL) plus the payload.
 */
4235 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4237 u16 our_fcs, rcv_fcs;
4240 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4241 hdr_size = L2CAP_EXT_HDR_SIZE;
4243 hdr_size = L2CAP_ENH_HDR_SIZE;
4245 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off, then read it from just past the new tail. */
4246 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4247 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4248 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4250 if (our_fcs != rcv_fcs)
/* After a poll/final exchange, resume transmission: send RNR if we are
 * locally busy, otherwise retransmit (if the peer was busy) and push
 * pending I-frames; if nothing was sent, fall back to an RR so the peer
 * still gets an acknowledgment.
 */
4256 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4260 chan->frames_sent = 0;
4262 control |= __set_reqseq(chan, chan->buffer_seq);
4264 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4265 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
4266 set_bit(CONN_RNR_SENT, &chan->conn_state);
4269 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
4270 l2cap_retransmit_frames(chan);
4272 l2cap_ertm_send(chan);
/* No I-frames went out and we are not busy: acknowledge with RR. */
4274 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4275 chan->frames_sent == 0) {
4276 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq offset relative to buffer_seq.  Duplicate
 * tx_seq values are detected via the peeked entries.
 */
4280 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
4282 struct sk_buff *next_skb;
4283 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequence and SAR bits in the skb control block for reassembly. */
4285 bt_cb(skb)->control.txseq = tx_seq;
4286 bt_cb(skb)->control.sar = sar;
4288 next_skb = skb_peek(&chan->srej_q);
4290 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4293 if (bt_cb(next_skb)->control.txseq == tx_seq)
4296 next_tx_seq_offset = __seq_offset(chan,
4297 bt_cb(next_skb)->control.txseq, chan->buffer_seq);
/* Found the first queued frame that sorts after us: insert before it. */
4299 if (next_tx_seq_offset > tx_seq_offset) {
4300 __skb_queue_before(&chan->srej_q, next_skb, skb);
4304 if (skb_queue_is_last(&chan->srej_q, next_skb))
4307 next_skb = skb_queue_next(&chan->srej_q, next_skb);
4310 __skb_queue_tail(&chan->srej_q, skb);
/* Append @new_frag to @skb's frag_list, tracking the current tail via
 * *last_frag to avoid walking the list, and keep skb's length/truesize
 * accounting consistent.
 */
4315 static void append_skb_frag(struct sk_buff *skb,
4316 struct sk_buff *new_frag, struct sk_buff **last_frag)
4318 /* skb->len reflects data in skb as well as all fragments
4319 * skb->data_len reflects only data in fragments
4321 if (!skb_has_frag_list(skb))
4322 skb_shinfo(skb)->frag_list = new_frag;
4324 new_frag->next = NULL;
4326 (*last_frag)->next = new_frag;
4327 *last_frag = new_frag;
4329 skb->len += new_frag->len;
4330 skb->data_len += new_frag->len;
4331 skb->truesize += new_frag->truesize;
/* Reassemble a segmented SDU from @skb according to control->sar.
 * Unsegmented frames go straight to ops->recv(); START frames open a new
 * SDU (after validating the declared length against imtu); CONTINUE/END
 * frames are chained as fragments, and the completed SDU is delivered
 * once its accumulated length matches sdu_len.  On error the partial SDU
 * is freed.
 * NOTE(review): this listing is elided; several branch/cleanup lines of
 * the original function are not shown.
 */
4334 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4335 struct l2cap_ctrl *control)
4339 switch (control->sar) {
4340 case L2CAP_SAR_UNSEGMENTED:
4344 err = chan->ops->recv(chan->data, skb);
4347 case L2CAP_SAR_START:
/* First fragment carries the total SDU length up front. */
4351 chan->sdu_len = get_unaligned_le16(skb->data);
4352 skb_pull(skb, L2CAP_SDULEN_SIZE);
4354 if (chan->sdu_len > chan->imtu) {
4359 if (skb->len >= chan->sdu_len)
4363 chan->sdu_last_frag = skb;
4369 case L2CAP_SAR_CONTINUE:
4373 append_skb_frag(chan->sdu, skb,
4374 &chan->sdu_last_frag);
4377 if (chan->sdu->len >= chan->sdu_len)
4387 append_skb_frag(chan->sdu, skb,
4388 &chan->sdu_last_frag);
4391 if (chan->sdu->len != chan->sdu_len)
4394 err = chan->ops->recv(chan->data, chan->sdu);
4397 /* Reassembly complete */
4399 chan->sdu_last_frag = NULL;
/* Error path: drop the partially assembled SDU. */
4407 kfree_skb(chan->sdu);
4409 chan->sdu_last_frag = NULL;
/* Enter ERTM local-busy state: mark the connection busy, clear the SREJ
 * list, and arm the ack timer so an RNR gets sent.
 */
4416 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
4418 BT_DBG("chan %p, Enter local busy", chan);
4420 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4421 l2cap_seq_list_clear(&chan->srej_list);
4423 __set_ack_timer(chan);
/* Leave ERTM local-busy state.  If an RNR was previously sent, send an
 * RR with the poll bit set and wait for the peer's final response
 * (monitor timer armed, CONN_WAIT_F set) before clearing the busy flags.
 */
4426 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
4430 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4433 control = __set_reqseq(chan, chan->buffer_seq);
/* Poll the peer so it answers with an F-bit frame. */
4434 control |= __set_ctrl_poll(chan);
4435 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4436 chan->retry_count = 1;
4438 __clear_retrans_timer(chan);
4439 __set_monitor_timer(chan);
4441 set_bit(CONN_WAIT_F, &chan->conn_state);
4444 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4445 clear_bit(CONN_RNR_SENT, &chan->conn_state);
4447 BT_DBG("chan %p, Exit local busy", chan);
/* Public toggle for the ERTM local-busy condition; a no-op for channels
 * not in ERTM mode.
 */
4450 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4452 if (chan->mode == L2CAP_MODE_ERTM) {
4454 l2cap_ertm_enter_local_busy(chan);
4456 l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ queue starting at @tx_seq: deliver each consecutive
 * buffered frame (unless locally busy), advancing buffer_seq_srej and
 * tx_seq as each one is consumed.  A delivery failure tears the
 * connection down.
 */
4460 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
4462 struct sk_buff *skb;
4465 while ((skb = skb_peek(&chan->srej_q)) &&
4466 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Stop at the first gap in the stored sequence. */
4469 if (bt_cb(skb)->control.txseq != tx_seq)
4472 skb = skb_dequeue(&chan->srej_q);
4473 control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
4476 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4480 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
4481 tx_seq = __next_seq(chan, tx_seq);
/* Re-send a SREJ S-frame for @tx_seq.  Walk the outstanding SREJ list;
 * when the matching entry is found, rebuild and send the SREJ control
 * field and move the entry to the tail of the list (re-queued last).
 * NOTE(review): list_del and the send call are elided in this listing.
 */
4485 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4487 struct srej_list *l, *tmp;
4490 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
4491 if (l->tx_seq == tx_seq) {
4496 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4497 control |= __set_reqseq(chan, l->tx_seq);
4499 list_add_tail(&l->list, &chan->srej_l);
/* Send a SREJ for every sequence number between expected_tx_seq and
 * @tx_seq (exclusive), recording each one both in the srej_list seq
 * list and as a struct srej_list node on srej_l.  Each iteration
 * advances expected_tx_seq; the final advance skips past @tx_seq
 * itself.  Returns 0 on success; on kzalloc failure presumably returns
 * -ENOMEM (error path elided in this listing — TODO confirm).
 */
4503 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4505 struct srej_list *new;
4508 while (tx_seq != chan->expected_tx_seq) {
4509 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4510 control |= __set_reqseq(chan, chan->expected_tx_seq);
4511 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
/* GFP_ATOMIC: this runs in a context that must not sleep. */
4513 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
4517 new->tx_seq = chan->expected_tx_seq;
4519 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4521 list_add_tail(&new->list, &chan->srej_l);
/* Step over the frame that actually arrived (tx_seq). */
4524 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive path.  Validates the incoming tx_seq
 * against the receive window, handles the F-bit (WAIT_F exit), drops
 * frames acked by req_seq, manages SREJ recovery (queueing out-of-order
 * frames and requesting the missing ones), and acks received data.
 * NOTE(review): this listing elides numerous lines (gotos, braces,
 * returns — visible as gaps in the embedded numbering); comments are
 * limited to what the visible statements establish.
 */
4529 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4531 u16 tx_seq = __get_txseq(chan, rx_control);
4532 u16 req_seq = __get_reqseq(chan, rx_control);
4533 u8 sar = __get_ctrl_sar(chan, rx_control);
4534 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every tx_win/6 frames (at least every frame). */
4535 int num_to_ack = (chan->tx_win/6) + 1;
4538 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
4539 tx_seq, rx_control);
/* F=1 while in WAIT_F: poll answered — stop the monitor timer and
 * fall back to the retransmission timer if frames are still unacked. */
4541 if (__is_ctrl_final(chan, rx_control) &&
4542 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4543 __clear_monitor_timer(chan);
4544 if (chan->unacked_frames > 0)
4545 __set_retrans_timer(chan);
4546 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* The piggybacked req_seq acknowledges our outstanding frames. */
4549 chan->expected_ack_seq = req_seq;
4550 l2cap_drop_acked_frames(chan);
4552 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4554 /* invalid tx_seq */
4555 if (tx_seq_offset >= chan->tx_win) {
4556 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* While locally busy we only keep acking; data is not consumed. */
4560 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4561 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4562 l2cap_send_ack(chan);
4566 if (tx_seq == chan->expected_tx_seq)
/* SREJ recovery in progress: either this frame fills the oldest gap
 * (process it and drain the hold queue), or it is a duplicate /
 * still-missing frame handled via the srej_l list. */
4569 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4570 struct srej_list *first;
4572 first = list_first_entry(&chan->srej_l,
4573 struct srej_list, list);
4574 if (tx_seq == first->tx_seq) {
4575 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4576 l2cap_check_srej_gap(chan, tx_seq);
4578 list_del(&first->list);
/* All gaps filled — leave SREJ_SENT and resync buffer_seq. */
4581 if (list_empty(&chan->srej_l)) {
4582 chan->buffer_seq = chan->buffer_seq_srej;
4583 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
4584 l2cap_send_ack(chan);
4585 BT_DBG("chan %p, Exit SREJ_SENT", chan);
4588 struct srej_list *l;
4590 /* duplicated tx_seq */
4591 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
4594 list_for_each_entry(l, &chan->srej_l, list) {
4595 if (l->tx_seq == tx_seq) {
4596 l2cap_resend_srejframe(chan, tx_seq);
4601 err = l2cap_send_srejframe(chan, tx_seq);
4603 l2cap_send_disconn_req(chan->conn, chan, -err);
/* Not in SREJ recovery: decide duplicate vs. new gap. */
4608 expected_tx_seq_offset = __seq_offset(chan,
4609 chan->expected_tx_seq, chan->buffer_seq);
4611 /* duplicated tx_seq */
4612 if (tx_seq_offset < expected_tx_seq_offset)
/* A gap was detected — enter SREJ recovery. */
4615 set_bit(CONN_SREJ_SENT, &chan->conn_state);
4617 BT_DBG("chan %p, Enter SREJ", chan);
4619 INIT_LIST_HEAD(&chan->srej_l);
4620 chan->buffer_seq_srej = chan->buffer_seq;
4622 __skb_queue_head_init(&chan->srej_q);
4623 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4625 /* Set P-bit only if there are some I-frames to ack. */
4626 if (__clear_ack_timer(chan))
4627 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4629 err = l2cap_send_srejframe(chan, tx_seq);
4631 l2cap_send_disconn_req(chan->conn, chan, -err);
/* Expected in-sequence frame: advance and deliver (or hold while
 * SREJ recovery is still draining). */
4638 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4640 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4641 bt_cb(skb)->control.txseq = tx_seq;
4642 bt_cb(skb)->control.sar = sar;
4643 __skb_queue_tail(&chan->srej_q, skb);
4647 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
/* Presumably reached on reassembly failure — condition elided. */
4650 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4654 if (__is_ctrl_final(chan, rx_control)) {
4655 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4656 l2cap_retransmit_frames(chan);
/* Batch acks: send one every num_to_ack frames, else (re)arm timer. */
4660 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4661 if (chan->num_acked == num_to_ack - 1)
4662 l2cap_send_ack(chan);
4664 __set_ack_timer(chan);
/* Handle a received RR (Receiver Ready) S-frame.  The reqseq acks our
 * outstanding I-frames.  P=1 demands an F-bit response (RR/RNR or
 * I-frame with F set); F=1 answers our own poll.  Otherwise the peer
 * is simply no longer busy, so resume transmission.
 * NOTE(review): some lines are elided in this listing (numbering gaps).
 */
4673 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4675 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4676 __get_reqseq(chan, rx_control), rx_control);
4678 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4679 l2cap_drop_acked_frames(chan);
/* P=1: we must answer with the F bit set. */
4681 if (__is_ctrl_poll(chan, rx_control)) {
4682 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4683 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4684 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4685 (chan->unacked_frames > 0))
4686 __set_retrans_timer(chan);
4688 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* During SREJ recovery, answer the poll with the tail SREJ. */
4689 l2cap_send_srejtail(chan);
4691 l2cap_send_i_or_rr_or_rnr(chan);
/* F=1: response to our poll — retransmit unless a REJ already did. */
4694 } else if (__is_ctrl_final(chan, rx_control)) {
4695 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4697 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4698 l2cap_retransmit_frames(chan);
/* Plain RR: peer left busy state; restart normal transmission. */
4701 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4702 (chan->unacked_frames > 0))
4703 __set_retrans_timer(chan);
4705 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4706 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4707 l2cap_send_ack(chan);
4709 l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: the peer rejects everything from
 * reqseq onward.  Ack frames below reqseq, then retransmit.  With F=1
 * only retransmit if a REJ wasn't already acted on (CONN_REJ_ACT);
 * while we are in WAIT_F, record that a REJ was acted upon.
 * NOTE(review): some lines are elided in this listing (numbering gaps).
 */
4713 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4715 u16 tx_seq = __get_reqseq(chan, rx_control);
4717 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4719 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4721 chan->expected_ack_seq = tx_seq;
4722 l2cap_drop_acked_frames(chan);
4724 if (__is_ctrl_final(chan, rx_control)) {
4725 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4726 l2cap_retransmit_frames(chan);
4728 l2cap_retransmit_frames(chan);
4730 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4731 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a received SREJ S-frame: the peer selectively requests
 * retransmission of the single frame reqseq.  Behavior differs by
 * P/F bits: P=1 also acks up to reqseq and requires an F-bit reply;
 * F=1 may close out a pending SREJ (CONN_SREJ_ACT); otherwise just
 * retransmit the one frame and remember it if we are in WAIT_F.
 * NOTE(review): some lines are elided in this listing (numbering gaps).
 */
4734 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4736 u16 tx_seq = __get_reqseq(chan, rx_control);
4738 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4740 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4742 if (__is_ctrl_poll(chan, rx_control)) {
4743 chan->expected_ack_seq = tx_seq;
4744 l2cap_drop_acked_frames(chan);
4746 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4747 l2cap_retransmit_one_frame(chan, tx_seq);
4749 l2cap_ertm_send(chan);
4751 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4752 chan->srej_save_reqseq = tx_seq;
4753 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4755 } else if (__is_ctrl_final(chan, rx_control)) {
/* F=1 matching a saved SREJ: recovery for that seq is complete. */
4756 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4757 chan->srej_save_reqseq == tx_seq)
4758 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4760 l2cap_retransmit_one_frame(chan, tx_seq);
4762 l2cap_retransmit_one_frame(chan, tx_seq);
4763 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4764 chan->srej_save_reqseq = tx_seq;
4765 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a received RNR (Receiver Not Ready) S-frame: mark the peer
 * busy, ack up to reqseq, and stop retransmitting.  P=1 requires an
 * F-bit reply (RR/RNR, or the SREJ tail if in SREJ recovery).
 * NOTE(review): some lines are elided in this listing (numbering gaps).
 */
4770 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4772 u16 tx_seq = __get_reqseq(chan, rx_control);
4774 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4776 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4777 chan->expected_ack_seq = tx_seq;
4778 l2cap_drop_acked_frames(chan);
4780 if (__is_ctrl_poll(chan, rx_control))
4781 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Not in SREJ recovery: stop retransmissions; answer a poll now. */
4783 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4784 __clear_retrans_timer(chan);
4785 if (__is_ctrl_poll(chan, rx_control))
4786 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4790 if (__is_ctrl_poll(chan, rx_control)) {
4791 l2cap_send_srejtail(chan);
4793 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
/* Dispatch a received S-frame to the per-type handler (RR/REJ/SREJ/
 * RNR).  First handles the F-bit while in WAIT_F: stop the monitor
 * timer and re-arm the retransmission timer if frames remain unacked.
 * NOTE(review): break/return/kfree lines are elided in this listing.
 */
4797 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4799 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4801 if (__is_ctrl_final(chan, rx_control) &&
4802 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4803 __clear_monitor_timer(chan);
4804 if (chan->unacked_frames > 0)
4805 __set_retrans_timer(chan);
4806 clear_bit(CONN_WAIT_F, &chan->conn_state);
4809 switch (__get_ctrl_super(chan, rx_control)) {
4810 case L2CAP_SUPER_RR:
4811 l2cap_data_channel_rrframe(chan, rx_control);
4814 case L2CAP_SUPER_REJ:
4815 l2cap_data_channel_rejframe(chan, rx_control);
4818 case L2CAP_SUPER_SREJ:
4819 l2cap_data_channel_srejframe(chan, rx_control);
4822 case L2CAP_SUPER_RNR:
4823 l2cap_data_channel_rnrframe(chan, rx_control);
/* Classify an incoming I-frame's txseq relative to the receive state:
 * expected, duplicate, unexpected (gap), an (un)expected SREJ reply,
 * or invalid/ignorable.  Used by the newer ERTM receive state machine.
 * NOTE(review): some closing braces/blank lines are elided here.
 */
4831 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4833 BT_DBG("chan %p, txseq %d", chan, txseq);
4835 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4836 chan->expected_tx_seq);
/* While SREJ recovery is pending, classify against the SREJ lists. */
4838 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4839 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4841 /* See notes below regarding "double poll" and
4844 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4845 BT_DBG("Invalid/Ignore - after SREJ");
4846 return L2CAP_TXSEQ_INVALID_IGNORE;
4848 BT_DBG("Invalid - in window after SREJ sent");
4849 return L2CAP_TXSEQ_INVALID;
4853 if (chan->srej_list.head == txseq) {
4854 BT_DBG("Expected SREJ");
4855 return L2CAP_TXSEQ_EXPECTED_SREJ;
4858 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4859 BT_DBG("Duplicate SREJ - txseq already stored");
4860 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4863 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4864 BT_DBG("Unexpected SREJ - not requested");
4865 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
/* Normal (non-SREJ) classification path. */
4869 if (chan->expected_tx_seq == txseq) {
4870 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4872 BT_DBG("Invalid - txseq outside tx window");
4873 return L2CAP_TXSEQ_INVALID;
4876 return L2CAP_TXSEQ_EXPECTED;
4880 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4881 __seq_offset(chan, chan->expected_tx_seq,
4882 chan->last_acked_seq)){
4883 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4884 return L2CAP_TXSEQ_DUPLICATE;
4887 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4888 /* A source of invalid packets is a "double poll" condition,
4889 * where delays cause us to send multiple poll packets. If
4890 * the remote stack receives and processes both polls,
4891 * sequence numbers can wrap around in such a way that a
4892 * resent frame has a sequence number that looks like new data
4893 * with a sequence gap. This would trigger an erroneous SREJ
4896 * Fortunately, this is impossible with a tx window that's
4897 * less than half of the maximum sequence number, which allows
4898 * invalid frames to be safely ignored.
4900 * With tx window sizes greater than half of the tx window
4901 * maximum, the frame is invalid and cannot be ignored. This
4902 * causes a disconnect.
4905 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4906 BT_DBG("Invalid/Ignore - txseq outside tx window");
4907 return L2CAP_TXSEQ_INVALID_IGNORE;
4909 BT_DBG("Invalid - txseq outside tx window");
4910 return L2CAP_TXSEQ_INVALID;
4913 BT_DBG("Unexpected - txseq indicates missing frames");
4914 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state-machine entry point.  The body is entirely elided
 * in this listing — presumably dispatches @event/@control through the
 * rx state machine; TODO confirm against the full source.
 */
4918 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4919 struct sk_buff *skb, u8 event)
/* Streaming-mode receive: deliver only frames classified as EXPECTED,
 * advancing buffer_seq and feeding them to SDU reassembly; anything
 * else is dropped (streaming mode never retransmits).  Any partially
 * reassembled SDU is discarded on error, and both last_acked_seq and
 * expected_tx_seq resynchronize to the received txseq.
 * NOTE(review): some lines (frees, braces) are elided in this listing.
 */
4925 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4926 struct sk_buff *skb)
4930 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
4933 if (l2cap_classify_txseq(chan, control->txseq) ==
4934 L2CAP_TXSEQ_EXPECTED) {
4935 l2cap_pass_to_tx(chan, control);
4937 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
4938 __next_seq(chan, chan->buffer_seq));
4940 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4942 l2cap_reassemble_sdu(chan, skb, control);
/* Drop any partial SDU — presumably on the non-expected path. */
4945 kfree_skb(chan->sdu);
4948 chan->sdu_last_frag = NULL;
4952 BT_DBG("Freeing %p", skb);
/* Resync sequence tracking to the frame just seen. */
4957 chan->last_acked_seq = control->txseq;
4958 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Validate and dispatch one received ERTM/streaming frame: unpack the
 * control field, verify FCS, check the payload fits the MPS, validate
 * F/P bit combinations against the TX state, then route I-frames to
 * l2cap_rx/l2cap_stream_rx and S-frames to the event table.  Protocol
 * violations tear the connection down with ECONNRESET.
 * NOTE(review): drop/free/goto lines are elided in this listing.
 */
4963 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4965 struct l2cap_ctrl *control = &bt_cb(skb)->control;
4969 __unpack_control(chan, skb);
/* Bad FCS:
4974 * We can just drop the corrupted I-frame here.
4975 * Receiver will miss it and start proper recovery
4976 * procedures and ask for retransmission.
 */
4978 if (l2cap_check_fcs(chan, skb))
/* Compute payload length net of SDU-length header and FCS trailer. */
4981 if (!control->sframe && control->sar == L2CAP_SAR_START)
4982 len -= L2CAP_SDULEN_SIZE;
4984 if (chan->fcs == L2CAP_FCS_CRC16)
4985 len -= L2CAP_FCS_SIZE;
4987 if (len > chan->mps) {
4988 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4992 if (!control->sframe) {
4995 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
4996 control->sar, control->reqseq, control->final,
4999 /* Validate F-bit - F=0 always valid, F=1 only
5000 * valid in TX WAIT_F
 */
5002 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5005 if (chan->mode != L2CAP_MODE_STREAMING) {
5006 event = L2CAP_EV_RECV_IFRAME;
5007 err = l2cap_rx(chan, control, skb, event);
5009 err = l2cap_stream_rx(chan, control, skb);
5013 l2cap_send_disconn_req(chan->conn, chan,
/* Map the 2-bit super field straight onto rx events. */
5016 const u8 rx_func_to_event[4] = {
5017 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5018 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5021 /* Only I-frames are expected in streaming mode */
5022 if (chan->mode == L2CAP_MODE_STREAMING)
5025 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5026 control->reqseq, control->final, control->poll,
5031 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5035 /* Validate F and P bits */
5036 if (control->final && (control->poll ||
5037 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5040 event = rx_func_to_event[control->super];
5041 if (l2cap_rx(chan, control, skb, event))
5042 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Route an inbound data frame to the channel identified by @cid.
 * Basic mode delivers straight to the socket layer (dropping oversized
 * frames); ERTM/streaming go through l2cap_data_rcv; unknown modes are
 * logged and the frame discarded.
 * NOTE(review): drop/goto/return lines are elided in this listing.
 */
5052 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
5054 struct l2cap_chan *chan;
5056 chan = l2cap_get_chan_by_scid(conn, cid);
5058 BT_DBG("unknown cid 0x%4.4x", cid);
5059 /* Drop packet and return */
5064 BT_DBG("chan %p, len %d", chan, skb->len);
5066 if (chan->state != BT_CONNECTED)
5069 switch (chan->mode) {
5070 case L2CAP_MODE_BASIC:
5071 /* If socket recv buffers overflows we drop data here
5072 * which is *bad* because L2CAP has to be reliable.
5073 * But we don't have any other choice. L2CAP doesn't
5074 * provide flow control mechanism. */
5076 if (chan->imtu < skb->len)
5079 if (!chan->ops->recv(chan->data, skb))
5083 case L2CAP_MODE_ERTM:
5084 case L2CAP_MODE_STREAMING:
5085 l2cap_data_rcv(chan, skb);
5089 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* l2cap_get_chan_by_scid took the lock; release before returning. */
5097 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame to a channel bound to
 * @psm, if one exists in BOUND/CONNECTED state and the frame fits its
 * incoming MTU.  Drop paths are elided in this listing.
 */
5102 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5104 struct l2cap_chan *chan;
5106 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5110 BT_DBG("chan %p, len %d", chan, skb->len);
5112 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5115 if (chan->imtu < skb->len)
5118 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE data, fixed CID) frame to the globally registered
 * channel for @cid, mirroring the connectionless path: state and MTU
 * checks, then hand off to the channel's recv op.  Drop paths elided.
 */
5127 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5128 struct sk_buff *skb)
5130 struct l2cap_chan *chan;
5132 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5136 BT_DBG("chan %p, len %d", chan, skb->len);
5138 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5141 if (chan->imtu < skb->len)
5144 if (!chan->ops->recv(chan->data, skb))
/* Demultiplex one complete L2CAP frame by destination CID: signaling
 * (BR/EDR and LE), connectionless (PSM-addressed), ATT, SMP, and
 * finally regular connection-oriented data channels.  The basic L2CAP
 * header is stripped before dispatch; a length mismatch drops the
 * frame (drop path elided in this listing).
 */
5153 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5155 struct l2cap_hdr *lh = (void *) skb->data;
5159 skb_pull(skb, L2CAP_HDR_SIZE);
5160 cid = __le16_to_cpu(lh->cid);
5161 len = __le16_to_cpu(lh->len);
5163 if (len != skb->len) {
5168 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5171 case L2CAP_CID_LE_SIGNALING:
5172 case L2CAP_CID_SIGNALING:
5173 l2cap_sig_channel(conn, skb);
5176 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM first in the payload. */
5177 psm = get_unaligned((__le16 *) skb->data);
5179 l2cap_conless_channel(conn, psm, skb);
5182 case L2CAP_CID_LE_DATA:
5183 l2cap_att_channel(conn, cid, skb);
/* Presumably the SMP CID case — label elided in this listing. */
5187 if (smp_sig_channel(conn, skb))
5188 l2cap_conn_del(conn->hcon, EACCES);
5192 l2cap_data_channel(conn, cid, skb);
5197 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection
 * from @bdaddr.  Scan listening channels; ones bound to this adapter's
 * address take precedence (exact match) over wildcard (BDADDR_ANY)
 * listeners.  Returns the accumulated link-mode flags for the best
 * match.  NOTE(review): the `exact++` line appears elided here.
 */
5199 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5201 int exact = 0, lm1 = 0, lm2 = 0;
5202 struct l2cap_chan *c;
5204 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5206 /* Find listening sockets and check their link_mode */
5207 read_lock(&chan_list_lock);
5208 list_for_each_entry(c, &chan_list, global_l) {
5209 struct sock *sk = c->sk;
5211 if (c->state != BT_LISTEN)
5214 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5215 lm1 |= HCI_LM_ACCEPT;
5216 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5217 lm1 |= HCI_LM_MASTER;
5219 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5220 lm2 |= HCI_LM_ACCEPT;
5221 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5222 lm2 |= HCI_LM_MASTER;
5225 read_unlock(&chan_list_lock);
5227 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed.  On success, create
 * the L2CAP connection object and kick its channels; on failure, tear
 * down with the HCI status mapped to an errno.
 * NOTE(review): the status test and return are elided in this listing.
 */
5230 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5232 struct l2cap_conn *conn;
5234 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5237 conn = l2cap_conn_add(hcon, status);
5239 l2cap_conn_ready(conn);
5241 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: supply the disconnect reason for @hcon.  Falls back to
 * the generic remote-user-termination reason when no L2CAP connection
 * exists (the guard condition is elided in this listing).
 */
5246 int l2cap_disconn_ind(struct hci_conn *hcon)
5248 struct l2cap_conn *conn = hcon->l2cap_data;
5250 BT_DBG("hcon %p", hcon);
5253 return HCI_ERROR_REMOTE_USER_TERM;
5254 return conn->disc_reason;
/* HCI callback: the ACL link went down — tear down the whole L2CAP
 * connection, translating the HCI reason code to an errno.
 */
5257 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5259 BT_DBG("hcon %p reason %d", hcon, reason);
5261 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 * Encryption lost: MEDIUM security arms a grace timer, HIGH closes the
 * channel outright.  Encryption (re)gained: cancel the grace timer for
 * MEDIUM-security channels.  Other channel types are ignored.
 */
5265 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5267 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5270 if (encrypt == 0x00) {
5271 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5272 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5273 } else if (chan->sec_level == BT_SECURITY_HIGH)
5274 l2cap_chan_close(chan, ECONNREFUSED);
5276 if (chan->sec_level == BT_SECURITY_MEDIUM)
5277 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption completed with @status.
 * For LE links, distribute SMP keys on success and cancel the security
 * timer.  For each channel on the connection: LE data channels become
 * ready once encrypted; established channels get their sockets
 * unsuspended and the encryption-change policy applied; channels still
 * connecting either proceed (send connect req / answer a pending
 * connect rsp) or are shut down, depending on @status.
 * NOTE(review): many lines (continues, else branches, returns) are
 * elided in this listing; comments cover the visible statements only.
 */
5281 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5283 struct l2cap_conn *conn = hcon->l2cap_data;
5284 struct l2cap_chan *chan;
5289 BT_DBG("conn %p", conn);
5291 if (hcon->type == LE_LINK) {
5292 if (!status && encrypt)
5293 smp_distribute_keys(conn, 0);
5294 cancel_delayed_work(&conn->security_timer);
5297 mutex_lock(&conn->chan_lock);
5299 list_for_each_entry(chan, &conn->chan_l, list) {
5300 l2cap_chan_lock(chan);
5302 BT_DBG("chan->scid %d", chan->scid);
/* LE data channel: ready as soon as the link is encrypted. */
5304 if (chan->scid == L2CAP_CID_LE_DATA) {
5305 if (!status && encrypt) {
5306 chan->sec_level = hcon->sec_level;
5307 l2cap_chan_ready(chan);
5310 l2cap_chan_unlock(chan);
/* Connection still pending elsewhere — skip this channel. */
5314 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5315 l2cap_chan_unlock(chan);
/* Established channel: lift the socket suspension. */
5319 if (!status && (chan->state == BT_CONNECTED ||
5320 chan->state == BT_CONFIG)) {
5321 struct sock *sk = chan->sk;
5323 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5324 sk->sk_state_change(sk);
5326 l2cap_check_encryption(chan, encrypt);
5327 l2cap_chan_unlock(chan);
/* Outgoing connect waiting on security: proceed or time out. */
5331 if (chan->state == BT_CONNECT) {
5333 l2cap_send_conn_req(chan);
5335 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Incoming connect awaiting our response: answer now. */
5337 } else if (chan->state == BT_CONNECT2) {
5338 struct sock *sk = chan->sk;
5339 struct l2cap_conn_rsp rsp;
5345 if (test_bit(BT_SK_DEFER_SETUP,
5346 &bt_sk(sk)->flags)) {
5347 struct sock *parent = bt_sk(sk)->parent;
5348 res = L2CAP_CR_PEND;
5349 stat = L2CAP_CS_AUTHOR_PEND;
5351 parent->sk_data_ready(parent, 0);
5353 __l2cap_state_change(chan, BT_CONFIG);
5354 res = L2CAP_CR_SUCCESS;
5355 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse and schedule disconnect. */
5358 __l2cap_state_change(chan, BT_DISCONN);
5359 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5360 res = L2CAP_CR_SEC_BLOCK;
5361 stat = L2CAP_CS_NO_INFO;
5366 rsp.scid = cpu_to_le16(chan->dcid);
5367 rsp.dcid = cpu_to_le16(chan->scid);
5368 rsp.result = cpu_to_le16(res);
5369 rsp.status = cpu_to_le16(stat);
5370 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5374 l2cap_chan_unlock(chan);
5377 mutex_unlock(&conn->chan_lock);
/* HCI callback: receive one ACL data fragment.  Start fragments
 * (no ACL_CONT) are parsed for the basic L2CAP header; a complete
 * frame is dispatched immediately, otherwise a reassembly skb sized
 * for the full frame is allocated and partially filled.  Continuation
 * fragments are appended until rx_len reaches zero, then the frame is
 * dispatched.  Malformed sequences mark the connection unreliable.
 * NOTE(review): several drop/goto/return lines are elided here.
 */
5382 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5384 struct l2cap_conn *conn = hcon->l2cap_data;
/* Presumably reached when no conn exists yet — condition elided. */
5387 conn = l2cap_conn_add(hcon, 0);
5392 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5394 if (!(flags & ACL_CONT)) {
5395 struct l2cap_hdr *hdr;
/* A start fragment while reassembly is in progress is a protocol
 * error: discard the partial frame. */
5399 BT_ERR("Unexpected start frame (len %d)", skb->len);
5400 kfree_skb(conn->rx_skb);
5401 conn->rx_skb = NULL;
5403 l2cap_conn_unreliable(conn, ECOMM);
5406 /* Start fragment always begin with Basic L2CAP header */
5407 if (skb->len < L2CAP_HDR_SIZE) {
5408 BT_ERR("Frame is too short (len %d)", skb->len);
5409 l2cap_conn_unreliable(conn, ECOMM);
5413 hdr = (struct l2cap_hdr *) skb->data;
5414 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5416 if (len == skb->len) {
5417 /* Complete frame received */
5418 l2cap_recv_frame(conn, skb);
5422 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5424 if (skb->len > len) {
5425 BT_ERR("Frame is too long (len %d, expected len %d)",
5427 l2cap_conn_unreliable(conn, ECOMM);
5431 /* Allocate skb for the complete frame (with header) */
5432 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5436 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Bytes still expected after this fragment. */
5438 conn->rx_len = len - skb->len;
5440 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5442 if (!conn->rx_len) {
5443 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5444 l2cap_conn_unreliable(conn, ECOMM);
5448 if (skb->len > conn->rx_len) {
5449 BT_ERR("Fragment is too long (len %d, expected %d)",
5450 skb->len, conn->rx_len);
5451 kfree_skb(conn->rx_skb);
5452 conn->rx_skb = NULL;
5454 l2cap_conn_unreliable(conn, ECOMM);
5458 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5460 conn->rx_len -= skb->len;
5462 if (!conn->rx_len) {
5463 /* Complete frame received */
5464 l2cap_recv_frame(conn, conn->rx_skb);
5465 conn->rx_skb = NULL;
/* debugfs seq_file renderer: print one line per global L2CAP channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode) under the
 * channel-list read lock.  Return statement elided in this listing.
 */
5474 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5476 struct l2cap_chan *c;
5478 read_lock(&chan_list_lock);
5480 list_for_each_entry(c, &chan_list, global_l) {
5481 struct sock *sk = c->sk;
5483 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5484 batostr(&bt_sk(sk)->src),
5485 batostr(&bt_sk(sk)->dst),
5486 c->state, __le16_to_cpu(c->psm),
5487 c->scid, c->dcid, c->imtu, c->omtu,
5488 c->sec_level, c->mode);
5491 read_unlock(&chan_list_lock);
/* debugfs open hook: standard single_open() wrapper around the show
 * function above. */
5496 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5498 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based;
 * the .read member appears elided in this listing). */
5501 static const struct file_operations l2cap_debugfs_fops = {
5502 .open = l2cap_debugfs_open,
5504 .llseek = seq_lseek,
5505 .release = single_release,
/* Dentry kept so l2cap_exit() can remove the debugfs file. */
5508 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket family, then create the
 * read-only debugfs channel listing (failure to create it is only
 * logged, not fatal).  Error/return lines elided in this listing.
 */
5510 int __init l2cap_init(void)
5514 err = l2cap_init_sockets();
5519 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5520 bt_debugfs, NULL, &l2cap_debugfs_fops);
5522 BT_ERR("Failed to create L2CAP debug file");
/* Module teardown: remove the debugfs entry and unregister sockets —
 * the mirror image of l2cap_init(). */
5528 void l2cap_exit(void)
5530 debugfs_remove(l2cap_debugfs);
5531 l2cap_cleanup_sockets();
/* Runtime knob (writable at 0644): globally disable ERTM negotiation. */
5534 module_param(disable_ertm, bool, 0644);
5535 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");