2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
60 bool disable_ertm = 1;
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
77 struct sk_buff_head *skbs, u8 event);
79 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
85 list_for_each_entry(c, &conn->chan_l, list) {
92 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
96 list_for_each_entry(c, &conn->chan_l, list) {
103 /* Find channel with given SCID.
104 * Returns locked channel. */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 struct l2cap_chan *c;
109 mutex_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
113 mutex_unlock(&conn->chan_lock);
118 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
120 struct l2cap_chan *c;
122 list_for_each_entry(c, &conn->chan_l, list) {
123 if (c->ident == ident)
129 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
131 struct l2cap_chan *c;
133 list_for_each_entry(c, &chan_list, global_l) {
134 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
140 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
144 write_lock(&chan_list_lock);
146 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
159 for (p = 0x1001; p < 0x1100; p += 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
161 chan->psm = cpu_to_le16(p);
162 chan->sport = cpu_to_le16(p);
169 write_unlock(&chan_list_lock);
173 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
175 write_lock(&chan_list_lock);
179 write_unlock(&chan_list_lock);
184 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
186 u16 cid = L2CAP_CID_DYN_START;
188 for (; cid < L2CAP_CID_DYN_END; cid++) {
189 if (!__l2cap_get_chan_by_scid(conn, cid))
196 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
198 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
199 state_to_string(state));
202 chan->ops->state_change(chan->data, state);
205 static void l2cap_state_change(struct l2cap_chan *chan, int state)
207 struct sock *sk = chan->sk;
210 __l2cap_state_change(chan, state);
214 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
216 struct sock *sk = chan->sk;
221 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
223 struct sock *sk = chan->sk;
226 __l2cap_chan_set_err(chan, err);
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
256 size_t alloc_size, i;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size = roundup_pow_of_two(size);
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
279 kfree(seq_list->list);
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
285 /* Constant-time check for list membership */
286 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
289 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
291 u16 mask = seq_list->mask;
293 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR;
296 } else if (seq_list->head == seq) {
297 /* Head can be removed in constant time */
298 seq_list->head = seq_list->list[seq & mask];
299 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
301 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
302 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
303 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 /* Walk the list to find the sequence number */
307 u16 prev = seq_list->head;
308 while (seq_list->list[prev & mask] != seq) {
309 prev = seq_list->list[prev & mask];
310 if (prev == L2CAP_SEQ_LIST_TAIL)
311 return L2CAP_SEQ_LIST_CLEAR;
314 /* Unlink the number from the list and clear it */
315 seq_list->list[prev & mask] = seq_list->list[seq & mask];
316 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
317 if (seq_list->tail == seq)
318 seq_list->tail = prev;
323 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list, seq_list->head);
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
343 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
345 u16 mask = seq_list->mask;
347 /* All appends happen in constant time */
349 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
352 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
353 seq_list->head = seq;
355 seq_list->list[seq_list->tail & mask] = seq;
357 seq_list->tail = seq;
358 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
361 static void l2cap_chan_timeout(struct work_struct *work)
363 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
365 struct l2cap_conn *conn = chan->conn;
368 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
370 mutex_lock(&conn->chan_lock);
371 l2cap_chan_lock(chan);
373 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
374 reason = ECONNREFUSED;
375 else if (chan->state == BT_CONNECT &&
376 chan->sec_level != BT_SECURITY_SDP)
377 reason = ECONNREFUSED;
381 l2cap_chan_close(chan, reason);
383 l2cap_chan_unlock(chan);
385 chan->ops->close(chan->data);
386 mutex_unlock(&conn->chan_lock);
388 l2cap_chan_put(chan);
391 struct l2cap_chan *l2cap_chan_create(void)
393 struct l2cap_chan *chan;
395 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
399 mutex_init(&chan->lock);
401 write_lock(&chan_list_lock);
402 list_add(&chan->global_l, &chan_list);
403 write_unlock(&chan_list_lock);
405 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
407 chan->state = BT_OPEN;
409 atomic_set(&chan->refcnt, 1);
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
414 BT_DBG("chan %p", chan);
419 void l2cap_chan_destroy(struct l2cap_chan *chan)
421 write_lock(&chan_list_lock);
422 list_del(&chan->global_l);
423 write_unlock(&chan_list_lock);
425 l2cap_chan_put(chan);
428 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
430 chan->fcs = L2CAP_FCS_CRC16;
431 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
432 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
433 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
434 chan->sec_level = BT_SECURITY_LOW;
436 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
439 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
441 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
442 __le16_to_cpu(chan->psm), chan->dcid);
444 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
448 switch (chan->chan_type) {
449 case L2CAP_CHAN_CONN_ORIENTED:
450 if (conn->hcon->type == LE_LINK) {
452 chan->omtu = L2CAP_LE_DEFAULT_MTU;
453 chan->scid = L2CAP_CID_LE_DATA;
454 chan->dcid = L2CAP_CID_LE_DATA;
456 /* Alloc CID for connection-oriented socket */
457 chan->scid = l2cap_alloc_cid(conn);
458 chan->omtu = L2CAP_DEFAULT_MTU;
462 case L2CAP_CHAN_CONN_LESS:
463 /* Connectionless socket */
464 chan->scid = L2CAP_CID_CONN_LESS;
465 chan->dcid = L2CAP_CID_CONN_LESS;
466 chan->omtu = L2CAP_DEFAULT_MTU;
470 /* Raw socket can send/recv signalling messages only */
471 chan->scid = L2CAP_CID_SIGNALING;
472 chan->dcid = L2CAP_CID_SIGNALING;
473 chan->omtu = L2CAP_DEFAULT_MTU;
476 chan->local_id = L2CAP_BESTEFFORT_ID;
477 chan->local_stype = L2CAP_SERV_BESTEFFORT;
478 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
479 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
480 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
481 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
483 l2cap_chan_hold(chan);
485 list_add(&chan->list, &conn->chan_l);
488 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
490 mutex_lock(&conn->chan_lock);
491 __l2cap_chan_add(conn, chan);
492 mutex_unlock(&conn->chan_lock);
495 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
497 struct sock *sk = chan->sk;
498 struct l2cap_conn *conn = chan->conn;
499 struct sock *parent = bt_sk(sk)->parent;
501 __clear_chan_timer(chan);
503 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
506 /* Delete from channel list */
507 list_del(&chan->list);
509 l2cap_chan_put(chan);
512 hci_conn_put(conn->hcon);
517 __l2cap_state_change(chan, BT_CLOSED);
518 sock_set_flag(sk, SOCK_ZAPPED);
521 __l2cap_chan_set_err(chan, err);
524 bt_accept_unlink(sk);
525 parent->sk_data_ready(parent, 0);
527 sk->sk_state_change(sk);
531 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
534 skb_queue_purge(&chan->tx_q);
536 if (chan->mode == L2CAP_MODE_ERTM) {
537 struct srej_list *l, *tmp;
539 __clear_retrans_timer(chan);
540 __clear_monitor_timer(chan);
541 __clear_ack_timer(chan);
543 skb_queue_purge(&chan->srej_q);
545 l2cap_seq_list_free(&chan->srej_list);
546 l2cap_seq_list_free(&chan->retrans_list);
547 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
554 static void l2cap_chan_cleanup_listen(struct sock *parent)
558 BT_DBG("parent %p", parent);
560 /* Close not yet accepted channels */
561 while ((sk = bt_accept_dequeue(parent, NULL))) {
562 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
564 l2cap_chan_lock(chan);
565 __clear_chan_timer(chan);
566 l2cap_chan_close(chan, ECONNRESET);
567 l2cap_chan_unlock(chan);
569 chan->ops->close(chan->data);
573 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
575 struct l2cap_conn *conn = chan->conn;
576 struct sock *sk = chan->sk;
578 BT_DBG("chan %p state %s sk %p", chan,
579 state_to_string(chan->state), sk);
581 switch (chan->state) {
584 l2cap_chan_cleanup_listen(sk);
586 __l2cap_state_change(chan, BT_CLOSED);
587 sock_set_flag(sk, SOCK_ZAPPED);
593 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
594 conn->hcon->type == ACL_LINK) {
595 __set_chan_timer(chan, sk->sk_sndtimeo);
596 l2cap_send_disconn_req(conn, chan, reason);
598 l2cap_chan_del(chan, reason);
602 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
603 conn->hcon->type == ACL_LINK) {
604 struct l2cap_conn_rsp rsp;
607 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
608 result = L2CAP_CR_SEC_BLOCK;
610 result = L2CAP_CR_BAD_PSM;
611 l2cap_state_change(chan, BT_DISCONN);
613 rsp.scid = cpu_to_le16(chan->dcid);
614 rsp.dcid = cpu_to_le16(chan->scid);
615 rsp.result = cpu_to_le16(result);
616 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
617 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
621 l2cap_chan_del(chan, reason);
626 l2cap_chan_del(chan, reason);
631 sock_set_flag(sk, SOCK_ZAPPED);
637 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
639 if (chan->chan_type == L2CAP_CHAN_RAW) {
640 switch (chan->sec_level) {
641 case BT_SECURITY_HIGH:
642 return HCI_AT_DEDICATED_BONDING_MITM;
643 case BT_SECURITY_MEDIUM:
644 return HCI_AT_DEDICATED_BONDING;
646 return HCI_AT_NO_BONDING;
648 } else if (chan->psm == cpu_to_le16(0x0001)) {
649 if (chan->sec_level == BT_SECURITY_LOW)
650 chan->sec_level = BT_SECURITY_SDP;
652 if (chan->sec_level == BT_SECURITY_HIGH)
653 return HCI_AT_NO_BONDING_MITM;
655 return HCI_AT_NO_BONDING;
657 switch (chan->sec_level) {
658 case BT_SECURITY_HIGH:
659 return HCI_AT_GENERAL_BONDING_MITM;
660 case BT_SECURITY_MEDIUM:
661 return HCI_AT_GENERAL_BONDING;
663 return HCI_AT_NO_BONDING;
668 /* Service level security */
669 int l2cap_chan_check_security(struct l2cap_chan *chan)
671 struct l2cap_conn *conn = chan->conn;
674 auth_type = l2cap_get_auth_type(chan);
676 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
679 static u8 l2cap_get_ident(struct l2cap_conn *conn)
683 /* Get next available identificator.
684 * 1 - 128 are used by kernel.
685 * 129 - 199 are reserved.
686 * 200 - 254 are used by utilities like l2ping, etc.
689 spin_lock(&conn->lock);
691 if (++conn->tx_ident > 128)
696 spin_unlock(&conn->lock);
701 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
703 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
706 BT_DBG("code 0x%2.2x", code);
711 if (lmp_no_flush_capable(conn->hcon->hdev))
712 flags = ACL_START_NO_FLUSH;
716 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
717 skb->priority = HCI_PRIO_MAX;
719 hci_send_acl(conn->hchan, skb, flags);
722 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
724 struct hci_conn *hcon = chan->conn->hcon;
727 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
730 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
731 lmp_no_flush_capable(hcon->hdev))
732 flags = ACL_START_NO_FLUSH;
736 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
737 hci_send_acl(chan->conn->hchan, skb, flags);
740 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
742 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
743 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
745 if (enh & L2CAP_CTRL_FRAME_TYPE) {
748 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
749 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
756 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
757 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
764 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
766 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
767 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
769 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
772 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
773 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
780 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
781 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
788 static inline void __unpack_control(struct l2cap_chan *chan,
791 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
792 __unpack_extended_control(get_unaligned_le32(skb->data),
793 &bt_cb(skb)->control);
794 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
796 __unpack_enhanced_control(get_unaligned_le16(skb->data),
797 &bt_cb(skb)->control);
798 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
802 static u32 __pack_extended_control(struct l2cap_ctrl *control)
806 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
807 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
809 if (control->sframe) {
810 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
811 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
812 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
814 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
815 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
821 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
825 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
826 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
828 if (control->sframe) {
829 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
830 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
831 packed |= L2CAP_CTRL_FRAME_TYPE;
833 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
834 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
840 static inline void __pack_control(struct l2cap_chan *chan,
841 struct l2cap_ctrl *control,
844 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
845 put_unaligned_le32(__pack_extended_control(control),
846 skb->data + L2CAP_HDR_SIZE);
848 put_unaligned_le16(__pack_enhanced_control(control),
849 skb->data + L2CAP_HDR_SIZE);
853 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
857 struct l2cap_hdr *lh;
860 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
861 hlen = L2CAP_EXT_HDR_SIZE;
863 hlen = L2CAP_ENH_HDR_SIZE;
865 if (chan->fcs == L2CAP_FCS_CRC16)
866 hlen += L2CAP_FCS_SIZE;
868 skb = bt_skb_alloc(hlen, GFP_KERNEL);
871 return ERR_PTR(-ENOMEM);
873 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
874 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
875 lh->cid = cpu_to_le16(chan->dcid);
877 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
878 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
880 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
882 if (chan->fcs == L2CAP_FCS_CRC16) {
883 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
884 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
887 skb->priority = HCI_PRIO_MAX;
891 static void l2cap_send_sframe(struct l2cap_chan *chan,
892 struct l2cap_ctrl *control)
897 BT_DBG("chan %p, control %p", chan, control);
899 if (!control->sframe)
902 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
906 if (control->super == L2CAP_SUPER_RR)
907 clear_bit(CONN_RNR_SENT, &chan->conn_state);
908 else if (control->super == L2CAP_SUPER_RNR)
909 set_bit(CONN_RNR_SENT, &chan->conn_state);
911 if (control->super != L2CAP_SUPER_SREJ) {
912 chan->last_acked_seq = control->reqseq;
913 __clear_ack_timer(chan);
916 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
917 control->final, control->poll, control->super);
919 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
920 control_field = __pack_extended_control(control);
922 control_field = __pack_enhanced_control(control);
924 skb = l2cap_create_sframe_pdu(chan, control_field);
926 l2cap_do_send(chan, skb);
929 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
931 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
932 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
933 set_bit(CONN_RNR_SENT, &chan->conn_state);
935 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
937 control |= __set_reqseq(chan, chan->buffer_seq);
940 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
942 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
945 static void l2cap_send_conn_req(struct l2cap_chan *chan)
947 struct l2cap_conn *conn = chan->conn;
948 struct l2cap_conn_req req;
950 req.scid = cpu_to_le16(chan->scid);
953 chan->ident = l2cap_get_ident(conn);
955 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
957 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
960 static void l2cap_chan_ready(struct l2cap_chan *chan)
962 struct sock *sk = chan->sk;
967 parent = bt_sk(sk)->parent;
969 BT_DBG("sk %p, parent %p", sk, parent);
971 /* This clears all conf flags, including CONF_NOT_COMPLETE */
972 chan->conf_state = 0;
973 __clear_chan_timer(chan);
975 __l2cap_state_change(chan, BT_CONNECTED);
976 sk->sk_state_change(sk);
979 parent->sk_data_ready(parent, 0);
984 static void l2cap_do_start(struct l2cap_chan *chan)
986 struct l2cap_conn *conn = chan->conn;
988 if (conn->hcon->type == LE_LINK) {
989 l2cap_chan_ready(chan);
993 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
994 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
997 if (l2cap_chan_check_security(chan) &&
998 __l2cap_no_conn_pending(chan))
999 l2cap_send_conn_req(chan);
1001 struct l2cap_info_req req;
1002 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1004 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1005 conn->info_ident = l2cap_get_ident(conn);
1007 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1009 l2cap_send_cmd(conn, conn->info_ident,
1010 L2CAP_INFO_REQ, sizeof(req), &req);
1014 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1016 u32 local_feat_mask = l2cap_feat_mask;
1018 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1021 case L2CAP_MODE_ERTM:
1022 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1023 case L2CAP_MODE_STREAMING:
1024 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1030 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1032 struct sock *sk = chan->sk;
1033 struct l2cap_disconn_req req;
1038 if (chan->mode == L2CAP_MODE_ERTM) {
1039 __clear_retrans_timer(chan);
1040 __clear_monitor_timer(chan);
1041 __clear_ack_timer(chan);
1044 req.dcid = cpu_to_le16(chan->dcid);
1045 req.scid = cpu_to_le16(chan->scid);
1046 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1047 L2CAP_DISCONN_REQ, sizeof(req), &req);
1050 __l2cap_state_change(chan, BT_DISCONN);
1051 __l2cap_chan_set_err(chan, err);
1055 /* ---- L2CAP connections ---- */
1056 static void l2cap_conn_start(struct l2cap_conn *conn)
1058 struct l2cap_chan *chan, *tmp;
1060 BT_DBG("conn %p", conn);
1062 mutex_lock(&conn->chan_lock);
1064 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1065 struct sock *sk = chan->sk;
1067 l2cap_chan_lock(chan);
1069 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1070 l2cap_chan_unlock(chan);
1074 if (chan->state == BT_CONNECT) {
1075 if (!l2cap_chan_check_security(chan) ||
1076 !__l2cap_no_conn_pending(chan)) {
1077 l2cap_chan_unlock(chan);
1081 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1082 && test_bit(CONF_STATE2_DEVICE,
1083 &chan->conf_state)) {
1084 l2cap_chan_close(chan, ECONNRESET);
1085 l2cap_chan_unlock(chan);
1089 l2cap_send_conn_req(chan);
1091 } else if (chan->state == BT_CONNECT2) {
1092 struct l2cap_conn_rsp rsp;
1094 rsp.scid = cpu_to_le16(chan->dcid);
1095 rsp.dcid = cpu_to_le16(chan->scid);
1097 if (l2cap_chan_check_security(chan)) {
1099 if (test_bit(BT_SK_DEFER_SETUP,
1100 &bt_sk(sk)->flags)) {
1101 struct sock *parent = bt_sk(sk)->parent;
1102 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1103 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1105 parent->sk_data_ready(parent, 0);
1108 __l2cap_state_change(chan, BT_CONFIG);
1109 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1110 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1114 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1115 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1118 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1121 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1122 rsp.result != L2CAP_CR_SUCCESS) {
1123 l2cap_chan_unlock(chan);
1127 set_bit(CONF_REQ_SENT, &chan->conf_state);
1128 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1129 l2cap_build_conf_req(chan, buf), buf);
1130 chan->num_conf_req++;
1133 l2cap_chan_unlock(chan);
1136 mutex_unlock(&conn->chan_lock);
1139 /* Find socket with cid and source/destination bdaddr.
1140 * Returns closest match, locked.
1142 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1146 struct l2cap_chan *c, *c1 = NULL;
1148 read_lock(&chan_list_lock);
1150 list_for_each_entry(c, &chan_list, global_l) {
1151 struct sock *sk = c->sk;
1153 if (state && c->state != state)
1156 if (c->scid == cid) {
1157 int src_match, dst_match;
1158 int src_any, dst_any;
1161 src_match = !bacmp(&bt_sk(sk)->src, src);
1162 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1163 if (src_match && dst_match) {
1164 read_unlock(&chan_list_lock);
1169 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1170 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1171 if ((src_match && dst_any) || (src_any && dst_match) ||
1172 (src_any && dst_any))
1177 read_unlock(&chan_list_lock);
1182 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1184 struct sock *parent, *sk;
1185 struct l2cap_chan *chan, *pchan;
1189 /* Check if we have socket listening on cid */
1190 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1191 conn->src, conn->dst);
1199 /* Check for backlog size */
1200 if (sk_acceptq_is_full(parent)) {
1201 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1205 chan = pchan->ops->new_connection(pchan->data);
1211 hci_conn_hold(conn->hcon);
1213 bacpy(&bt_sk(sk)->src, conn->src);
1214 bacpy(&bt_sk(sk)->dst, conn->dst);
1216 bt_accept_enqueue(parent, sk);
1218 l2cap_chan_add(conn, chan);
1220 __set_chan_timer(chan, sk->sk_sndtimeo);
1222 __l2cap_state_change(chan, BT_CONNECTED);
1223 parent->sk_data_ready(parent, 0);
1226 release_sock(parent);
1229 static void l2cap_conn_ready(struct l2cap_conn *conn)
1231 struct l2cap_chan *chan;
1233 BT_DBG("conn %p", conn);
1235 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1236 l2cap_le_conn_ready(conn);
1238 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1239 smp_conn_security(conn, conn->hcon->pending_sec_level);
1241 mutex_lock(&conn->chan_lock);
1243 list_for_each_entry(chan, &conn->chan_l, list) {
1245 l2cap_chan_lock(chan);
1247 if (conn->hcon->type == LE_LINK) {
1248 if (smp_conn_security(conn, chan->sec_level))
1249 l2cap_chan_ready(chan);
1251 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1252 struct sock *sk = chan->sk;
1253 __clear_chan_timer(chan);
1255 __l2cap_state_change(chan, BT_CONNECTED);
1256 sk->sk_state_change(sk);
1259 } else if (chan->state == BT_CONNECT)
1260 l2cap_do_start(chan);
1262 l2cap_chan_unlock(chan);
1265 mutex_unlock(&conn->chan_lock);
1268 /* Notify sockets that we cannot guaranty reliability anymore */
1269 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1271 struct l2cap_chan *chan;
1273 BT_DBG("conn %p", conn);
1275 mutex_lock(&conn->chan_lock);
1277 list_for_each_entry(chan, &conn->chan_l, list) {
1278 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1279 __l2cap_chan_set_err(chan, err);
1282 mutex_unlock(&conn->chan_lock);
1285 static void l2cap_info_timeout(struct work_struct *work)
1287 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1290 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1291 conn->info_ident = 0;
1293 l2cap_conn_start(conn);
1296 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1298 struct l2cap_conn *conn = hcon->l2cap_data;
1299 struct l2cap_chan *chan, *l;
1304 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1306 kfree_skb(conn->rx_skb);
1308 mutex_lock(&conn->chan_lock);
1311 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1312 l2cap_chan_hold(chan);
1313 l2cap_chan_lock(chan);
1315 l2cap_chan_del(chan, err);
1317 l2cap_chan_unlock(chan);
1319 chan->ops->close(chan->data);
1320 l2cap_chan_put(chan);
1323 mutex_unlock(&conn->chan_lock);
1325 hci_chan_del(conn->hchan);
1327 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1328 cancel_delayed_work_sync(&conn->info_timer);
1330 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1331 cancel_delayed_work_sync(&conn->security_timer);
1332 smp_chan_destroy(conn);
1335 hcon->l2cap_data = NULL;
1339 static void security_timeout(struct work_struct *work)
1341 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1342 security_timer.work);
1344 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1347 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1349 struct l2cap_conn *conn = hcon->l2cap_data;
1350 struct hci_chan *hchan;
1355 hchan = hci_chan_create(hcon);
1359 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1361 hci_chan_del(hchan);
1365 hcon->l2cap_data = conn;
1367 conn->hchan = hchan;
1369 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1371 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1372 conn->mtu = hcon->hdev->le_mtu;
1374 conn->mtu = hcon->hdev->acl_mtu;
1376 conn->src = &hcon->hdev->bdaddr;
1377 conn->dst = &hcon->dst;
1379 conn->feat_mask = 0;
1381 spin_lock_init(&conn->lock);
1382 mutex_init(&conn->chan_lock);
1384 INIT_LIST_HEAD(&conn->chan_l);
1386 if (hcon->type == LE_LINK)
1387 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1389 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1391 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1396 /* ---- Socket interface ---- */
1398 /* Find socket with psm and source / destination bdaddr.
1399 * Returns closest match.
1401 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1405 struct l2cap_chan *c, *c1 = NULL;
1407 read_lock(&chan_list_lock);
1409 list_for_each_entry(c, &chan_list, global_l) {
1410 struct sock *sk = c->sk;
1412 if (state && c->state != state)
1415 if (c->psm == psm) {
1416 int src_match, dst_match;
1417 int src_any, dst_any;
1420 src_match = !bacmp(&bt_sk(sk)->src, src);
1421 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1422 if (src_match && dst_match) {
1423 read_unlock(&chan_list_lock);
1428 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1429 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1430 if ((src_match && dst_any) || (src_any && dst_match) ||
1431 (src_any && dst_any))
1436 read_unlock(&chan_list_lock);
1441 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1442 bdaddr_t *dst, u8 dst_type)
1444 struct sock *sk = chan->sk;
1445 bdaddr_t *src = &bt_sk(sk)->src;
1446 struct l2cap_conn *conn;
1447 struct hci_conn *hcon;
1448 struct hci_dev *hdev;
1452 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1453 dst_type, __le16_to_cpu(chan->psm));
1455 hdev = hci_get_route(dst, src);
1457 return -EHOSTUNREACH;
1461 l2cap_chan_lock(chan);
1463 /* PSM must be odd and lsb of upper byte must be 0 */
1464 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1465 chan->chan_type != L2CAP_CHAN_RAW) {
1470 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1475 switch (chan->mode) {
1476 case L2CAP_MODE_BASIC:
1478 case L2CAP_MODE_ERTM:
1479 case L2CAP_MODE_STREAMING:
1490 switch (sk->sk_state) {
1494 /* Already connecting */
1500 /* Already connected */
1516 /* Set destination address and psm */
1517 bacpy(&bt_sk(sk)->dst, dst);
1524 auth_type = l2cap_get_auth_type(chan);
1526 if (chan->dcid == L2CAP_CID_LE_DATA)
1527 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1528 chan->sec_level, auth_type);
1530 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1531 chan->sec_level, auth_type);
1534 err = PTR_ERR(hcon);
1538 conn = l2cap_conn_add(hcon, 0);
1545 if (hcon->type == LE_LINK) {
1548 if (!list_empty(&conn->chan_l)) {
1557 /* Update source addr of the socket */
1558 bacpy(src, conn->src);
1560 l2cap_chan_unlock(chan);
1561 l2cap_chan_add(conn, chan);
1562 l2cap_chan_lock(chan);
1564 l2cap_state_change(chan, BT_CONNECT);
1565 __set_chan_timer(chan, sk->sk_sndtimeo);
1567 if (hcon->state == BT_CONNECTED) {
1568 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1569 __clear_chan_timer(chan);
1570 if (l2cap_chan_check_security(chan))
1571 l2cap_state_change(chan, BT_CONNECTED);
1573 l2cap_do_start(chan);
1579 l2cap_chan_unlock(chan);
1580 hci_dev_unlock(hdev);
1585 int __l2cap_wait_ack(struct sock *sk)
1587 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1588 DECLARE_WAITQUEUE(wait, current);
1592 add_wait_queue(sk_sleep(sk), &wait);
1593 set_current_state(TASK_INTERRUPTIBLE);
1594 while (chan->unacked_frames > 0 && chan->conn) {
1598 if (signal_pending(current)) {
1599 err = sock_intr_errno(timeo);
1604 timeo = schedule_timeout(timeo);
1606 set_current_state(TASK_INTERRUPTIBLE);
1608 err = sock_error(sk);
1612 set_current_state(TASK_RUNNING);
1613 remove_wait_queue(sk_sleep(sk), &wait);
1617 static void l2cap_monitor_timeout(struct work_struct *work)
1619 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1620 monitor_timer.work);
1622 BT_DBG("chan %p", chan);
1624 l2cap_chan_lock(chan);
1626 if (chan->retry_count >= chan->remote_max_tx) {
1627 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1628 l2cap_chan_unlock(chan);
1629 l2cap_chan_put(chan);
1633 chan->retry_count++;
1634 __set_monitor_timer(chan);
1636 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1637 l2cap_chan_unlock(chan);
1638 l2cap_chan_put(chan);
1641 static void l2cap_retrans_timeout(struct work_struct *work)
1643 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1644 retrans_timer.work);
1646 BT_DBG("chan %p", chan);
1648 l2cap_chan_lock(chan);
1650 chan->retry_count = 1;
1651 __set_monitor_timer(chan);
1653 set_bit(CONN_WAIT_F, &chan->conn_state);
1655 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1657 l2cap_chan_unlock(chan);
1658 l2cap_chan_put(chan);
1661 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1663 struct sk_buff *skb;
1665 while ((skb = skb_peek(&chan->tx_q)) &&
1666 chan->unacked_frames) {
1667 if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
1670 skb = skb_dequeue(&chan->tx_q);
1673 chan->unacked_frames--;
1676 if (!chan->unacked_frames)
1677 __clear_retrans_timer(chan);
1680 static int l2cap_streaming_send(struct l2cap_chan *chan,
1681 struct sk_buff_head *skbs)
1683 struct sk_buff *skb;
1684 struct l2cap_ctrl *control;
1686 BT_DBG("chan %p, skbs %p", chan, skbs);
1688 if (chan->state != BT_CONNECTED)
1691 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1693 while (!skb_queue_empty(&chan->tx_q)) {
1695 skb = skb_dequeue(&chan->tx_q);
1697 bt_cb(skb)->control.retries = 1;
1698 control = &bt_cb(skb)->control;
1700 control->reqseq = 0;
1701 control->txseq = chan->next_tx_seq;
1703 __pack_control(chan, control, skb);
1705 if (chan->fcs == L2CAP_FCS_CRC16) {
1706 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1707 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1710 l2cap_do_send(chan, skb);
1712 BT_DBG("Sent txseq %d", (int)control->txseq);
1714 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1715 chan->frames_sent++;
1721 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1723 struct sk_buff *skb, *tx_skb;
1727 skb = skb_peek(&chan->tx_q);
1731 while (bt_cb(skb)->control.txseq != tx_seq) {
1732 if (skb_queue_is_last(&chan->tx_q, skb))
1735 skb = skb_queue_next(&chan->tx_q, skb);
1738 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1739 chan->remote_max_tx) {
1740 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1744 tx_skb = skb_clone(skb, GFP_ATOMIC);
1745 bt_cb(skb)->control.retries++;
1747 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1748 control &= __get_sar_mask(chan);
1750 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1751 control |= __set_ctrl_final(chan);
1753 control |= __set_reqseq(chan, chan->buffer_seq);
1754 control |= __set_txseq(chan, tx_seq);
1756 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1758 if (chan->fcs == L2CAP_FCS_CRC16) {
1759 fcs = crc16(0, (u8 *)tx_skb->data,
1760 tx_skb->len - L2CAP_FCS_SIZE);
1761 put_unaligned_le16(fcs,
1762 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1765 l2cap_do_send(chan, tx_skb);
1768 static int l2cap_ertm_send(struct l2cap_chan *chan)
1770 struct sk_buff *skb, *tx_skb;
1771 struct l2cap_ctrl *control;
1774 BT_DBG("chan %p", chan);
1776 if (chan->state != BT_CONNECTED)
1779 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1782 while (chan->tx_send_head &&
1783 chan->unacked_frames < chan->remote_tx_win &&
1784 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1786 skb = chan->tx_send_head;
1788 bt_cb(skb)->control.retries = 1;
1789 control = &bt_cb(skb)->control;
1791 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1794 control->reqseq = chan->buffer_seq;
1795 chan->last_acked_seq = chan->buffer_seq;
1796 control->txseq = chan->next_tx_seq;
1798 __pack_control(chan, control, skb);
1800 if (chan->fcs == L2CAP_FCS_CRC16) {
1801 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1802 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1805 /* Clone after data has been modified. Data is assumed to be
1806 read-only (for locking purposes) on cloned sk_buffs.
1808 tx_skb = skb_clone(skb, GFP_KERNEL);
1813 __set_retrans_timer(chan);
1815 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1816 chan->unacked_frames++;
1817 chan->frames_sent++;
1820 if (skb_queue_is_last(&chan->tx_q, skb))
1821 chan->tx_send_head = NULL;
1823 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1825 l2cap_do_send(chan, tx_skb);
1826 BT_DBG("Sent txseq %d", (int)control->txseq);
1829 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
1830 (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));
1835 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1839 if (!skb_queue_empty(&chan->tx_q))
1840 chan->tx_send_head = chan->tx_q.next;
1842 chan->next_tx_seq = chan->expected_ack_seq;
1843 ret = l2cap_ertm_send(chan);
1847 static void __l2cap_send_ack(struct l2cap_chan *chan)
1851 control |= __set_reqseq(chan, chan->buffer_seq);
1853 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1854 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1855 set_bit(CONN_RNR_SENT, &chan->conn_state);
1859 if (l2cap_ertm_send(chan) > 0)
1862 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1865 static void l2cap_send_ack(struct l2cap_chan *chan)
1867 __clear_ack_timer(chan);
1868 __l2cap_send_ack(chan);
1871 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1873 struct srej_list *tail;
1876 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1877 control |= __set_ctrl_final(chan);
1879 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1880 control |= __set_reqseq(chan, tail->tx_seq);
1883 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1884 struct msghdr *msg, int len,
1885 int count, struct sk_buff *skb)
1887 struct l2cap_conn *conn = chan->conn;
1888 struct sk_buff **frag;
1891 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1897 /* Continuation fragments (no L2CAP header) */
1898 frag = &skb_shinfo(skb)->frag_list;
1900 struct sk_buff *tmp;
1902 count = min_t(unsigned int, conn->mtu, len);
1904 tmp = chan->ops->alloc_skb(chan, count,
1905 msg->msg_flags & MSG_DONTWAIT);
1907 return PTR_ERR(tmp);
1911 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1914 (*frag)->priority = skb->priority;
1919 skb->len += (*frag)->len;
1920 skb->data_len += (*frag)->len;
1922 frag = &(*frag)->next;
1928 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1929 struct msghdr *msg, size_t len,
1932 struct l2cap_conn *conn = chan->conn;
1933 struct sk_buff *skb;
1934 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1935 struct l2cap_hdr *lh;
1937 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1939 count = min_t(unsigned int, (conn->mtu - hlen), len);
1941 skb = chan->ops->alloc_skb(chan, count + hlen,
1942 msg->msg_flags & MSG_DONTWAIT);
1946 skb->priority = priority;
1948 /* Create L2CAP header */
1949 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1950 lh->cid = cpu_to_le16(chan->dcid);
1951 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1952 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1954 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1955 if (unlikely(err < 0)) {
1957 return ERR_PTR(err);
1962 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1963 struct msghdr *msg, size_t len,
1966 struct l2cap_conn *conn = chan->conn;
1967 struct sk_buff *skb;
1969 struct l2cap_hdr *lh;
1971 BT_DBG("chan %p len %d", chan, (int)len);
1973 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1975 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1976 msg->msg_flags & MSG_DONTWAIT);
1980 skb->priority = priority;
1982 /* Create L2CAP header */
1983 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1984 lh->cid = cpu_to_le16(chan->dcid);
1985 lh->len = cpu_to_le16(len);
1987 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1988 if (unlikely(err < 0)) {
1990 return ERR_PTR(err);
1995 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1996 struct msghdr *msg, size_t len,
1999 struct l2cap_conn *conn = chan->conn;
2000 struct sk_buff *skb;
2001 int err, count, hlen;
2002 struct l2cap_hdr *lh;
2004 BT_DBG("chan %p len %d", chan, (int)len);
2007 return ERR_PTR(-ENOTCONN);
2009 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2010 hlen = L2CAP_EXT_HDR_SIZE;
2012 hlen = L2CAP_ENH_HDR_SIZE;
2015 hlen += L2CAP_SDULEN_SIZE;
2017 if (chan->fcs == L2CAP_FCS_CRC16)
2018 hlen += L2CAP_FCS_SIZE;
2020 count = min_t(unsigned int, (conn->mtu - hlen), len);
2022 skb = chan->ops->alloc_skb(chan, count + hlen,
2023 msg->msg_flags & MSG_DONTWAIT);
2027 /* Create L2CAP header */
2028 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2029 lh->cid = cpu_to_le16(chan->dcid);
2030 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2032 /* Control header is populated later */
2033 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2034 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2036 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2039 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2041 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2042 if (unlikely(err < 0)) {
2044 return ERR_PTR(err);
2047 bt_cb(skb)->control.fcs = chan->fcs;
2048 bt_cb(skb)->control.retries = 0;
2052 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2053 struct sk_buff_head *seg_queue,
2054 struct msghdr *msg, size_t len)
2056 struct sk_buff *skb;
2062 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2064 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2065 * so fragmented skbs are not used. The HCI layer's handling
2066 * of fragmented skbs is not compatible with ERTM's queueing.
2069 /* PDU size is derived from the HCI MTU */
2070 pdu_len = chan->conn->mtu;
2072 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2074 /* Adjust for largest possible L2CAP overhead. */
2075 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
2077 /* Remote device may have requested smaller PDUs */
2078 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2080 if (len <= pdu_len) {
2081 sar = L2CAP_SAR_UNSEGMENTED;
2085 sar = L2CAP_SAR_START;
2087 pdu_len -= L2CAP_SDULEN_SIZE;
2091 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2094 __skb_queue_purge(seg_queue);
2095 return PTR_ERR(skb);
2098 bt_cb(skb)->control.sar = sar;
2099 __skb_queue_tail(seg_queue, skb);
2104 pdu_len += L2CAP_SDULEN_SIZE;
2107 if (len <= pdu_len) {
2108 sar = L2CAP_SAR_END;
2111 sar = L2CAP_SAR_CONTINUE;
2118 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2121 struct sk_buff *skb;
2123 struct sk_buff_head seg_queue;
2125 /* Connectionless channel */
2126 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2127 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2129 return PTR_ERR(skb);
2131 l2cap_do_send(chan, skb);
2135 switch (chan->mode) {
2136 case L2CAP_MODE_BASIC:
2137 /* Check outgoing MTU */
2138 if (len > chan->omtu)
2141 /* Create a basic PDU */
2142 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2144 return PTR_ERR(skb);
2146 l2cap_do_send(chan, skb);
2150 case L2CAP_MODE_ERTM:
2151 case L2CAP_MODE_STREAMING:
2152 /* Check outgoing MTU */
2153 if (len > chan->omtu) {
2158 __skb_queue_head_init(&seg_queue);
2160 /* Do segmentation before calling in to the state machine,
2161 * since it's possible to block while waiting for memory
2164 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2166 /* The channel could have been closed while segmenting,
2167 * check that it is still connected.
2169 if (chan->state != BT_CONNECTED) {
2170 __skb_queue_purge(&seg_queue);
2177 if (chan->mode == L2CAP_MODE_ERTM)
2178 err = l2cap_tx(chan, 0, &seg_queue,
2179 L2CAP_EV_DATA_REQUEST);
2181 err = l2cap_streaming_send(chan, &seg_queue);
2186 /* If the skbs were not queued for sending, they'll still be in
2187 * seg_queue and need to be purged.
2189 __skb_queue_purge(&seg_queue);
2193 BT_DBG("bad state %1.1x", chan->mode);
2200 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2202 struct sk_buff *acked_skb;
2205 BT_DBG("chan %p, reqseq %d", chan, reqseq);
2207 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2210 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2211 chan->expected_ack_seq, chan->unacked_frames);
2213 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2214 ackseq = __next_seq(chan, ackseq)) {
2216 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2218 skb_unlink(acked_skb, &chan->tx_q);
2219 kfree_skb(acked_skb);
2220 chan->unacked_frames--;
2224 chan->expected_ack_seq = reqseq;
2226 if (chan->unacked_frames == 0)
2227 __clear_retrans_timer(chan);
2229 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
2232 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2234 BT_DBG("chan %p", chan);
2236 chan->expected_tx_seq = chan->buffer_seq;
2237 l2cap_seq_list_clear(&chan->srej_list);
2238 skb_queue_purge(&chan->srej_q);
2239 chan->rx_state = L2CAP_RX_STATE_RECV;
2242 static int l2cap_tx_state_xmit(struct l2cap_chan *chan,
2243 struct l2cap_ctrl *control,
2244 struct sk_buff_head *skbs, u8 event)
2248 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2252 case L2CAP_EV_DATA_REQUEST:
2253 if (chan->tx_send_head == NULL)
2254 chan->tx_send_head = skb_peek(skbs);
2256 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2257 l2cap_ertm_send(chan);
2259 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2260 BT_DBG("Enter LOCAL_BUSY");
2261 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2263 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2264 /* The SREJ_SENT state must be aborted if we are to
2265 * enter the LOCAL_BUSY state.
2267 l2cap_abort_rx_srej_sent(chan);
2270 l2cap_send_ack(chan);
2273 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2274 BT_DBG("Exit LOCAL_BUSY");
2275 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2277 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2278 struct l2cap_ctrl local_control;
2280 memset(&local_control, 0, sizeof(local_control));
2281 local_control.sframe = 1;
2282 local_control.super = L2CAP_SUPER_RR;
2283 local_control.poll = 1;
2284 local_control.reqseq = chan->buffer_seq;
2285 l2cap_send_sframe(chan, &local_control);
2287 chan->retry_count = 1;
2288 __set_monitor_timer(chan);
2289 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2292 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2293 l2cap_process_reqseq(chan, control->reqseq);
2295 case L2CAP_EV_EXPLICIT_POLL:
2296 l2cap_send_rr_or_rnr(chan, 1);
2297 chan->retry_count = 1;
2298 __set_monitor_timer(chan);
2299 __clear_ack_timer(chan);
2300 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2302 case L2CAP_EV_RETRANS_TO:
2303 l2cap_send_rr_or_rnr(chan, 1);
2304 chan->retry_count = 1;
2305 __set_monitor_timer(chan);
2306 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2308 case L2CAP_EV_RECV_FBIT:
2309 /* Nothing to process */
2318 static int l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2319 struct l2cap_ctrl *control,
2320 struct sk_buff_head *skbs, u8 event)
2324 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2328 case L2CAP_EV_DATA_REQUEST:
2329 if (chan->tx_send_head == NULL)
2330 chan->tx_send_head = skb_peek(skbs);
2331 /* Queue data, but don't send. */
2332 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2334 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2335 BT_DBG("Enter LOCAL_BUSY");
2336 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2338 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2339 /* The SREJ_SENT state must be aborted if we are to
2340 * enter the LOCAL_BUSY state.
2342 l2cap_abort_rx_srej_sent(chan);
2345 l2cap_send_ack(chan);
2348 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2349 BT_DBG("Exit LOCAL_BUSY");
2350 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2352 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2353 struct l2cap_ctrl local_control;
2354 memset(&local_control, 0, sizeof(local_control));
2355 local_control.sframe = 1;
2356 local_control.super = L2CAP_SUPER_RR;
2357 local_control.poll = 1;
2358 local_control.reqseq = chan->buffer_seq;
2359 l2cap_send_sframe(chan, &local_control);
2361 chan->retry_count = 1;
2362 __set_monitor_timer(chan);
2363 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2366 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2367 l2cap_process_reqseq(chan, control->reqseq);
2371 case L2CAP_EV_RECV_FBIT:
2372 if (control && control->final) {
2373 __clear_monitor_timer(chan);
2374 if (chan->unacked_frames > 0)
2375 __set_retrans_timer(chan);
2376 chan->retry_count = 0;
2377 chan->tx_state = L2CAP_TX_STATE_XMIT;
2378 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2381 case L2CAP_EV_EXPLICIT_POLL:
2384 case L2CAP_EV_MONITOR_TO:
2385 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2386 l2cap_send_rr_or_rnr(chan, 1);
2387 __set_monitor_timer(chan);
2388 chan->retry_count++;
2390 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2400 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2401 struct sk_buff_head *skbs, u8 event)
2405 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2406 chan, control, skbs, event, chan->tx_state);
2408 switch (chan->tx_state) {
2409 case L2CAP_TX_STATE_XMIT:
2410 err = l2cap_tx_state_xmit(chan, control, skbs, event);
2412 case L2CAP_TX_STATE_WAIT_F:
2413 err = l2cap_tx_state_wait_f(chan, control, skbs, event);
2423 /* Copy frame to all raw sockets on that connection */
2424 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2426 struct sk_buff *nskb;
2427 struct l2cap_chan *chan;
2429 BT_DBG("conn %p", conn);
2431 mutex_lock(&conn->chan_lock);
2433 list_for_each_entry(chan, &conn->chan_l, list) {
2434 struct sock *sk = chan->sk;
2435 if (chan->chan_type != L2CAP_CHAN_RAW)
2438 /* Don't send frame to the socket it came from */
2441 nskb = skb_clone(skb, GFP_ATOMIC);
2445 if (chan->ops->recv(chan->data, nskb))
2449 mutex_unlock(&conn->chan_lock);
2452 /* ---- L2CAP signalling commands ---- */
2453 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2454 u8 code, u8 ident, u16 dlen, void *data)
2456 struct sk_buff *skb, **frag;
2457 struct l2cap_cmd_hdr *cmd;
2458 struct l2cap_hdr *lh;
2461 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2462 conn, code, ident, dlen);
2464 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2465 count = min_t(unsigned int, conn->mtu, len);
2467 skb = bt_skb_alloc(count, GFP_ATOMIC);
2471 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2472 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2474 if (conn->hcon->type == LE_LINK)
2475 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2477 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2479 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2482 cmd->len = cpu_to_le16(dlen);
2485 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2486 memcpy(skb_put(skb, count), data, count);
2492 /* Continuation fragments (no L2CAP header) */
2493 frag = &skb_shinfo(skb)->frag_list;
2495 count = min_t(unsigned int, conn->mtu, len);
2497 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2501 memcpy(skb_put(*frag, count), data, count);
2506 frag = &(*frag)->next;
2516 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2518 struct l2cap_conf_opt *opt = *ptr;
2521 len = L2CAP_CONF_OPT_SIZE + opt->len;
2529 *val = *((u8 *) opt->val);
2533 *val = get_unaligned_le16(opt->val);
2537 *val = get_unaligned_le32(opt->val);
2541 *val = (unsigned long) opt->val;
2545 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2549 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2551 struct l2cap_conf_opt *opt = *ptr;
2553 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2560 *((u8 *) opt->val) = val;
2564 put_unaligned_le16(val, opt->val);
2568 put_unaligned_le32(val, opt->val);
2572 memcpy(opt->val, (void *) val, len);
2576 *ptr += L2CAP_CONF_OPT_SIZE + len;
2579 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2581 struct l2cap_conf_efs efs;
2583 switch (chan->mode) {
2584 case L2CAP_MODE_ERTM:
2585 efs.id = chan->local_id;
2586 efs.stype = chan->local_stype;
2587 efs.msdu = cpu_to_le16(chan->local_msdu);
2588 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2589 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2590 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2593 case L2CAP_MODE_STREAMING:
2595 efs.stype = L2CAP_SERV_BESTEFFORT;
2596 efs.msdu = cpu_to_le16(chan->local_msdu);
2597 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2606 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2607 (unsigned long) &efs);
2610 static void l2cap_ack_timeout(struct work_struct *work)
2612 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2615 BT_DBG("chan %p", chan);
2617 l2cap_chan_lock(chan);
2619 __l2cap_send_ack(chan);
2621 l2cap_chan_unlock(chan);
2623 l2cap_chan_put(chan);
2626 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2630 chan->next_tx_seq = 0;
2631 chan->expected_tx_seq = 0;
2632 chan->expected_ack_seq = 0;
2633 chan->unacked_frames = 0;
2634 chan->buffer_seq = 0;
2635 chan->num_acked = 0;
2636 chan->frames_sent = 0;
2637 chan->last_acked_seq = 0;
2639 chan->sdu_last_frag = NULL;
2642 skb_queue_head_init(&chan->tx_q);
2644 if (chan->mode != L2CAP_MODE_ERTM)
2647 chan->rx_state = L2CAP_RX_STATE_RECV;
2648 chan->tx_state = L2CAP_TX_STATE_XMIT;
2650 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2651 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2652 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2654 skb_queue_head_init(&chan->srej_q);
2656 INIT_LIST_HEAD(&chan->srej_l);
2657 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2661 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2663 l2cap_seq_list_free(&chan->srej_list);
2668 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2671 case L2CAP_MODE_STREAMING:
2672 case L2CAP_MODE_ERTM:
2673 if (l2cap_mode_supported(mode, remote_feat_mask))
2677 return L2CAP_MODE_BASIC;
2681 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2683 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2686 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2688 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2691 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2693 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2694 __l2cap_ews_supported(chan)) {
2695 /* use extended control field */
2696 set_bit(FLAG_EXT_CTRL, &chan->flags);
2697 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2699 chan->tx_win = min_t(u16, chan->tx_win,
2700 L2CAP_DEFAULT_TX_WINDOW);
2701 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2705 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2707 struct l2cap_conf_req *req = data;
2708 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2709 void *ptr = req->data;
2712 BT_DBG("chan %p", chan);
2714 if (chan->num_conf_req || chan->num_conf_rsp)
2717 switch (chan->mode) {
2718 case L2CAP_MODE_STREAMING:
2719 case L2CAP_MODE_ERTM:
2720 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2723 if (__l2cap_efs_supported(chan))
2724 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2728 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2733 if (chan->imtu != L2CAP_DEFAULT_MTU)
2734 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2736 switch (chan->mode) {
2737 case L2CAP_MODE_BASIC:
2738 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2739 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2742 rfc.mode = L2CAP_MODE_BASIC;
2744 rfc.max_transmit = 0;
2745 rfc.retrans_timeout = 0;
2746 rfc.monitor_timeout = 0;
2747 rfc.max_pdu_size = 0;
2749 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2750 (unsigned long) &rfc);
2753 case L2CAP_MODE_ERTM:
2754 rfc.mode = L2CAP_MODE_ERTM;
2755 rfc.max_transmit = chan->max_tx;
2756 rfc.retrans_timeout = 0;
2757 rfc.monitor_timeout = 0;
2759 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2760 L2CAP_EXT_HDR_SIZE -
2763 rfc.max_pdu_size = cpu_to_le16(size);
2765 l2cap_txwin_setup(chan);
2767 rfc.txwin_size = min_t(u16, chan->tx_win,
2768 L2CAP_DEFAULT_TX_WINDOW);
2770 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2771 (unsigned long) &rfc);
2773 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2774 l2cap_add_opt_efs(&ptr, chan);
2776 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2779 if (chan->fcs == L2CAP_FCS_NONE ||
2780 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2781 chan->fcs = L2CAP_FCS_NONE;
2782 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2785 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2786 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2790 case L2CAP_MODE_STREAMING:
2791 rfc.mode = L2CAP_MODE_STREAMING;
2793 rfc.max_transmit = 0;
2794 rfc.retrans_timeout = 0;
2795 rfc.monitor_timeout = 0;
2797 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2798 L2CAP_EXT_HDR_SIZE -
2801 rfc.max_pdu_size = cpu_to_le16(size);
2803 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2804 (unsigned long) &rfc);
2806 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2807 l2cap_add_opt_efs(&ptr, chan);
2809 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2812 if (chan->fcs == L2CAP_FCS_NONE ||
2813 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2814 chan->fcs = L2CAP_FCS_NONE;
2815 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2820 req->dcid = cpu_to_le16(chan->dcid);
2821 req->flags = cpu_to_le16(0);
2826 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2828 struct l2cap_conf_rsp *rsp = data;
2829 void *ptr = rsp->data;
2830 void *req = chan->conf_req;
2831 int len = chan->conf_len;
2832 int type, hint, olen;
2834 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2835 struct l2cap_conf_efs efs;
2837 u16 mtu = L2CAP_DEFAULT_MTU;
2838 u16 result = L2CAP_CONF_SUCCESS;
2841 BT_DBG("chan %p", chan);
2843 while (len >= L2CAP_CONF_OPT_SIZE) {
2844 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2846 hint = type & L2CAP_CONF_HINT;
2847 type &= L2CAP_CONF_MASK;
2850 case L2CAP_CONF_MTU:
2854 case L2CAP_CONF_FLUSH_TO:
2855 chan->flush_to = val;
2858 case L2CAP_CONF_QOS:
2861 case L2CAP_CONF_RFC:
2862 if (olen == sizeof(rfc))
2863 memcpy(&rfc, (void *) val, olen);
2866 case L2CAP_CONF_FCS:
2867 if (val == L2CAP_FCS_NONE)
2868 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2871 case L2CAP_CONF_EFS:
2873 if (olen == sizeof(efs))
2874 memcpy(&efs, (void *) val, olen);
2877 case L2CAP_CONF_EWS:
2879 return -ECONNREFUSED;
2881 set_bit(FLAG_EXT_CTRL, &chan->flags);
2882 set_bit(CONF_EWS_RECV, &chan->conf_state);
2883 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2884 chan->remote_tx_win = val;
2891 result = L2CAP_CONF_UNKNOWN;
2892 *((u8 *) ptr++) = type;
2897 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2900 switch (chan->mode) {
2901 case L2CAP_MODE_STREAMING:
2902 case L2CAP_MODE_ERTM:
2903 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2904 chan->mode = l2cap_select_mode(rfc.mode,
2905 chan->conn->feat_mask);
2910 if (__l2cap_efs_supported(chan))
2911 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2913 return -ECONNREFUSED;
2916 if (chan->mode != rfc.mode)
2917 return -ECONNREFUSED;
2923 if (chan->mode != rfc.mode) {
2924 result = L2CAP_CONF_UNACCEPT;
2925 rfc.mode = chan->mode;
2927 if (chan->num_conf_rsp == 1)
2928 return -ECONNREFUSED;
2930 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2931 sizeof(rfc), (unsigned long) &rfc);
2934 if (result == L2CAP_CONF_SUCCESS) {
2935 /* Configure output options and let the other side know
2936 * which ones we don't like. */
2938 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2939 result = L2CAP_CONF_UNACCEPT;
2942 set_bit(CONF_MTU_DONE, &chan->conf_state);
2944 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2947 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2948 efs.stype != L2CAP_SERV_NOTRAFIC &&
2949 efs.stype != chan->local_stype) {
2951 result = L2CAP_CONF_UNACCEPT;
2953 if (chan->num_conf_req >= 1)
2954 return -ECONNREFUSED;
2956 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2958 (unsigned long) &efs);
2960 /* Send PENDING Conf Rsp */
2961 result = L2CAP_CONF_PENDING;
2962 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2967 case L2CAP_MODE_BASIC:
2968 chan->fcs = L2CAP_FCS_NONE;
2969 set_bit(CONF_MODE_DONE, &chan->conf_state);
2972 case L2CAP_MODE_ERTM:
2973 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2974 chan->remote_tx_win = rfc.txwin_size;
2976 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2978 chan->remote_max_tx = rfc.max_transmit;
2980 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2982 L2CAP_EXT_HDR_SIZE -
2985 rfc.max_pdu_size = cpu_to_le16(size);
2986 chan->remote_mps = size;
2988 rfc.retrans_timeout =
2989 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2990 rfc.monitor_timeout =
2991 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2993 set_bit(CONF_MODE_DONE, &chan->conf_state);
2995 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2996 sizeof(rfc), (unsigned long) &rfc);
2998 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2999 chan->remote_id = efs.id;
3000 chan->remote_stype = efs.stype;
3001 chan->remote_msdu = le16_to_cpu(efs.msdu);
3002 chan->remote_flush_to =
3003 le32_to_cpu(efs.flush_to);
3004 chan->remote_acc_lat =
3005 le32_to_cpu(efs.acc_lat);
3006 chan->remote_sdu_itime =
3007 le32_to_cpu(efs.sdu_itime);
3008 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3009 sizeof(efs), (unsigned long) &efs);
3013 case L2CAP_MODE_STREAMING:
3014 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3016 L2CAP_EXT_HDR_SIZE -
3019 rfc.max_pdu_size = cpu_to_le16(size);
3020 chan->remote_mps = size;
3022 set_bit(CONF_MODE_DONE, &chan->conf_state);
3024 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3025 sizeof(rfc), (unsigned long) &rfc);
3030 result = L2CAP_CONF_UNACCEPT;
3032 memset(&rfc, 0, sizeof(rfc));
3033 rfc.mode = chan->mode;
3036 if (result == L2CAP_CONF_SUCCESS)
3037 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3039 rsp->scid = cpu_to_le16(chan->dcid);
3040 rsp->result = cpu_to_le16(result);
3041 rsp->flags = cpu_to_le16(0x0000);
3046 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3048 struct l2cap_conf_req *req = data;
3049 void *ptr = req->data;
3052 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3053 struct l2cap_conf_efs efs;
3055 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3057 while (len >= L2CAP_CONF_OPT_SIZE) {
3058 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3061 case L2CAP_CONF_MTU:
3062 if (val < L2CAP_DEFAULT_MIN_MTU) {
3063 *result = L2CAP_CONF_UNACCEPT;
3064 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3067 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3070 case L2CAP_CONF_FLUSH_TO:
3071 chan->flush_to = val;
3072 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3076 case L2CAP_CONF_RFC:
3077 if (olen == sizeof(rfc))
3078 memcpy(&rfc, (void *)val, olen);
3080 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3081 rfc.mode != chan->mode)
3082 return -ECONNREFUSED;
3086 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3087 sizeof(rfc), (unsigned long) &rfc);
3090 case L2CAP_CONF_EWS:
3091 chan->tx_win = min_t(u16, val,
3092 L2CAP_DEFAULT_EXT_WINDOW);
3093 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3097 case L2CAP_CONF_EFS:
3098 if (olen == sizeof(efs))
3099 memcpy(&efs, (void *)val, olen);
3101 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3102 efs.stype != L2CAP_SERV_NOTRAFIC &&
3103 efs.stype != chan->local_stype)
3104 return -ECONNREFUSED;
3106 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3107 sizeof(efs), (unsigned long) &efs);
3112 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3113 return -ECONNREFUSED;
3115 chan->mode = rfc.mode;
3117 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3119 case L2CAP_MODE_ERTM:
3120 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3121 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3122 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3124 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3125 chan->local_msdu = le16_to_cpu(efs.msdu);
3126 chan->local_sdu_itime =
3127 le32_to_cpu(efs.sdu_itime);
3128 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3129 chan->local_flush_to =
3130 le32_to_cpu(efs.flush_to);
3134 case L2CAP_MODE_STREAMING:
3135 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3139 req->dcid = cpu_to_le16(chan->dcid);
3140 req->flags = cpu_to_le16(0x0000);
3145 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3147 struct l2cap_conf_rsp *rsp = data;
3148 void *ptr = rsp->data;
3150 BT_DBG("chan %p", chan);
3152 rsp->scid = cpu_to_le16(chan->dcid);
3153 rsp->result = cpu_to_le16(result);
3154 rsp->flags = cpu_to_le16(flags);
3159 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3161 struct l2cap_conn_rsp rsp;
3162 struct l2cap_conn *conn = chan->conn;
3165 rsp.scid = cpu_to_le16(chan->dcid);
3166 rsp.dcid = cpu_to_le16(chan->scid);
3167 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3168 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3169 l2cap_send_cmd(conn, chan->ident,
3170 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3172 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3175 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3176 l2cap_build_conf_req(chan, buf), buf);
3177 chan->num_conf_req++;
3180 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3184 struct l2cap_conf_rfc rfc;
3186 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3188 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3191 while (len >= L2CAP_CONF_OPT_SIZE) {
3192 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3195 case L2CAP_CONF_RFC:
3196 if (olen == sizeof(rfc))
3197 memcpy(&rfc, (void *)val, olen);
3202 /* Use sane default values in case a misbehaving remote device
3203 * did not send an RFC option.
3205 rfc.mode = chan->mode;
3206 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3207 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3208 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3210 BT_ERR("Expected RFC option was not found, using defaults");
3214 case L2CAP_MODE_ERTM:
3215 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3216 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3217 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3219 case L2CAP_MODE_STREAMING:
3220 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3224 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3226 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3228 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3231 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3232 cmd->ident == conn->info_ident) {
3233 cancel_delayed_work(&conn->info_timer);
3235 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3236 conn->info_ident = 0;
3238 l2cap_conn_start(conn);
3244 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3246 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3247 struct l2cap_conn_rsp rsp;
3248 struct l2cap_chan *chan = NULL, *pchan;
3249 struct sock *parent, *sk = NULL;
3250 int result, status = L2CAP_CS_NO_INFO;
3252 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3253 __le16 psm = req->psm;
3255 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3257 /* Check if we have socket listening on psm */
3258 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3260 result = L2CAP_CR_BAD_PSM;
3266 mutex_lock(&conn->chan_lock);
3269 /* Check if the ACL is secure enough (if not SDP) */
3270 if (psm != cpu_to_le16(0x0001) &&
3271 !hci_conn_check_link_mode(conn->hcon)) {
3272 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3273 result = L2CAP_CR_SEC_BLOCK;
3277 result = L2CAP_CR_NO_MEM;
3279 /* Check for backlog size */
3280 if (sk_acceptq_is_full(parent)) {
3281 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3285 chan = pchan->ops->new_connection(pchan->data);
3291 /* Check if we already have channel with that dcid */
3292 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3293 sock_set_flag(sk, SOCK_ZAPPED);
3294 chan->ops->close(chan->data);
3298 hci_conn_hold(conn->hcon);
3300 bacpy(&bt_sk(sk)->src, conn->src);
3301 bacpy(&bt_sk(sk)->dst, conn->dst);
3305 bt_accept_enqueue(parent, sk);
3307 __l2cap_chan_add(conn, chan);
3311 __set_chan_timer(chan, sk->sk_sndtimeo);
3313 chan->ident = cmd->ident;
3315 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3316 if (l2cap_chan_check_security(chan)) {
3317 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3318 __l2cap_state_change(chan, BT_CONNECT2);
3319 result = L2CAP_CR_PEND;
3320 status = L2CAP_CS_AUTHOR_PEND;
3321 parent->sk_data_ready(parent, 0);
3323 __l2cap_state_change(chan, BT_CONFIG);
3324 result = L2CAP_CR_SUCCESS;
3325 status = L2CAP_CS_NO_INFO;
3328 __l2cap_state_change(chan, BT_CONNECT2);
3329 result = L2CAP_CR_PEND;
3330 status = L2CAP_CS_AUTHEN_PEND;
3333 __l2cap_state_change(chan, BT_CONNECT2);
3334 result = L2CAP_CR_PEND;
3335 status = L2CAP_CS_NO_INFO;
3339 release_sock(parent);
3340 mutex_unlock(&conn->chan_lock);
3343 rsp.scid = cpu_to_le16(scid);
3344 rsp.dcid = cpu_to_le16(dcid);
3345 rsp.result = cpu_to_le16(result);
3346 rsp.status = cpu_to_le16(status);
3347 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3349 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3350 struct l2cap_info_req info;
3351 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3353 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3354 conn->info_ident = l2cap_get_ident(conn);
3356 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3358 l2cap_send_cmd(conn, conn->info_ident,
3359 L2CAP_INFO_REQ, sizeof(info), &info);
3362 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3363 result == L2CAP_CR_SUCCESS) {
3365 set_bit(CONF_REQ_SENT, &chan->conf_state);
3366 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3367 l2cap_build_conf_req(chan, buf), buf);
3368 chan->num_conf_req++;
3374 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3376 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3377 u16 scid, dcid, result, status;
3378 struct l2cap_chan *chan;
3382 scid = __le16_to_cpu(rsp->scid);
3383 dcid = __le16_to_cpu(rsp->dcid);
3384 result = __le16_to_cpu(rsp->result);
3385 status = __le16_to_cpu(rsp->status);
3387 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3388 dcid, scid, result, status);
3390 mutex_lock(&conn->chan_lock);
3393 chan = __l2cap_get_chan_by_scid(conn, scid);
3399 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3408 l2cap_chan_lock(chan);
3411 case L2CAP_CR_SUCCESS:
3412 l2cap_state_change(chan, BT_CONFIG);
3415 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3417 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3420 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3421 l2cap_build_conf_req(chan, req), req);
3422 chan->num_conf_req++;
3426 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3430 l2cap_chan_del(chan, ECONNREFUSED);
3434 l2cap_chan_unlock(chan);
3437 mutex_unlock(&conn->chan_lock);
3442 static inline void set_default_fcs(struct l2cap_chan *chan)
3444 /* FCS is enabled only in ERTM or streaming mode, if one or both
3447 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3448 chan->fcs = L2CAP_FCS_NONE;
3449 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3450 chan->fcs = L2CAP_FCS_CRC16;
3453 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3455 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3458 struct l2cap_chan *chan;
3461 dcid = __le16_to_cpu(req->dcid);
3462 flags = __le16_to_cpu(req->flags);
3464 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3466 chan = l2cap_get_chan_by_scid(conn, dcid);
3470 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3471 struct l2cap_cmd_rej_cid rej;
3473 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3474 rej.scid = cpu_to_le16(chan->scid);
3475 rej.dcid = cpu_to_le16(chan->dcid);
3477 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3482 /* Reject if config buffer is too small. */
3483 len = cmd_len - sizeof(*req);
3484 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3485 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3486 l2cap_build_conf_rsp(chan, rsp,
3487 L2CAP_CONF_REJECT, flags), rsp);
3492 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3493 chan->conf_len += len;
3495 if (flags & 0x0001) {
3496 /* Incomplete config. Send empty response. */
3497 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3498 l2cap_build_conf_rsp(chan, rsp,
3499 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3503 /* Complete config. */
3504 len = l2cap_parse_conf_req(chan, rsp);
3506 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3510 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3511 chan->num_conf_rsp++;
3513 /* Reset config buffer. */
3516 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3519 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3520 set_default_fcs(chan);
3522 l2cap_state_change(chan, BT_CONNECTED);
3524 if (chan->mode == L2CAP_MODE_ERTM ||
3525 chan->mode == L2CAP_MODE_STREAMING)
3526 err = l2cap_ertm_init(chan);
3529 l2cap_send_disconn_req(chan->conn, chan, -err);
3531 l2cap_chan_ready(chan);
3536 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3538 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3539 l2cap_build_conf_req(chan, buf), buf);
3540 chan->num_conf_req++;
3543 /* Got Conf Rsp PENDING from remote side and asume we sent
3544 Conf Rsp PENDING in the code above */
3545 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3546 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3548 /* check compatibility */
3550 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3551 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3553 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3554 l2cap_build_conf_rsp(chan, rsp,
3555 L2CAP_CONF_SUCCESS, 0x0000), rsp);
3559 l2cap_chan_unlock(chan);
3563 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3565 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3566 u16 scid, flags, result;
3567 struct l2cap_chan *chan;
3568 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3571 scid = __le16_to_cpu(rsp->scid);
3572 flags = __le16_to_cpu(rsp->flags);
3573 result = __le16_to_cpu(rsp->result);
3575 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3578 chan = l2cap_get_chan_by_scid(conn, scid);
3583 case L2CAP_CONF_SUCCESS:
3584 l2cap_conf_rfc_get(chan, rsp->data, len);
3585 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3588 case L2CAP_CONF_PENDING:
3589 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3591 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3594 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3597 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3601 /* check compatibility */
3603 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3604 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3606 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3607 l2cap_build_conf_rsp(chan, buf,
3608 L2CAP_CONF_SUCCESS, 0x0000), buf);
3612 case L2CAP_CONF_UNACCEPT:
3613 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3616 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3617 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3621 /* throw out any old stored conf requests */
3622 result = L2CAP_CONF_SUCCESS;
3623 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3626 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3630 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3631 L2CAP_CONF_REQ, len, req);
3632 chan->num_conf_req++;
3633 if (result != L2CAP_CONF_SUCCESS)
3639 l2cap_chan_set_err(chan, ECONNRESET);
3641 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3642 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3649 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3651 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3652 set_default_fcs(chan);
3654 l2cap_state_change(chan, BT_CONNECTED);
3655 if (chan->mode == L2CAP_MODE_ERTM ||
3656 chan->mode == L2CAP_MODE_STREAMING)
3657 err = l2cap_ertm_init(chan);
3660 l2cap_send_disconn_req(chan->conn, chan, -err);
3662 l2cap_chan_ready(chan);
3666 l2cap_chan_unlock(chan);
3670 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3672 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3673 struct l2cap_disconn_rsp rsp;
3675 struct l2cap_chan *chan;
3678 scid = __le16_to_cpu(req->scid);
3679 dcid = __le16_to_cpu(req->dcid);
3681 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3683 mutex_lock(&conn->chan_lock);
3685 chan = __l2cap_get_chan_by_scid(conn, dcid);
3687 mutex_unlock(&conn->chan_lock);
3691 l2cap_chan_lock(chan);
3695 rsp.dcid = cpu_to_le16(chan->scid);
3696 rsp.scid = cpu_to_le16(chan->dcid);
3697 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3700 sk->sk_shutdown = SHUTDOWN_MASK;
3703 l2cap_chan_hold(chan);
3704 l2cap_chan_del(chan, ECONNRESET);
3706 l2cap_chan_unlock(chan);
3708 chan->ops->close(chan->data);
3709 l2cap_chan_put(chan);
3711 mutex_unlock(&conn->chan_lock);
3716 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3718 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3720 struct l2cap_chan *chan;
3722 scid = __le16_to_cpu(rsp->scid);
3723 dcid = __le16_to_cpu(rsp->dcid);
3725 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3727 mutex_lock(&conn->chan_lock);
3729 chan = __l2cap_get_chan_by_scid(conn, scid);
3731 mutex_unlock(&conn->chan_lock);
3735 l2cap_chan_lock(chan);
3737 l2cap_chan_hold(chan);
3738 l2cap_chan_del(chan, 0);
3740 l2cap_chan_unlock(chan);
3742 chan->ops->close(chan->data);
3743 l2cap_chan_put(chan);
3745 mutex_unlock(&conn->chan_lock);
3750 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3752 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3755 type = __le16_to_cpu(req->type);
3757 BT_DBG("type 0x%4.4x", type);
3759 if (type == L2CAP_IT_FEAT_MASK) {
3761 u32 feat_mask = l2cap_feat_mask;
3762 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3763 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3764 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3766 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3769 feat_mask |= L2CAP_FEAT_EXT_FLOW
3770 | L2CAP_FEAT_EXT_WINDOW;
3772 put_unaligned_le32(feat_mask, rsp->data);
3773 l2cap_send_cmd(conn, cmd->ident,
3774 L2CAP_INFO_RSP, sizeof(buf), buf);
3775 } else if (type == L2CAP_IT_FIXED_CHAN) {
3777 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3780 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3782 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3784 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3785 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3786 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3787 l2cap_send_cmd(conn, cmd->ident,
3788 L2CAP_INFO_RSP, sizeof(buf), buf);
3790 struct l2cap_info_rsp rsp;
3791 rsp.type = cpu_to_le16(type);
3792 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3793 l2cap_send_cmd(conn, cmd->ident,
3794 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3800 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3802 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3805 type = __le16_to_cpu(rsp->type);
3806 result = __le16_to_cpu(rsp->result);
3808 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3810 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3811 if (cmd->ident != conn->info_ident ||
3812 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3815 cancel_delayed_work(&conn->info_timer);
3817 if (result != L2CAP_IR_SUCCESS) {
3818 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3819 conn->info_ident = 0;
3821 l2cap_conn_start(conn);
3827 case L2CAP_IT_FEAT_MASK:
3828 conn->feat_mask = get_unaligned_le32(rsp->data);
3830 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3831 struct l2cap_info_req req;
3832 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3834 conn->info_ident = l2cap_get_ident(conn);
3836 l2cap_send_cmd(conn, conn->info_ident,
3837 L2CAP_INFO_REQ, sizeof(req), &req);
3839 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3840 conn->info_ident = 0;
3842 l2cap_conn_start(conn);
3846 case L2CAP_IT_FIXED_CHAN:
3847 conn->fixed_chan_mask = rsp->data[0];
3848 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3849 conn->info_ident = 0;
3851 l2cap_conn_start(conn);
3858 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3859 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3862 struct l2cap_create_chan_req *req = data;
3863 struct l2cap_create_chan_rsp rsp;
3866 if (cmd_len != sizeof(*req))
3872 psm = le16_to_cpu(req->psm);
3873 scid = le16_to_cpu(req->scid);
3875 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3877 /* Placeholder: Always reject */
3879 rsp.scid = cpu_to_le16(scid);
3880 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3881 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3883 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3889 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3890 struct l2cap_cmd_hdr *cmd, void *data)
3892 BT_DBG("conn %p", conn);
3894 return l2cap_connect_rsp(conn, cmd, data);
3897 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3898 u16 icid, u16 result)
3900 struct l2cap_move_chan_rsp rsp;
3902 BT_DBG("icid %d, result %d", icid, result);
3904 rsp.icid = cpu_to_le16(icid);
3905 rsp.result = cpu_to_le16(result);
3907 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3910 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3911 struct l2cap_chan *chan, u16 icid, u16 result)
3913 struct l2cap_move_chan_cfm cfm;
3916 BT_DBG("icid %d, result %d", icid, result);
3918 ident = l2cap_get_ident(conn);
3920 chan->ident = ident;
3922 cfm.icid = cpu_to_le16(icid);
3923 cfm.result = cpu_to_le16(result);
3925 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3928 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3931 struct l2cap_move_chan_cfm_rsp rsp;
3933 BT_DBG("icid %d", icid);
3935 rsp.icid = cpu_to_le16(icid);
3936 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3939 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3940 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3942 struct l2cap_move_chan_req *req = data;
3944 u16 result = L2CAP_MR_NOT_ALLOWED;
3946 if (cmd_len != sizeof(*req))
3949 icid = le16_to_cpu(req->icid);
3951 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3956 /* Placeholder: Always refuse */
3957 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3962 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3963 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3965 struct l2cap_move_chan_rsp *rsp = data;
3968 if (cmd_len != sizeof(*rsp))
3971 icid = le16_to_cpu(rsp->icid);
3972 result = le16_to_cpu(rsp->result);
3974 BT_DBG("icid %d, result %d", icid, result);
3976 /* Placeholder: Always unconfirmed */
3977 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3982 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3983 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3985 struct l2cap_move_chan_cfm *cfm = data;
3988 if (cmd_len != sizeof(*cfm))
3991 icid = le16_to_cpu(cfm->icid);
3992 result = le16_to_cpu(cfm->result);
3994 BT_DBG("icid %d, result %d", icid, result);
3996 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4001 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4002 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4004 struct l2cap_move_chan_cfm_rsp *rsp = data;
4007 if (cmd_len != sizeof(*rsp))
4010 icid = le16_to_cpu(rsp->icid);
4012 BT_DBG("icid %d", icid);
4017 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4022 if (min > max || min < 6 || max > 3200)
4025 if (to_multiplier < 10 || to_multiplier > 3200)
4028 if (max >= to_multiplier * 8)
4031 max_latency = (to_multiplier * 8 / max) - 1;
4032 if (latency > 499 || latency > max_latency)
4038 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4039 struct l2cap_cmd_hdr *cmd, u8 *data)
4041 struct hci_conn *hcon = conn->hcon;
4042 struct l2cap_conn_param_update_req *req;
4043 struct l2cap_conn_param_update_rsp rsp;
4044 u16 min, max, latency, to_multiplier, cmd_len;
4047 if (!(hcon->link_mode & HCI_LM_MASTER))
4050 cmd_len = __le16_to_cpu(cmd->len);
4051 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4054 req = (struct l2cap_conn_param_update_req *) data;
4055 min = __le16_to_cpu(req->min);
4056 max = __le16_to_cpu(req->max);
4057 latency = __le16_to_cpu(req->latency);
4058 to_multiplier = __le16_to_cpu(req->to_multiplier);
4060 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4061 min, max, latency, to_multiplier);
4063 memset(&rsp, 0, sizeof(rsp));
4065 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4067 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4069 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4071 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4075 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
4080 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4081 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4085 switch (cmd->code) {
4086 case L2CAP_COMMAND_REJ:
4087 l2cap_command_rej(conn, cmd, data);
4090 case L2CAP_CONN_REQ:
4091 err = l2cap_connect_req(conn, cmd, data);
4094 case L2CAP_CONN_RSP:
4095 err = l2cap_connect_rsp(conn, cmd, data);
4098 case L2CAP_CONF_REQ:
4099 err = l2cap_config_req(conn, cmd, cmd_len, data);
4102 case L2CAP_CONF_RSP:
4103 err = l2cap_config_rsp(conn, cmd, data);
4106 case L2CAP_DISCONN_REQ:
4107 err = l2cap_disconnect_req(conn, cmd, data);
4110 case L2CAP_DISCONN_RSP:
4111 err = l2cap_disconnect_rsp(conn, cmd, data);
4114 case L2CAP_ECHO_REQ:
4115 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4118 case L2CAP_ECHO_RSP:
4121 case L2CAP_INFO_REQ:
4122 err = l2cap_information_req(conn, cmd, data);
4125 case L2CAP_INFO_RSP:
4126 err = l2cap_information_rsp(conn, cmd, data);
4129 case L2CAP_CREATE_CHAN_REQ:
4130 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4133 case L2CAP_CREATE_CHAN_RSP:
4134 err = l2cap_create_channel_rsp(conn, cmd, data);
4137 case L2CAP_MOVE_CHAN_REQ:
4138 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4141 case L2CAP_MOVE_CHAN_RSP:
4142 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4145 case L2CAP_MOVE_CHAN_CFM:
4146 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4149 case L2CAP_MOVE_CHAN_CFM_RSP:
4150 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4154 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4162 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4163 struct l2cap_cmd_hdr *cmd, u8 *data)
4165 switch (cmd->code) {
4166 case L2CAP_COMMAND_REJ:
4169 case L2CAP_CONN_PARAM_UPDATE_REQ:
4170 return l2cap_conn_param_update_req(conn, cmd, data);
4172 case L2CAP_CONN_PARAM_UPDATE_RSP:
4176 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4181 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4182 struct sk_buff *skb)
4184 u8 *data = skb->data;
4186 struct l2cap_cmd_hdr cmd;
4189 l2cap_raw_recv(conn, skb);
4191 while (len >= L2CAP_CMD_HDR_SIZE) {
4193 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4194 data += L2CAP_CMD_HDR_SIZE;
4195 len -= L2CAP_CMD_HDR_SIZE;
4197 cmd_len = le16_to_cpu(cmd.len);
4199 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
4201 if (cmd_len > len || !cmd.ident) {
4202 BT_DBG("corrupted command");
4206 if (conn->hcon->type == LE_LINK)
4207 err = l2cap_le_sig_cmd(conn, &cmd, data);
4209 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4212 struct l2cap_cmd_rej_unk rej;
4214 BT_ERR("Wrong link type (%d)", err);
4216 /* FIXME: Map err to a valid reason */
4217 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4218 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4228 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4230 u16 our_fcs, rcv_fcs;
4233 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4234 hdr_size = L2CAP_EXT_HDR_SIZE;
4236 hdr_size = L2CAP_ENH_HDR_SIZE;
4238 if (chan->fcs == L2CAP_FCS_CRC16) {
4239 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4240 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4241 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4243 if (our_fcs != rcv_fcs)
4249 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4253 chan->frames_sent = 0;
4255 control |= __set_reqseq(chan, chan->buffer_seq);
4257 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4258 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
4259 set_bit(CONN_RNR_SENT, &chan->conn_state);
4262 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
4263 l2cap_retransmit_frames(chan);
4265 l2cap_ertm_send(chan);
4267 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4268 chan->frames_sent == 0) {
4269 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4273 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
4275 struct sk_buff *next_skb;
4276 int tx_seq_offset, next_tx_seq_offset;
4278 bt_cb(skb)->control.txseq = tx_seq;
4279 bt_cb(skb)->control.sar = sar;
4281 next_skb = skb_peek(&chan->srej_q);
4283 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4286 if (bt_cb(next_skb)->control.txseq == tx_seq)
4289 next_tx_seq_offset = __seq_offset(chan,
4290 bt_cb(next_skb)->control.txseq, chan->buffer_seq);
4292 if (next_tx_seq_offset > tx_seq_offset) {
4293 __skb_queue_before(&chan->srej_q, next_skb, skb);
4297 if (skb_queue_is_last(&chan->srej_q, next_skb))
4300 next_skb = skb_queue_next(&chan->srej_q, next_skb);
4303 __skb_queue_tail(&chan->srej_q, skb);
4308 static void append_skb_frag(struct sk_buff *skb,
4309 struct sk_buff *new_frag, struct sk_buff **last_frag)
4311 /* skb->len reflects data in skb as well as all fragments
4312 * skb->data_len reflects only data in fragments
4314 if (!skb_has_frag_list(skb))
4315 skb_shinfo(skb)->frag_list = new_frag;
4317 new_frag->next = NULL;
4319 (*last_frag)->next = new_frag;
4320 *last_frag = new_frag;
4322 skb->len += new_frag->len;
4323 skb->data_len += new_frag->len;
4324 skb->truesize += new_frag->truesize;
4327 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
4331 switch (__get_ctrl_sar(chan, control)) {
4332 case L2CAP_SAR_UNSEGMENTED:
4336 err = chan->ops->recv(chan->data, skb);
4339 case L2CAP_SAR_START:
4343 chan->sdu_len = get_unaligned_le16(skb->data);
4344 skb_pull(skb, L2CAP_SDULEN_SIZE);
4346 if (chan->sdu_len > chan->imtu) {
4351 if (skb->len >= chan->sdu_len)
4355 chan->sdu_last_frag = skb;
4361 case L2CAP_SAR_CONTINUE:
4365 append_skb_frag(chan->sdu, skb,
4366 &chan->sdu_last_frag);
4369 if (chan->sdu->len >= chan->sdu_len)
4379 append_skb_frag(chan->sdu, skb,
4380 &chan->sdu_last_frag);
4383 if (chan->sdu->len != chan->sdu_len)
4386 err = chan->ops->recv(chan->data, chan->sdu);
4389 /* Reassembly complete */
4391 chan->sdu_last_frag = NULL;
4399 kfree_skb(chan->sdu);
4401 chan->sdu_last_frag = NULL;
4408 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
4410 BT_DBG("chan %p, Enter local busy", chan);
4412 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4413 l2cap_seq_list_clear(&chan->srej_list);
4415 __set_ack_timer(chan);
4418 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
4422 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4425 control = __set_reqseq(chan, chan->buffer_seq);
4426 control |= __set_ctrl_poll(chan);
4427 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4428 chan->retry_count = 1;
4430 __clear_retrans_timer(chan);
4431 __set_monitor_timer(chan);
4433 set_bit(CONN_WAIT_F, &chan->conn_state);
4436 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4437 clear_bit(CONN_RNR_SENT, &chan->conn_state);
4439 BT_DBG("chan %p, Exit local busy", chan);
4442 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4444 if (chan->mode == L2CAP_MODE_ERTM) {
4446 l2cap_ertm_enter_local_busy(chan);
4448 l2cap_ertm_exit_local_busy(chan);
4452 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
4454 struct sk_buff *skb;
4457 while ((skb = skb_peek(&chan->srej_q)) &&
4458 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4461 if (bt_cb(skb)->control.txseq != tx_seq)
4464 skb = skb_dequeue(&chan->srej_q);
4465 control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
4466 err = l2cap_reassemble_sdu(chan, skb, control);
4469 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4473 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
4474 tx_seq = __next_seq(chan, tx_seq);
4478 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4480 struct srej_list *l, *tmp;
4483 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
4484 if (l->tx_seq == tx_seq) {
4489 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4490 control |= __set_reqseq(chan, l->tx_seq);
4492 list_add_tail(&l->list, &chan->srej_l);
4496 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4498 struct srej_list *new;
4501 while (tx_seq != chan->expected_tx_seq) {
4502 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4503 control |= __set_reqseq(chan, chan->expected_tx_seq);
4504 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
4506 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
4510 new->tx_seq = chan->expected_tx_seq;
4512 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4514 list_add_tail(&new->list, &chan->srej_l);
4517 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4522 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4524 u16 tx_seq = __get_txseq(chan, rx_control);
4525 u16 req_seq = __get_reqseq(chan, rx_control);
4526 u8 sar = __get_ctrl_sar(chan, rx_control);
4527 int tx_seq_offset, expected_tx_seq_offset;
4528 int num_to_ack = (chan->tx_win/6) + 1;
4531 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
4532 tx_seq, rx_control);
4534 if (__is_ctrl_final(chan, rx_control) &&
4535 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4536 __clear_monitor_timer(chan);
4537 if (chan->unacked_frames > 0)
4538 __set_retrans_timer(chan);
4539 clear_bit(CONN_WAIT_F, &chan->conn_state);
4542 chan->expected_ack_seq = req_seq;
4543 l2cap_drop_acked_frames(chan);
4545 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4547 /* invalid tx_seq */
4548 if (tx_seq_offset >= chan->tx_win) {
4549 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4553 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4554 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4555 l2cap_send_ack(chan);
4559 if (tx_seq == chan->expected_tx_seq)
4562 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4563 struct srej_list *first;
4565 first = list_first_entry(&chan->srej_l,
4566 struct srej_list, list);
4567 if (tx_seq == first->tx_seq) {
4568 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4569 l2cap_check_srej_gap(chan, tx_seq);
4571 list_del(&first->list);
4574 if (list_empty(&chan->srej_l)) {
4575 chan->buffer_seq = chan->buffer_seq_srej;
4576 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
4577 l2cap_send_ack(chan);
4578 BT_DBG("chan %p, Exit SREJ_SENT", chan);
4581 struct srej_list *l;
4583 /* duplicated tx_seq */
4584 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
4587 list_for_each_entry(l, &chan->srej_l, list) {
4588 if (l->tx_seq == tx_seq) {
4589 l2cap_resend_srejframe(chan, tx_seq);
4594 err = l2cap_send_srejframe(chan, tx_seq);
4596 l2cap_send_disconn_req(chan->conn, chan, -err);
4601 expected_tx_seq_offset = __seq_offset(chan,
4602 chan->expected_tx_seq, chan->buffer_seq);
4604 /* duplicated tx_seq */
4605 if (tx_seq_offset < expected_tx_seq_offset)
4608 set_bit(CONN_SREJ_SENT, &chan->conn_state);
4610 BT_DBG("chan %p, Enter SREJ", chan);
4612 INIT_LIST_HEAD(&chan->srej_l);
4613 chan->buffer_seq_srej = chan->buffer_seq;
4615 __skb_queue_head_init(&chan->srej_q);
4616 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4618 /* Set P-bit only if there are some I-frames to ack. */
4619 if (__clear_ack_timer(chan))
4620 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4622 err = l2cap_send_srejframe(chan, tx_seq);
4624 l2cap_send_disconn_req(chan->conn, chan, -err);
4631 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4633 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4634 bt_cb(skb)->control.txseq = tx_seq;
4635 bt_cb(skb)->control.sar = sar;
4636 __skb_queue_tail(&chan->srej_q, skb);
4640 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4641 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4644 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4648 if (__is_ctrl_final(chan, rx_control)) {
4649 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4650 l2cap_retransmit_frames(chan);
4654 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4655 if (chan->num_acked == num_to_ack - 1)
4656 l2cap_send_ack(chan);
4658 __set_ack_timer(chan);
4667 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4669 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4670 __get_reqseq(chan, rx_control), rx_control);
4672 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4673 l2cap_drop_acked_frames(chan);
4675 if (__is_ctrl_poll(chan, rx_control)) {
4676 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4677 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4678 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4679 (chan->unacked_frames > 0))
4680 __set_retrans_timer(chan);
4682 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4683 l2cap_send_srejtail(chan);
4685 l2cap_send_i_or_rr_or_rnr(chan);
4688 } else if (__is_ctrl_final(chan, rx_control)) {
4689 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4691 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4692 l2cap_retransmit_frames(chan);
4695 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4696 (chan->unacked_frames > 0))
4697 __set_retrans_timer(chan);
4699 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4700 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4701 l2cap_send_ack(chan);
4703 l2cap_ertm_send(chan);
4707 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4709 u16 tx_seq = __get_reqseq(chan, rx_control);
4711 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4713 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4715 chan->expected_ack_seq = tx_seq;
4716 l2cap_drop_acked_frames(chan);
4718 if (__is_ctrl_final(chan, rx_control)) {
4719 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4720 l2cap_retransmit_frames(chan);
4722 l2cap_retransmit_frames(chan);
4724 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4725 set_bit(CONN_REJ_ACT, &chan->conn_state);
4728 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4730 u16 tx_seq = __get_reqseq(chan, rx_control);
4732 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4734 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4736 if (__is_ctrl_poll(chan, rx_control)) {
4737 chan->expected_ack_seq = tx_seq;
4738 l2cap_drop_acked_frames(chan);
4740 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4741 l2cap_retransmit_one_frame(chan, tx_seq);
4743 l2cap_ertm_send(chan);
4745 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4746 chan->srej_save_reqseq = tx_seq;
4747 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4749 } else if (__is_ctrl_final(chan, rx_control)) {
4750 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4751 chan->srej_save_reqseq == tx_seq)
4752 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4754 l2cap_retransmit_one_frame(chan, tx_seq);
4756 l2cap_retransmit_one_frame(chan, tx_seq);
4757 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4758 chan->srej_save_reqseq = tx_seq;
4759 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4764 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4766 u16 tx_seq = __get_reqseq(chan, rx_control);
4768 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4770 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4771 chan->expected_ack_seq = tx_seq;
4772 l2cap_drop_acked_frames(chan);
4774 if (__is_ctrl_poll(chan, rx_control))
4775 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4777 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4778 __clear_retrans_timer(chan);
4779 if (__is_ctrl_poll(chan, rx_control))
4780 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4784 if (__is_ctrl_poll(chan, rx_control)) {
4785 l2cap_send_srejtail(chan);
4787 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4791 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4793 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4795 if (__is_ctrl_final(chan, rx_control) &&
4796 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4797 __clear_monitor_timer(chan);
4798 if (chan->unacked_frames > 0)
4799 __set_retrans_timer(chan);
4800 clear_bit(CONN_WAIT_F, &chan->conn_state);
4803 switch (__get_ctrl_super(chan, rx_control)) {
4804 case L2CAP_SUPER_RR:
4805 l2cap_data_channel_rrframe(chan, rx_control);
4808 case L2CAP_SUPER_REJ:
4809 l2cap_data_channel_rejframe(chan, rx_control);
4812 case L2CAP_SUPER_SREJ:
4813 l2cap_data_channel_srejframe(chan, rx_control);
4816 case L2CAP_SUPER_RNR:
4817 l2cap_data_channel_rnrframe(chan, rx_control);
4825 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4826 struct sk_buff *skb, u8 event)
4832 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4833 struct sk_buff *skb)
4839 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4841 struct l2cap_ctrl *control = &bt_cb(skb)->control;
4845 __unpack_control(chan, skb);
4850 * We can just drop the corrupted I-frame here.
4851 * Receiver will miss it and start proper recovery
4852 * procedures and ask for retransmission.
4854 if (l2cap_check_fcs(chan, skb))
4857 if (!control->sframe && control->sar == L2CAP_SAR_START)
4858 len -= L2CAP_SDULEN_SIZE;
4860 if (chan->fcs == L2CAP_FCS_CRC16)
4861 len -= L2CAP_FCS_SIZE;
4863 if (len > chan->mps) {
4864 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4868 if (!control->sframe) {
4871 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
4872 control->sar, control->reqseq, control->final,
4875 /* Validate F-bit - F=0 always valid, F=1 only
4876 * valid in TX WAIT_F
4878 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
4881 if (chan->mode != L2CAP_MODE_STREAMING) {
4882 event = L2CAP_EV_RECV_IFRAME;
4883 err = l2cap_rx(chan, control, skb, event);
4885 err = l2cap_stream_rx(chan, control, skb);
4889 l2cap_send_disconn_req(chan->conn, chan,
4892 const u8 rx_func_to_event[4] = {
4893 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
4894 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
4897 /* Only I-frames are expected in streaming mode */
4898 if (chan->mode == L2CAP_MODE_STREAMING)
4901 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
4902 control->reqseq, control->final, control->poll,
4907 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4911 /* Validate F and P bits */
4912 if (control->final && (control->poll ||
4913 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
4916 event = rx_func_to_event[control->super];
4917 if (l2cap_rx(chan, control, skb, event))
4918 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4928 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4930 struct l2cap_chan *chan;
4932 chan = l2cap_get_chan_by_scid(conn, cid);
4934 BT_DBG("unknown cid 0x%4.4x", cid);
4935 /* Drop packet and return */
4940 BT_DBG("chan %p, len %d", chan, skb->len);
4942 if (chan->state != BT_CONNECTED)
4945 switch (chan->mode) {
4946 case L2CAP_MODE_BASIC:
4947 /* If socket recv buffers overflows we drop data here
4948 * which is *bad* because L2CAP has to be reliable.
4949 * But we don't have any other choice. L2CAP doesn't
4950 * provide flow control mechanism. */
4952 if (chan->imtu < skb->len)
4955 if (!chan->ops->recv(chan->data, skb))
4959 case L2CAP_MODE_ERTM:
4960 case L2CAP_MODE_STREAMING:
4961 l2cap_data_rcv(chan, skb);
4965 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4973 l2cap_chan_unlock(chan);
4978 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4980 struct l2cap_chan *chan;
4982 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
4986 BT_DBG("chan %p, len %d", chan, skb->len);
4988 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4991 if (chan->imtu < skb->len)
4994 if (!chan->ops->recv(chan->data, skb))
5003 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5004 struct sk_buff *skb)
5006 struct l2cap_chan *chan;
5008 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5012 BT_DBG("chan %p, len %d", chan, skb->len);
5014 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5017 if (chan->imtu < skb->len)
5020 if (!chan->ops->recv(chan->data, skb))
5029 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5031 struct l2cap_hdr *lh = (void *) skb->data;
5035 skb_pull(skb, L2CAP_HDR_SIZE);
5036 cid = __le16_to_cpu(lh->cid);
5037 len = __le16_to_cpu(lh->len);
5039 if (len != skb->len) {
5044 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5047 case L2CAP_CID_LE_SIGNALING:
5048 case L2CAP_CID_SIGNALING:
5049 l2cap_sig_channel(conn, skb);
5052 case L2CAP_CID_CONN_LESS:
5053 psm = get_unaligned((__le16 *) skb->data);
5055 l2cap_conless_channel(conn, psm, skb);
5058 case L2CAP_CID_LE_DATA:
5059 l2cap_att_channel(conn, cid, skb);
5063 if (smp_sig_channel(conn, skb))
5064 l2cap_conn_del(conn->hcon, EACCES);
5068 l2cap_data_channel(conn, cid, skb);
5073 /* ---- L2CAP interface with lower layer (HCI) ---- */
5075 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5077 int exact = 0, lm1 = 0, lm2 = 0;
5078 struct l2cap_chan *c;
5080 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5082 /* Find listening sockets and check their link_mode */
5083 read_lock(&chan_list_lock);
5084 list_for_each_entry(c, &chan_list, global_l) {
5085 struct sock *sk = c->sk;
5087 if (c->state != BT_LISTEN)
5090 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5091 lm1 |= HCI_LM_ACCEPT;
5092 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5093 lm1 |= HCI_LM_MASTER;
5095 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5096 lm2 |= HCI_LM_ACCEPT;
5097 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5098 lm2 |= HCI_LM_MASTER;
5101 read_unlock(&chan_list_lock);
5103 return exact ? lm1 : lm2;
5106 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5108 struct l2cap_conn *conn;
5110 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5113 conn = l2cap_conn_add(hcon, status);
5115 l2cap_conn_ready(conn);
5117 l2cap_conn_del(hcon, bt_to_errno(status));
5122 int l2cap_disconn_ind(struct hci_conn *hcon)
5124 struct l2cap_conn *conn = hcon->l2cap_data;
5126 BT_DBG("hcon %p", hcon);
5129 return HCI_ERROR_REMOTE_USER_TERM;
5130 return conn->disc_reason;
5133 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5135 BT_DBG("hcon %p reason %d", hcon, reason);
5137 l2cap_conn_del(hcon, bt_to_errno(reason));
5141 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5143 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5146 if (encrypt == 0x00) {
5147 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5148 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5149 } else if (chan->sec_level == BT_SECURITY_HIGH)
5150 l2cap_chan_close(chan, ECONNREFUSED);
5152 if (chan->sec_level == BT_SECURITY_MEDIUM)
5153 __clear_chan_timer(chan);
5157 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5159 struct l2cap_conn *conn = hcon->l2cap_data;
5160 struct l2cap_chan *chan;
5165 BT_DBG("conn %p", conn);
5167 if (hcon->type == LE_LINK) {
5168 if (!status && encrypt)
5169 smp_distribute_keys(conn, 0);
5170 cancel_delayed_work(&conn->security_timer);
5173 mutex_lock(&conn->chan_lock);
5175 list_for_each_entry(chan, &conn->chan_l, list) {
5176 l2cap_chan_lock(chan);
5178 BT_DBG("chan->scid %d", chan->scid);
5180 if (chan->scid == L2CAP_CID_LE_DATA) {
5181 if (!status && encrypt) {
5182 chan->sec_level = hcon->sec_level;
5183 l2cap_chan_ready(chan);
5186 l2cap_chan_unlock(chan);
5190 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5191 l2cap_chan_unlock(chan);
5195 if (!status && (chan->state == BT_CONNECTED ||
5196 chan->state == BT_CONFIG)) {
5197 struct sock *sk = chan->sk;
5199 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5200 sk->sk_state_change(sk);
5202 l2cap_check_encryption(chan, encrypt);
5203 l2cap_chan_unlock(chan);
5207 if (chan->state == BT_CONNECT) {
5209 l2cap_send_conn_req(chan);
5211 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5213 } else if (chan->state == BT_CONNECT2) {
5214 struct sock *sk = chan->sk;
5215 struct l2cap_conn_rsp rsp;
5221 if (test_bit(BT_SK_DEFER_SETUP,
5222 &bt_sk(sk)->flags)) {
5223 struct sock *parent = bt_sk(sk)->parent;
5224 res = L2CAP_CR_PEND;
5225 stat = L2CAP_CS_AUTHOR_PEND;
5227 parent->sk_data_ready(parent, 0);
5229 __l2cap_state_change(chan, BT_CONFIG);
5230 res = L2CAP_CR_SUCCESS;
5231 stat = L2CAP_CS_NO_INFO;
5234 __l2cap_state_change(chan, BT_DISCONN);
5235 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5236 res = L2CAP_CR_SEC_BLOCK;
5237 stat = L2CAP_CS_NO_INFO;
5242 rsp.scid = cpu_to_le16(chan->dcid);
5243 rsp.dcid = cpu_to_le16(chan->scid);
5244 rsp.result = cpu_to_le16(res);
5245 rsp.status = cpu_to_le16(stat);
5246 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5250 l2cap_chan_unlock(chan);
5253 mutex_unlock(&conn->chan_lock);
5258 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5260 struct l2cap_conn *conn = hcon->l2cap_data;
5263 conn = l2cap_conn_add(hcon, 0);
5268 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5270 if (!(flags & ACL_CONT)) {
5271 struct l2cap_hdr *hdr;
5275 BT_ERR("Unexpected start frame (len %d)", skb->len);
5276 kfree_skb(conn->rx_skb);
5277 conn->rx_skb = NULL;
5279 l2cap_conn_unreliable(conn, ECOMM);
5282 /* Start fragment always begin with Basic L2CAP header */
5283 if (skb->len < L2CAP_HDR_SIZE) {
5284 BT_ERR("Frame is too short (len %d)", skb->len);
5285 l2cap_conn_unreliable(conn, ECOMM);
5289 hdr = (struct l2cap_hdr *) skb->data;
5290 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5292 if (len == skb->len) {
5293 /* Complete frame received */
5294 l2cap_recv_frame(conn, skb);
5298 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5300 if (skb->len > len) {
5301 BT_ERR("Frame is too long (len %d, expected len %d)",
5303 l2cap_conn_unreliable(conn, ECOMM);
5307 /* Allocate skb for the complete frame (with header) */
5308 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5312 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5314 conn->rx_len = len - skb->len;
5316 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5318 if (!conn->rx_len) {
5319 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5320 l2cap_conn_unreliable(conn, ECOMM);
5324 if (skb->len > conn->rx_len) {
5325 BT_ERR("Fragment is too long (len %d, expected %d)",
5326 skb->len, conn->rx_len);
5327 kfree_skb(conn->rx_skb);
5328 conn->rx_skb = NULL;
5330 l2cap_conn_unreliable(conn, ECOMM);
5334 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5336 conn->rx_len -= skb->len;
5338 if (!conn->rx_len) {
5339 /* Complete frame received */
5340 l2cap_recv_frame(conn, conn->rx_skb);
5341 conn->rx_skb = NULL;
5350 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5352 struct l2cap_chan *c;
5354 read_lock(&chan_list_lock);
5356 list_for_each_entry(c, &chan_list, global_l) {
5357 struct sock *sk = c->sk;
5359 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5360 batostr(&bt_sk(sk)->src),
5361 batostr(&bt_sk(sk)->dst),
5362 c->state, __le16_to_cpu(c->psm),
5363 c->scid, c->dcid, c->imtu, c->omtu,
5364 c->sec_level, c->mode);
5367 read_unlock(&chan_list_lock);
5372 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5374 return single_open(file, l2cap_debugfs_show, inode->i_private);
5377 static const struct file_operations l2cap_debugfs_fops = {
5378 .open = l2cap_debugfs_open,
5380 .llseek = seq_lseek,
5381 .release = single_release,
5384 static struct dentry *l2cap_debugfs;
5386 int __init l2cap_init(void)
5390 err = l2cap_init_sockets();
5395 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5396 bt_debugfs, NULL, &l2cap_debugfs_fops);
5398 BT_ERR("Failed to create L2CAP debug file");
5404 void l2cap_exit(void)
5406 debugfs_remove(l2cap_debugfs);
5407 l2cap_cleanup_sockets();
5410 module_param(disable_ertm, bool, 0644);
5411 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");