2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
/* Module knob to disable Enhanced Retransmission Mode.  Upstream default
 * is false (ERTM enabled); initialising it to 1 here silently turned off
 * ERTM/streaming support for every channel, so drop the bogus initialiser
 * and rely on the zero (false) default for globals. */
60 bool disable_ertm;

/* Locally supported feature mask and fixed-channel map advertised in
 * information responses. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Registry of every L2CAP channel in the system, guarded by chan_list_lock. */
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers defined later in this file. */
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
77 struct sk_buff_head *skbs, u8 event);
79 /* ---- L2CAP channels ---- */
/* Look up a channel on @conn by destination CID by walking conn->chan_l.
 * Caller is expected to hold conn->chan_lock.  NOTE(review): body is
 * truncated in this listing. */
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
85 list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on @conn by source CID by walking conn->chan_l.
 * Caller is expected to hold conn->chan_lock.  NOTE(review): body is
 * truncated in this listing. */
92 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
96 list_for_each_entry(c, &conn->chan_l, list) {
103 /* Find channel with given SCID.
104 * Returns locked channel. */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 struct l2cap_chan *c;
/* Serialize the lookup against channel add/remove on this connection. */
109 mutex_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
113 mutex_unlock(&conn->chan_lock);
/* Find the channel on @conn whose outstanding signalling identifier
 * matches @ident.  Caller is expected to hold conn->chan_lock. */
118 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
120 struct l2cap_chan *c;
122 list_for_each_entry(c, &conn->chan_l, list) {
123 if (c->ident == ident)
/* Search the global channel list for a channel bound to source port @psm
 * and source address @src.  Caller must hold chan_list_lock. */
129 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
131 struct l2cap_chan *c;
133 list_for_each_entry(c, &chan_list, global_l) {
134 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on @src.  With an explicit PSM, fail if it is already
 * taken; otherwise scan the dynamic range 0x1001..0x10ff (odd values only,
 * per the L2CAP spec) for a free one. */
140 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
144 write_lock(&chan_list_lock);
146 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* No PSM given: pick the first free odd value in the dynamic range. */
159 for (p = 0x1001; p < 0x1100; p += 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
161 chan->psm = cpu_to_le16(p);
162 chan->sport = cpu_to_le16(p);
169 write_unlock(&chan_list_lock);
/* Record a fixed source CID for @chan under chan_list_lock.
 * NOTE(review): the assignment itself is truncated in this listing. */
173 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
175 write_lock(&chan_list_lock);
179 write_unlock(&chan_list_lock);
/* Allocate the first unused dynamic CID on @conn by probing
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
184 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
186 u16 cid = L2CAP_CID_DYN_START;
188 for (; cid < L2CAP_CID_DYN_END; cid++) {
189 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition @chan to @state and notify the owner through the
 * state_change callback; lock-free variant (caller holds the sock lock). */
196 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
198 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
199 state_to_string(state));
202 chan->ops->state_change(chan->data, state);
/* Locked wrapper around __l2cap_state_change() (takes the socket lock
 * in the lines elided from this listing). */
205 static void l2cap_state_change(struct l2cap_chan *chan, int state)
207 struct sock *sk = chan->sk;
210 __l2cap_state_change(chan, state);
/* Record an error on the channel's socket; lock-free variant. */
214 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
216 struct sock *sk = chan->sk;
/* Locked wrapper around __l2cap_chan_set_err(). */
221 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
223 struct sock *sk = chan->sk;
226 __l2cap_chan_set_err(chan, err);
/* Scan an skb queue for the frame carrying ERTM tx sequence number @seq. */
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
/* Allocate and reset a sequence list sized for @size entries.  The backing
 * array is rounded up to a power of two so (seq & mask) can index it. */
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
256 size_t alloc_size, i;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size = roundup_pow_of_two(size);
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
/* Empty list: head/tail cleared, every slot marked unused. */
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array of a sequence list. */
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
279 kfree(seq_list->list);
/* O(1) membership test: a slot that is not CLEAR means @seq is queued. */
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
285 /* Constant-time check for list membership */
286 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove @seq from the list.  Removing the head is O(1); removing an
 * interior element walks the singly-linked chain to find its predecessor.
 * Returns L2CAP_SEQ_LIST_CLEAR when the list is empty or @seq is absent. */
289 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
291 u16 mask = seq_list->mask;
293 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR;
296 } else if (seq_list->head == seq) {
297 /* Head can be removed in constant time */
298 seq_list->head = seq_list->list[seq & mask];
299 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* Removed the last element: reset to the fully-empty state. */
301 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
302 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
303 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 /* Walk the list to find the sequence number */
307 u16 prev = seq_list->head;
308 while (seq_list->list[prev & mask] != seq) {
309 prev = seq_list->list[prev & mask];
310 if (prev == L2CAP_SEQ_LIST_TAIL)
311 return L2CAP_SEQ_LIST_CLEAR;
314 /* Unlink the number from the list and clear it */
315 seq_list->list[prev & mask] = seq_list->list[seq & mask];
316 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
317 if (seq_list->tail == seq)
318 seq_list->tail = prev;
/* Pop and return the current head of the list (O(1)). */
323 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Empty the list: clear every slot and reset head/tail.  A no-op when the
 * list is already empty. */
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq to the tail in O(1).  Duplicates are rejected via the
 * constant-time membership check; the new tail slot is marked with
 * L2CAP_SEQ_LIST_TAIL as the chain terminator. */
343 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
345 u16 mask = seq_list->mask;
347 /* All appends happen in constant time */
349 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
/* First element also becomes the head. */
352 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
353 seq_list->head = seq;
355 seq_list->list[seq_list->tail & mask] = seq;
357 seq_list->tail = seq;
358 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with a
 * reason derived from its current state, then drop the timer's reference. */
361 static void l2cap_chan_timeout(struct work_struct *work)
363 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
365 struct l2cap_conn *conn = chan->conn;
368 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
370 mutex_lock(&conn->chan_lock);
371 l2cap_chan_lock(chan);
/* Timed out while connected/configuring, or while connecting past the
 * SDP security level: treat as connection refused. */
373 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
374 reason = ECONNREFUSED;
375 else if (chan->state == BT_CONNECT &&
376 chan->sec_level != BT_SECURITY_SDP)
377 reason = ECONNREFUSED;
381 l2cap_chan_close(chan, reason);
383 l2cap_chan_unlock(chan);
385 chan->ops->close(chan->data);
386 mutex_unlock(&conn->chan_lock);
/* Release the reference held by the timer. */
388 l2cap_chan_put(chan);
/* Allocate a new channel, register it on the global list, arm its timer
 * work and hand back a reference-counted chan in BT_OPEN state. */
391 struct l2cap_chan *l2cap_chan_create(void)
393 struct l2cap_chan *chan;
395 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
399 mutex_init(&chan->lock);
401 write_lock(&chan_list_lock);
402 list_add(&chan->global_l, &chan_list);
403 write_unlock(&chan_list_lock);
405 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
407 chan->state = BT_OPEN;
/* Initial reference owned by the creator. */
409 atomic_set(&chan->refcnt, 1);
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
414 BT_DBG("chan %p", chan);
/* Unregister @chan from the global list and drop the creator's reference. */
419 void l2cap_chan_destroy(struct l2cap_chan *chan)
421 write_lock(&chan_list_lock);
422 list_del(&chan->global_l);
423 write_unlock(&chan_list_lock);
425 l2cap_chan_put(chan);
/* Reset a channel to its default ERTM/security parameters. */
428 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
430 chan->fcs = L2CAP_FCS_CRC16;
431 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
432 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
433 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
434 chan->sec_level = BT_SECURITY_LOW;
436 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach @chan to @conn: assign CIDs/MTU according to the channel type and
 * link type, seed the default flow-spec values and link the channel into
 * conn->chan_l.  Caller must hold conn->chan_lock. */
439 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
441 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
442 __le16_to_cpu(chan->psm), chan->dcid);
444 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
448 switch (chan->chan_type) {
449 case L2CAP_CHAN_CONN_ORIENTED:
450 if (conn->hcon->type == LE_LINK) {
/* LE links use the fixed LE data channel, not a dynamic CID. */
452 chan->omtu = L2CAP_LE_DEFAULT_MTU;
453 chan->scid = L2CAP_CID_LE_DATA;
454 chan->dcid = L2CAP_CID_LE_DATA;
456 /* Alloc CID for connection-oriented socket */
457 chan->scid = l2cap_alloc_cid(conn);
458 chan->omtu = L2CAP_DEFAULT_MTU;
462 case L2CAP_CHAN_CONN_LESS:
463 /* Connectionless socket */
464 chan->scid = L2CAP_CID_CONN_LESS;
465 chan->dcid = L2CAP_CID_CONN_LESS;
466 chan->omtu = L2CAP_DEFAULT_MTU;
470 /* Raw socket can send/recv signalling messages only */
471 chan->scid = L2CAP_CID_SIGNALING;
472 chan->dcid = L2CAP_CID_SIGNALING;
473 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort flow specification. */
476 chan->local_id = L2CAP_BESTEFFORT_ID;
477 chan->local_stype = L2CAP_SERV_BESTEFFORT;
478 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
479 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
480 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
481 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* The connection's channel list holds its own reference. */
483 l2cap_chan_hold(chan);
485 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
488 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
490 mutex_lock(&conn->chan_lock);
491 __l2cap_chan_add(conn, chan);
492 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection: stop timers, unlink it, propagate
 * @err to the socket, wake any accepting parent and tear down all
 * ERTM queues and sequence lists. */
495 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
497 struct sock *sk = chan->sk;
498 struct l2cap_conn *conn = chan->conn;
499 struct sock *parent = bt_sk(sk)->parent;
501 __clear_chan_timer(chan);
503 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
506 /* Delete from channel list */
507 list_del(&chan->list);
/* Drop the reference taken by __l2cap_chan_add(). */
509 l2cap_chan_put(chan);
512 hci_conn_put(conn->hcon);
517 __l2cap_state_change(chan, BT_CLOSED);
518 sock_set_flag(sk, SOCK_ZAPPED);
521 __l2cap_chan_set_err(chan, err);
/* Pending on a listening socket: unlink and notify the parent. */
524 bt_accept_unlink(sk);
525 parent->sk_data_ready(parent, 0);
527 sk->sk_state_change(sk);
531 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
534 skb_queue_purge(&chan->tx_q);
536 if (chan->mode == L2CAP_MODE_ERTM) {
537 struct srej_list *l, *tmp;
539 __clear_retrans_timer(chan);
540 __clear_monitor_timer(chan);
541 __clear_ack_timer(chan);
543 skb_queue_purge(&chan->srej_q);
545 l2cap_seq_list_free(&chan->srej_list);
546 l2cap_seq_list_free(&chan->retrans_list);
547 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted channel queued on a listening socket. */
554 static void l2cap_chan_cleanup_listen(struct sock *parent)
558 BT_DBG("parent %p", parent);
560 /* Close not yet accepted channels */
561 while ((sk = bt_accept_dequeue(parent, NULL))) {
562 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
564 l2cap_chan_lock(chan);
565 __clear_chan_timer(chan);
566 l2cap_chan_close(chan, ECONNRESET);
567 l2cap_chan_unlock(chan);
569 chan->ops->close(chan->data);
/* Close @chan according to its current state: listening sockets clean up
 * their accept queue; connected ACL channels send a disconnect request;
 * half-open incoming channels answer the pending connect request with a
 * reject; everything else is deleted outright. */
573 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
575 struct l2cap_conn *conn = chan->conn;
576 struct sock *sk = chan->sk;
578 BT_DBG("chan %p state %s sk %p", chan,
579 state_to_string(chan->state), sk);
581 switch (chan->state) {
584 l2cap_chan_cleanup_listen(sk);
586 __l2cap_state_change(chan, BT_CLOSED);
587 sock_set_flag(sk, SOCK_ZAPPED);
/* Connected/configuring ACL channel: initiate an orderly disconnect
 * and keep the channel alive until the response (timer armed). */
593 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
594 conn->hcon->type == ACL_LINK) {
595 __set_chan_timer(chan, sk->sk_sndtimeo);
596 l2cap_send_disconn_req(conn, chan, reason);
598 l2cap_chan_del(chan, reason);
/* Incoming connection still pending: reject it explicitly. */
602 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
603 conn->hcon->type == ACL_LINK) {
604 struct l2cap_conn_rsp rsp;
607 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
608 result = L2CAP_CR_SEC_BLOCK;
610 result = L2CAP_CR_BAD_PSM;
611 l2cap_state_change(chan, BT_DISCONN);
613 rsp.scid = cpu_to_le16(chan->dcid);
614 rsp.dcid = cpu_to_le16(chan->scid);
615 rsp.result = cpu_to_le16(result);
616 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
617 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
621 l2cap_chan_del(chan, reason);
626 l2cap_chan_del(chan, reason);
631 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel type / PSM / security level to the HCI authentication
 * requirement used when securing the link.  PSM 0x0001 (SDP) never
 * requires bonding; raw channels use dedicated bonding, ordinary
 * channels general bonding. */
637 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
639 if (chan->chan_type == L2CAP_CHAN_RAW) {
640 switch (chan->sec_level) {
641 case BT_SECURITY_HIGH:
642 return HCI_AT_DEDICATED_BONDING_MITM;
643 case BT_SECURITY_MEDIUM:
644 return HCI_AT_DEDICATED_BONDING;
646 return HCI_AT_NO_BONDING;
648 } else if (chan->psm == cpu_to_le16(0x0001)) {
/* SDP: downgrade LOW to the dedicated SDP security level. */
649 if (chan->sec_level == BT_SECURITY_LOW)
650 chan->sec_level = BT_SECURITY_SDP;
652 if (chan->sec_level == BT_SECURITY_HIGH)
653 return HCI_AT_NO_BONDING_MITM;
655 return HCI_AT_NO_BONDING;
657 switch (chan->sec_level) {
658 case BT_SECURITY_HIGH:
659 return HCI_AT_GENERAL_BONDING_MITM;
660 case BT_SECURITY_MEDIUM:
661 return HCI_AT_GENERAL_BONDING;
663 return HCI_AT_NO_BONDING;
668 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on its link;
 * returns the hci_conn_security() result. */
669 int l2cap_chan_check_security(struct l2cap_chan *chan)
671 struct l2cap_conn *conn = chan->conn;
674 auth_type = l2cap_get_auth_type(chan);
676 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling identifier for @conn, wrapping within the
 * kernel-reserved range under conn->lock. */
679 static u8 l2cap_get_ident(struct l2cap_conn *conn)
683 /* Get next available identificator.
684 * 1 - 128 are used by kernel.
685 * 129 - 199 are reserved.
686 * 200 - 254 are used by utilities like l2ping, etc.
689 spin_lock(&conn->lock);
691 if (++conn->tx_ident > 128)
696 spin_unlock(&conn->lock);
/* Build a signalling command PDU and transmit it on the connection's HCI
 * channel at maximum priority; non-flushable when the controller allows it. */
701 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
703 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
706 BT_DBG("code 0x%2.2x", code);
711 if (lmp_no_flush_capable(conn->hcon->hdev))
712 flags = ACL_START_NO_FLUSH;
716 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
717 skb->priority = HCI_PRIO_MAX;
719 hci_send_acl(conn->hchan, skb, flags);
/* Transmit a data skb for @chan over ACL, honouring the channel's
 * flushable and force-active flags. */
722 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
724 struct hci_conn *hcon = chan->conn->hcon;
727 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
730 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
731 lmp_no_flush_capable(hcon->hdev))
732 flags = ACL_START_NO_FLUSH;
736 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
737 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into @control: common reqseq and
 * final bits, then either S-frame (poll/super) or I-frame (sar/txseq)
 * fields depending on the frame-type bit. */
740 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
742 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
743 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
745 if (enh & L2CAP_CTRL_FRAME_TYPE) {
748 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
749 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
756 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
757 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field into @control; same structure as
 * the enhanced variant but with the wider EXT_CTRL bit layout. */
764 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
766 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
767 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
769 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
772 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
773 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
780 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
781 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Dispatch control-field decoding from the skb head: 32-bit extended form
 * when FLAG_EXT_CTRL is set, 16-bit enhanced form otherwise. */
788 static inline void __unpack_control(struct l2cap_chan *chan,
791 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
792 __unpack_extended_control(get_unaligned_le32(skb->data),
793 &bt_cb(skb)->control);
795 __unpack_enhanced_control(get_unaligned_le16(skb->data),
796 &bt_cb(skb)->control);
/* Encode @control into a 32-bit extended control word (inverse of
 * __unpack_extended_control). */
800 static u32 __pack_extended_control(struct l2cap_ctrl *control)
804 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
805 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
807 if (control->sframe) {
808 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
809 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
810 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
812 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
813 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode @control into a 16-bit enhanced control word (inverse of
 * __unpack_enhanced_control). */
819 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
823 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
824 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
826 if (control->sframe) {
827 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
828 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
829 packed |= L2CAP_CTRL_FRAME_TYPE;
831 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
832 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into the skb just after the L2CAP basic
 * header, choosing the extended or enhanced encoding per channel flags. */
838 static inline void __pack_control(struct l2cap_chan *chan,
839 struct l2cap_ctrl *control,
842 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
843 put_unaligned_le32(__pack_extended_control(control),
844 skb->data + L2CAP_HDR_SIZE);
846 put_unaligned_le16(__pack_enhanced_control(control),
847 skb->data + L2CAP_HDR_SIZE);
/* Build and send an ERTM S-frame carrying @control.  Adds F/P bits when
 * pending, appends an FCS when CRC16 is negotiated, and only transmits on
 * a connected channel. */
851 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
854 struct l2cap_hdr *lh;
855 struct l2cap_conn *conn = chan->conn;
858 if (chan->state != BT_CONNECTED)
/* Header size depends on enhanced vs extended control field. */
861 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
862 hlen = L2CAP_EXT_HDR_SIZE;
864 hlen = L2CAP_ENH_HDR_SIZE;
866 if (chan->fcs == L2CAP_FCS_CRC16)
867 hlen += L2CAP_FCS_SIZE;
869 BT_DBG("chan %p, control 0x%8.8x", chan, control);
871 count = min_t(unsigned int, conn->mtu, hlen);
873 control |= __set_sframe(chan);
/* Fold in any pending final/poll bits exactly once. */
875 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
876 control |= __set_ctrl_final(chan);
878 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
879 control |= __set_ctrl_poll(chan);
881 skb = bt_skb_alloc(count, GFP_ATOMIC);
885 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
886 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
887 lh->cid = cpu_to_le16(chan->dcid);
889 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
/* FCS covers everything transmitted so far. */
891 if (chan->fcs == L2CAP_FCS_CRC16) {
892 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
893 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
896 skb->priority = HCI_PRIO_MAX;
897 l2cap_do_send(chan, skb);
/* Send RNR when the local side is busy (remembering that we did), else RR,
 * always acknowledging up to buffer_seq. */
900 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
902 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
903 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
904 set_bit(CONN_RNR_SENT, &chan->conn_state);
906 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
908 control |= __set_reqseq(chan, chan->buffer_seq);
910 l2cap_send_sframe(chan, control);
/* True when no connect request is outstanding for this channel. */
913 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
915 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send an L2CAP connect request for @chan with a freshly allocated
 * signalling identifier, marking the connect as pending. */
918 static void l2cap_send_conn_req(struct l2cap_chan *chan)
920 struct l2cap_conn *conn = chan->conn;
921 struct l2cap_conn_req req;
923 req.scid = cpu_to_le16(chan->scid);
926 chan->ident = l2cap_get_ident(conn);
928 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
930 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Mark @chan fully connected: clear all configuration flags and the
 * channel timer, move to BT_CONNECTED and wake the socket (and any
 * accepting parent). */
933 static void l2cap_chan_ready(struct l2cap_chan *chan)
935 struct sock *sk = chan->sk;
940 parent = bt_sk(sk)->parent;
942 BT_DBG("sk %p, parent %p", sk, parent);
944 /* This clears all conf flags, including CONF_NOT_COMPLETE */
945 chan->conf_state = 0;
946 __clear_chan_timer(chan);
948 __l2cap_state_change(chan, BT_CONNECTED);
949 sk->sk_state_change(sk);
952 parent->sk_data_ready(parent, 0);
/* Kick off channel establishment.  LE links are ready immediately; on ACL
 * links either send the connect request (if the feature-mask exchange is
 * done and security passes) or start that exchange first. */
957 static void l2cap_do_start(struct l2cap_chan *chan)
959 struct l2cap_conn *conn = chan->conn;
961 if (conn->hcon->type == LE_LINK) {
962 l2cap_chan_ready(chan);
966 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature request already sent: wait until it completes. */
967 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
970 if (l2cap_chan_check_security(chan) &&
971 __l2cap_no_conn_pending(chan))
972 l2cap_send_conn_req(chan);
974 struct l2cap_info_req req;
975 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
977 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
978 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the information response. */
980 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
982 l2cap_send_cmd(conn, conn->info_ident,
983 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether @mode (ERTM/streaming) is supported by both the local
 * feature mask and the remote @feat_mask. */
987 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
989 u32 local_feat_mask = l2cap_feat_mask;
991 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
994 case L2CAP_MODE_ERTM:
995 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
996 case L2CAP_MODE_STREAMING:
997 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP disconnect request for @chan (stopping ERTM timers first),
 * move the channel to BT_DISCONN and record @err on the socket. */
1003 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1005 struct sock *sk = chan->sk;
1006 struct l2cap_disconn_req req;
1011 if (chan->mode == L2CAP_MODE_ERTM) {
1012 __clear_retrans_timer(chan);
1013 __clear_monitor_timer(chan);
1014 __clear_ack_timer(chan);
1017 req.dcid = cpu_to_le16(chan->dcid);
1018 req.scid = cpu_to_le16(chan->scid);
1019 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1020 L2CAP_DISCONN_REQ, sizeof(req), &req);
1023 __l2cap_state_change(chan, BT_DISCONN);
1024 __l2cap_chan_set_err(chan, err);
1028 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on @conn forward: BT_CONNECT
 * channels (re)send connect requests once security allows; BT_CONNECT2
 * channels answer their pending connect request and, on success, start
 * configuration.  Called e.g. after the feature-mask exchange completes. */
1029 static void l2cap_conn_start(struct l2cap_conn *conn)
1031 struct l2cap_chan *chan, *tmp;
1033 BT_DBG("conn %p", conn);
1035 mutex_lock(&conn->chan_lock);
1037 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1038 struct sock *sk = chan->sk;
1040 l2cap_chan_lock(chan);
1042 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1043 l2cap_chan_unlock(chan);
1047 if (chan->state == BT_CONNECT) {
1048 if (!l2cap_chan_check_security(chan) ||
1049 !__l2cap_no_conn_pending(chan)) {
1050 l2cap_chan_unlock(chan);
/* State-2 device with an unsupported mode: give up on the channel. */
1054 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1055 && test_bit(CONF_STATE2_DEVICE,
1056 &chan->conf_state)) {
1057 l2cap_chan_close(chan, ECONNRESET);
1058 l2cap_chan_unlock(chan);
1062 l2cap_send_conn_req(chan);
1064 } else if (chan->state == BT_CONNECT2) {
1065 struct l2cap_conn_rsp rsp;
1067 rsp.scid = cpu_to_le16(chan->dcid);
1068 rsp.dcid = cpu_to_le16(chan->scid);
1070 if (l2cap_chan_check_security(chan)) {
/* Deferred setup: report pending and let the parent decide. */
1072 if (test_bit(BT_SK_DEFER_SETUP,
1073 &bt_sk(sk)->flags)) {
1074 struct sock *parent = bt_sk(sk)->parent;
1075 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1076 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1078 parent->sk_data_ready(parent, 0);
1081 __l2cap_state_change(chan, BT_CONFIG);
1082 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1083 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1087 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1088 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1091 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only fire the first config request, and only on success. */
1094 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1095 rsp.result != L2CAP_CR_SUCCESS) {
1096 l2cap_chan_unlock(chan);
1100 set_bit(CONF_REQ_SENT, &chan->conf_state);
1101 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1102 l2cap_build_conf_req(chan, buf), buf);
1103 chan->num_conf_req++;
1106 l2cap_chan_unlock(chan);
1109 mutex_unlock(&conn->chan_lock);
1112 /* Find socket with cid and source/destination bdaddr.
1113 * Returns closest match, locked.
/* Exact src+dst matches win immediately; otherwise remember the best
 * wildcard (BDADDR_ANY) candidate and return it after the scan. */
1115 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1119 struct l2cap_chan *c, *c1 = NULL;
1121 read_lock(&chan_list_lock);
1123 list_for_each_entry(c, &chan_list, global_l) {
1124 struct sock *sk = c->sk;
1126 if (state && c->state != state)
1129 if (c->scid == cid) {
1130 int src_match, dst_match;
1131 int src_any, dst_any;
1134 src_match = !bacmp(&bt_sk(sk)->src, src);
1135 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1136 if (src_match && dst_match) {
1137 read_unlock(&chan_list_lock);
1142 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1143 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1144 if ((src_match && dst_any) || (src_any && dst_match) ||
1145 (src_any && dst_any))
1150 read_unlock(&chan_list_lock);
/* Handle an incoming LE connection: find a listener on the LE data CID,
 * clone a child channel, enqueue it on the parent's accept queue and mark
 * it connected. */
1155 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1157 struct sock *parent, *sk;
1158 struct l2cap_chan *chan, *pchan;
1162 /* Check if we have socket listening on cid */
1163 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1164 conn->src, conn->dst);
1172 /* Check for backlog size */
1173 if (sk_acceptq_is_full(parent)) {
1174 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1178 chan = pchan->ops->new_connection(pchan->data);
/* Keep the underlying HCI connection alive for the child channel. */
1184 hci_conn_hold(conn->hcon);
1186 bacpy(&bt_sk(sk)->src, conn->src);
1187 bacpy(&bt_sk(sk)->dst, conn->dst);
1189 bt_accept_enqueue(parent, sk);
1191 l2cap_chan_add(conn, chan);
1193 __set_chan_timer(chan, sk->sk_sndtimeo);
1195 __l2cap_state_change(chan, BT_CONNECTED);
1196 parent->sk_data_ready(parent, 0);
1199 release_sock(parent);
/* Run when the HCI link comes up: accept incoming LE connections, elevate
 * LE security for outgoing ones, then bring each existing channel forward
 * (LE channels become ready via SMP; non-connection-oriented channels are
 * connected immediately; BT_CONNECT channels start establishment). */
1202 static void l2cap_conn_ready(struct l2cap_conn *conn)
1204 struct l2cap_chan *chan;
1206 BT_DBG("conn %p", conn);
1208 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1209 l2cap_le_conn_ready(conn);
1211 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1212 smp_conn_security(conn, conn->hcon->pending_sec_level);
1214 mutex_lock(&conn->chan_lock);
1216 list_for_each_entry(chan, &conn->chan_l, list) {
1218 l2cap_chan_lock(chan);
1220 if (conn->hcon->type == LE_LINK) {
1221 if (smp_conn_security(conn, chan->sec_level))
1222 l2cap_chan_ready(chan);
1224 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1225 struct sock *sk = chan->sk;
1226 __clear_chan_timer(chan);
1228 __l2cap_state_change(chan, BT_CONNECTED);
1229 sk->sk_state_change(sk);
1232 } else if (chan->state == BT_CONNECT)
1233 l2cap_do_start(chan);
1235 l2cap_chan_unlock(chan);
1238 mutex_unlock(&conn->chan_lock);
1241 /* Notify sockets that we cannot guaranty reliability anymore */
/* Flag @err on every channel that demanded reliable delivery
 * (FLAG_FORCE_RELIABLE). */
1242 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1244 struct l2cap_chan *chan;
1246 BT_DBG("conn %p", conn);
1248 mutex_lock(&conn->chan_lock);
1250 list_for_each_entry(chan, &conn->chan_l, list) {
1251 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1252 __l2cap_chan_set_err(chan, err);
1255 mutex_unlock(&conn->chan_lock);
/* Information-request timer expired: consider the feature-mask exchange
 * done and resume channel establishment. */
1258 static void l2cap_info_timeout(struct work_struct *work)
1260 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1263 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1264 conn->info_ident = 0;
1266 l2cap_conn_start(conn);
/* Tear down the whole L2CAP connection on @hcon: delete every channel
 * (holding a temporary ref across close), drop the HCI channel, cancel
 * pending info/security work, and detach from the hci_conn. */
1269 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1271 struct l2cap_conn *conn = hcon->l2cap_data;
1272 struct l2cap_chan *chan, *l;
1277 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1279 kfree_skb(conn->rx_skb);
1281 mutex_lock(&conn->chan_lock);
1284 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
/* Temporary ref so the channel survives l2cap_chan_del(). */
1285 l2cap_chan_hold(chan);
1286 l2cap_chan_lock(chan);
1288 l2cap_chan_del(chan, err);
1290 l2cap_chan_unlock(chan);
1292 chan->ops->close(chan->data);
1293 l2cap_chan_put(chan);
1296 mutex_unlock(&conn->chan_lock);
1298 hci_chan_del(conn->hchan);
1300 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1301 cancel_delayed_work_sync(&conn->info_timer);
1303 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1304 cancel_delayed_work_sync(&conn->security_timer);
1305 smp_chan_destroy(conn);
1308 hcon->l2cap_data = NULL;
/* SMP security procedure timed out: drop the whole connection. */
1312 static void security_timeout(struct work_struct *work)
1314 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1315 security_timer.work);
1317 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon: allocate the HCI
 * channel and conn object, pick the MTU from the link type, and arm the
 * appropriate delayed work (SMP security timer for LE, info timer
 * otherwise). */
1320 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1322 struct l2cap_conn *conn = hcon->l2cap_data;
1323 struct hci_chan *hchan;
1328 hchan = hci_chan_create(hcon);
1332 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
/* Allocation failed: release the HCI channel we just created. */
1334 hci_chan_del(hchan);
1338 hcon->l2cap_data = conn;
1340 conn->hchan = hchan;
1342 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1344 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1345 conn->mtu = hcon->hdev->le_mtu;
1347 conn->mtu = hcon->hdev->acl_mtu;
1349 conn->src = &hcon->hdev->bdaddr;
1350 conn->dst = &hcon->dst;
1352 conn->feat_mask = 0;
1354 spin_lock_init(&conn->lock);
1355 mutex_init(&conn->chan_lock);
1357 INIT_LIST_HEAD(&conn->chan_l);
1359 if (hcon->type == LE_LINK)
1360 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1362 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1364 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1369 /* ---- Socket interface ---- */
1371 /* Find socket with psm and source / destination bdaddr.
1372 * Returns closest match.
/* Same scheme as l2cap_global_chan_by_scid(): exact src+dst match returns
 * immediately; wildcard (BDADDR_ANY) candidates are kept as fallback. */
1374 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1378 struct l2cap_chan *c, *c1 = NULL;
1380 read_lock(&chan_list_lock);
1382 list_for_each_entry(c, &chan_list, global_l) {
1383 struct sock *sk = c->sk;
1385 if (state && c->state != state)
1388 if (c->psm == psm) {
1389 int src_match, dst_match;
1390 int src_any, dst_any;
1393 src_match = !bacmp(&bt_sk(sk)->src, src);
1394 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1395 if (src_match && dst_match) {
1396 read_unlock(&chan_list_lock);
1401 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1402 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1403 if ((src_match && dst_any) || (src_any && dst_match) ||
1404 (src_any && dst_any))
1409 read_unlock(&chan_list_lock);
/* Connect @chan to @dst: validate the PSM/CID and channel mode, resolve
 * the route, create the ACL or LE link with the required security, attach
 * the channel to the resulting l2cap_conn and start establishment.
 * Returns 0 on success or a negative errno. */
1414 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1415 bdaddr_t *dst, u8 dst_type)
1417 struct sock *sk = chan->sk;
1418 bdaddr_t *src = &bt_sk(sk)->src;
1419 struct l2cap_conn *conn;
1420 struct hci_conn *hcon;
1421 struct hci_dev *hdev;
1425 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1426 dst_type, __le16_to_cpu(chan->psm));
1428 hdev = hci_get_route(dst, src);
1430 return -EHOSTUNREACH;
1434 l2cap_chan_lock(chan);
1436 /* PSM must be odd and lsb of upper byte must be 0 */
1437 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1438 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID. */
1443 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1448 switch (chan->mode) {
1449 case L2CAP_MODE_BASIC:
1451 case L2CAP_MODE_ERTM:
1452 case L2CAP_MODE_STREAMING:
1463 switch (sk->sk_state) {
1467 /* Already connecting */
1473 /* Already connected */
1489 /* Set destination address and psm */
1490 bacpy(&bt_sk(sk)->dst, dst);
1497 auth_type = l2cap_get_auth_type(chan);
/* LE data CID selects an LE link; everything else goes over ACL. */
1499 if (chan->dcid == L2CAP_CID_LE_DATA)
1500 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1501 chan->sec_level, auth_type);
1503 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1504 chan->sec_level, auth_type);
1507 err = PTR_ERR(hcon);
1511 conn = l2cap_conn_add(hcon, 0);
/* LE links carry at most one channel. */
1518 if (hcon->type == LE_LINK) {
1521 if (!list_empty(&conn->chan_l)) {
1530 /* Update source addr of the socket */
1531 bacpy(src, conn->src);
/* chan_add takes conn->chan_lock, so drop the channel lock around it. */
1533 l2cap_chan_unlock(chan);
1534 l2cap_chan_add(conn, chan);
1535 l2cap_chan_lock(chan);
1537 l2cap_state_change(chan, BT_CONNECT);
1538 __set_chan_timer(chan, sk->sk_sndtimeo);
1540 if (hcon->state == BT_CONNECTED) {
1541 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1542 __clear_chan_timer(chan);
1543 if (l2cap_chan_check_security(chan))
1544 l2cap_state_change(chan, BT_CONNECTED);
1546 l2cap_do_start(chan);
1552 l2cap_chan_unlock(chan);
1553 hci_dev_unlock(hdev);
/* Sleep (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged or the connection goes away.  Bails out early on a
 * pending signal or a socket error.
 */
1558 int __l2cap_wait_ack(struct sock *sk)
1560 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1561 DECLARE_WAITQUEUE(wait, current);
1565 add_wait_queue(sk_sleep(sk), &wait);
1566 set_current_state(TASK_INTERRUPTIBLE);
1567 while (chan->unacked_frames > 0 && chan->conn) {
/* Interrupted by a signal: translate to -EINTR/-ERESTARTSYS. */
1571 if (signal_pending(current)) {
1572 err = sock_intr_errno(timeo);
1577 timeo = schedule_timeout(timeo);
1579 set_current_state(TASK_INTERRUPTIBLE);
1581 err = sock_error(sk);
1585 set_current_state(TASK_RUNNING);
1586 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer (delayed work): poll the remote with an RR/RNR
 * S-frame; if the retry budget (remote_max_tx) is exhausted, tear the
 * channel down.  Drops the channel reference taken when the timer was
 * armed.
 */
1590 static void l2cap_monitor_timeout(struct work_struct *work)
1592 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1593 monitor_timer.work);
1595 BT_DBG("chan %p", chan);
1597 l2cap_chan_lock(chan);
/* Too many unanswered polls: abort the connection. */
1599 if (chan->retry_count >= chan->remote_max_tx) {
1600 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1601 l2cap_chan_unlock(chan);
1602 l2cap_chan_put(chan);
1606 chan->retry_count++;
1607 __set_monitor_timer(chan);
1609 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1610 l2cap_chan_unlock(chan);
1611 l2cap_chan_put(chan);
/* ERTM retransmission timer (delayed work): on expiry, enter the
 * WAIT_F phase — start the monitor timer, flag that we are waiting for
 * an F-bit, and poll the peer with an RR/RNR carrying the P bit.
 */
1614 static void l2cap_retrans_timeout(struct work_struct *work)
1616 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1617 retrans_timer.work);
1619 BT_DBG("chan %p", chan);
1621 l2cap_chan_lock(chan);
1623 chan->retry_count = 1;
1624 __set_monitor_timer(chan);
/* Expect the peer's next frame to carry the Final bit. */
1626 set_bit(CONN_WAIT_F, &chan->conn_state);
1628 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1630 l2cap_chan_unlock(chan);
1631 l2cap_chan_put(chan);
/* Release acknowledged I-frames from the head of the tx queue, up to
 * (but not including) expected_ack_seq, decrementing unacked_frames.
 * Once nothing is outstanding the retransmission timer is stopped.
 */
1634 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1636 struct sk_buff *skb;
1638 while ((skb = skb_peek(&chan->tx_q)) &&
1639 chan->unacked_frames) {
/* Stop at the first frame not yet acknowledged. */
1640 if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
1643 skb = skb_dequeue(&chan->tx_q);
1646 chan->unacked_frames--;
1649 if (!chan->unacked_frames)
1650 __clear_retrans_timer(chan);
/* Streaming-mode transmit: append @skbs to tx_q and send everything
 * immediately (no acknowledgements or retransmission in this mode).
 * Each PDU gets reqseq 0, a fresh txseq, a packed control field and,
 * when enabled, a trailing CRC16 FCS.
 */
1653 static int l2cap_streaming_send(struct l2cap_chan *chan,
1654 struct sk_buff_head *skbs)
1656 struct sk_buff *skb;
1657 struct l2cap_ctrl *control;
1659 BT_DBG("chan %p, skbs %p", chan, skbs);
1661 if (chan->state != BT_CONNECTED)
1664 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1666 while (!skb_queue_empty(&chan->tx_q)) {
1668 skb = skb_dequeue(&chan->tx_q);
1670 bt_cb(skb)->control.retries = 1;
1671 control = &bt_cb(skb)->control;
/* Streaming mode never acknowledges, so reqseq stays 0. */
1673 control->reqseq = 0;
1674 control->txseq = chan->next_tx_seq;
1676 __pack_control(chan, control, skb);
/* FCS covers header + payload and is appended at the tail. */
1678 if (chan->fcs == L2CAP_FCS_CRC16) {
1679 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1680 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1683 l2cap_do_send(chan, skb);
1685 BT_DBG("Sent txseq %d", (int)control->txseq);
1687 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1688 chan->frames_sent++;
/* Retransmit the queued I-frame whose txseq equals @tx_seq.  The frame
 * is located in tx_q, cloned, its control field rebuilt (current
 * buffer_seq as reqseq, F bit if pending) and the FCS recomputed
 * before the clone is handed to l2cap_do_send().
 */
1694 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1696 struct sk_buff *skb, *tx_skb;
1700 skb = skb_peek(&chan->tx_q);
/* Walk tx_q until the requested sequence number is found. */
1704 while (bt_cb(skb)->control.txseq != tx_seq) {
1705 if (skb_queue_is_last(&chan->tx_q, skb))
1708 skb = skb_queue_next(&chan->tx_q, skb);
/* Retry budget exhausted (remote_max_tx of 0 means unlimited). */
1711 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1712 chan->remote_max_tx) {
1713 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1717 tx_skb = skb_clone(skb, GFP_ATOMIC);
1718 bt_cb(skb)->control.retries++;
/* Rebuild the control field: keep SAR bits, refresh reqseq/txseq. */
1720 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1721 control &= __get_sar_mask(chan);
1723 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1724 control |= __set_ctrl_final(chan);
1726 control |= __set_reqseq(chan, chan->buffer_seq);
1727 control |= __set_txseq(chan, tx_seq);
1729 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* Control field changed, so the trailing FCS must be recomputed. */
1731 if (chan->fcs == L2CAP_FCS_CRC16) {
1732 fcs = crc16(0, (u8 *)tx_skb->data,
1733 tx_skb->len - L2CAP_FCS_SIZE);
1734 put_unaligned_le16(fcs,
1735 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1738 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send frames from tx_send_head while the remote
 * tx window has room and the peer is not busy.  Each frame is cloned,
 * given a fresh control field (reqseq = buffer_seq, next txseq, SAR
 * bits, F bit if pending) and an updated FCS, then sent; the original
 * stays on tx_q for possible retransmission.
 */
1741 static int l2cap_ertm_send(struct l2cap_chan *chan)
1743 struct sk_buff *skb, *tx_skb;
1748 if (chan->state != BT_CONNECTED)
1751 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1754 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
/* Retry budget exhausted (remote_max_tx of 0 means unlimited). */
1756 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1757 chan->remote_max_tx) {
1758 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1762 tx_skb = skb_clone(skb, GFP_ATOMIC);
1764 bt_cb(skb)->control.retries++;
1766 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1767 control &= __get_sar_mask(chan);
1769 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1770 control |= __set_ctrl_final(chan);
1772 control |= __set_reqseq(chan, chan->buffer_seq);
1773 control |= __set_txseq(chan, chan->next_tx_seq);
1774 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1776 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS over the frame being transmitted.  Use
 * tx_skb consistently (as l2cap_retransmit_one_frame does);
 * skb_clone() shares the data buffer, so behaviour is the
 * same, but referencing the clone makes the intent clear.
 */
1778 if (chan->fcs == L2CAP_FCS_CRC16) {
1779 fcs = crc16(0, (u8 *)tx_skb->data,
1780 tx_skb->len - L2CAP_FCS_SIZE);
1781 put_unaligned_le16(fcs, tx_skb->data +
1782 tx_skb->len - L2CAP_FCS_SIZE);
1785 l2cap_do_send(chan, tx_skb);
1787 __set_retrans_timer(chan);
1789 bt_cb(skb)->control.txseq = chan->next_tx_seq;
1791 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* First transmission of this frame: it is now awaiting an ack. */
1793 if (bt_cb(skb)->control.retries == 1) {
1794 chan->unacked_frames++;
1797 __clear_ack_timer(chan);
1800 chan->frames_sent++;
1802 if (skb_queue_is_last(&chan->tx_q, skb))
1803 chan->tx_send_head = NULL;
1805 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Restart transmission from the oldest unacknowledged frame: rewind
 * tx_send_head to the head of tx_q and next_tx_seq to
 * expected_ack_seq, then run the normal ERTM send loop.
 */
1811 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1815 if (!skb_queue_empty(&chan->tx_q))
1816 chan->tx_send_head = chan->tx_q.next;
1818 chan->next_tx_seq = chan->expected_ack_seq;
1819 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR while locally busy, otherwise
 * try to piggyback the ack on pending I-frames via l2cap_ertm_send();
 * only if nothing was sent is an explicit RR S-frame emitted.
 */
1823 static void __l2cap_send_ack(struct l2cap_chan *chan)
1827 control |= __set_reqseq(chan, chan->buffer_seq);
/* Local receive buffers full: tell the peer we are not ready. */
1829 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1830 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1831 set_bit(CONN_RNR_SENT, &chan->conn_state);
1832 l2cap_send_sframe(chan, control);
/* I-frames sent carry the ack implicitly; no RR needed then. */
1836 if (l2cap_ertm_send(chan) > 0)
1839 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1840 l2cap_send_sframe(chan, control);
/* Cancel the pending delayed ack and acknowledge immediately. */
1843 static void l2cap_send_ack(struct l2cap_chan *chan)
1845 __clear_ack_timer(chan);
1846 __l2cap_send_ack(chan);
/* Send an SREJ S-frame with the F bit set, requesting the sequence
 * number stored in the last entry of the srej_l list.
 */
1849 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1851 struct srej_list *tail;
1854 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1855 control |= __set_ctrl_final(chan);
/* Use the most recently queued SREJ entry (tail of the list). */
1857 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1858 control |= __set_reqseq(chan, tail->tx_seq);
1860 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user data from @msg into @skb.  The first @count
 * bytes go into the skb's linear area; any remainder is split into
 * continuation fragments (each at most conn->mtu bytes) chained on
 * skb's frag_list, growing skb->len / skb->data_len accordingly.
 */
1863 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1864 struct msghdr *msg, int len,
1865 int count, struct sk_buff *skb)
1867 struct l2cap_conn *conn = chan->conn;
1868 struct sk_buff **frag;
/* First chunk lands in the already-allocated linear area. */
1871 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1877 /* Continuation fragments (no L2CAP header) */
1878 frag = &skb_shinfo(skb)->frag_list;
1880 struct sk_buff *tmp;
1882 count = min_t(unsigned int, conn->mtu, len);
/* Channel-specific allocator; may block per MSG_DONTWAIT. */
1884 tmp = chan->ops->alloc_skb(chan, count,
1885 msg->msg_flags & MSG_DONTWAIT);
1887 return PTR_ERR(tmp);
1891 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1894 (*frag)->priority = skb->priority;
/* Account the fragment against the head skb's totals. */
1899 skb->len += (*frag)->len;
1900 skb->data_len += (*frag)->len;
1902 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the user payload copied from @msg.  Returns the
 * skb or an ERR_PTR on allocation/copy failure.
 */
1908 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1909 struct msghdr *msg, size_t len,
1912 struct l2cap_conn *conn = chan->conn;
1913 struct sk_buff *skb;
1914 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1915 struct l2cap_hdr *lh;
1917 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
/* Linear part is capped by the HCI MTU; rest goes to frag_list. */
1919 count = min_t(unsigned int, (conn->mtu - hlen), len);
1921 skb = chan->ops->alloc_skb(chan, count + hlen,
1922 msg->msg_flags & MSG_DONTWAIT);
1926 skb->priority = priority;
1928 /* Create L2CAP header */
1929 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1930 lh->cid = cpu_to_le16(chan->dcid);
/* Length covers PSM + payload, not the basic header itself. */
1931 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1932 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1934 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1935 if (unlikely(err < 0)) {
1937 return ERR_PTR(err);
/* Build a basic-mode B-frame PDU: L2CAP header plus user payload from
 * @msg.  Returns the skb or an ERR_PTR on failure.
 */
1942 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1943 struct msghdr *msg, size_t len,
1946 struct l2cap_conn *conn = chan->conn;
1947 struct sk_buff *skb;
1949 struct l2cap_hdr *lh;
1951 BT_DBG("chan %p len %d", chan, (int)len);
1953 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1955 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1956 msg->msg_flags & MSG_DONTWAIT);
1960 skb->priority = priority;
1962 /* Create L2CAP header */
1963 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1964 lh->cid = cpu_to_le16(chan->dcid);
1965 lh->len = cpu_to_le16(len);
1967 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1968 if (unlikely(err < 0)) {
1970 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU.  Header size depends on the
 * control-field format (extended vs enhanced), plus an optional SDU
 * length field and an FCS placeholder.  The control field is written
 * as 0 here; the real value is packed at transmit time.
 */
1975 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1976 struct msghdr *msg, size_t len,
1979 struct l2cap_conn *conn = chan->conn;
1980 struct sk_buff *skb;
1981 int err, count, hlen;
1982 struct l2cap_hdr *lh;
1984 BT_DBG("chan %p len %d", chan, (int)len);
1987 return ERR_PTR(-ENOTCONN);
/* Extended control field is 4 bytes, enhanced is 2. */
1989 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1990 hlen = L2CAP_EXT_HDR_SIZE;
1992 hlen = L2CAP_ENH_HDR_SIZE;
1995 hlen += L2CAP_SDULEN_SIZE;
1997 if (chan->fcs == L2CAP_FCS_CRC16)
1998 hlen += L2CAP_FCS_SIZE;
2000 count = min_t(unsigned int, (conn->mtu - hlen), len);
2002 skb = chan->ops->alloc_skb(chan, count + hlen,
2003 msg->msg_flags & MSG_DONTWAIT);
2007 /* Create L2CAP header */
2008 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2009 lh->cid = cpu_to_le16(chan->dcid);
2010 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Placeholder control field; filled in by __pack_control later. */
2012 __put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));
2015 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2017 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2018 if (unlikely(err < 0)) {
2020 return ERR_PTR(err);
/* FCS placeholder; real CRC is computed at transmit time. */
2023 if (chan->fcs == L2CAP_FCS_CRC16)
2024 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
2026 bt_cb(skb)->control.retries = 0;
/* Segment an SDU from @msg into I-frame PDUs on @seg_queue.  PDU size
 * is bounded by the HCI MTU, the BR/EDR payload cap and the remote
 * MPS; SAR bits progress START -> CONTINUE... -> END (or UNSEGMENTED
 * when everything fits in one PDU).  On PDU-creation failure the queue
 * is purged and the error returned.
 */
2030 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2031 struct sk_buff_head *seg_queue,
2032 struct msghdr *msg, size_t len)
2034 struct sk_buff *skb;
2040 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2042 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2043 * so fragmented skbs are not used. The HCI layer's handling
2044 * of fragmented skbs is not compatible with ERTM's queueing.
2047 /* PDU size is derived from the HCI MTU */
2048 pdu_len = chan->conn->mtu;
2050 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2052 /* Adjust for largest possible L2CAP overhead. */
2053 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
2055 /* Remote device may have requested smaller PDUs */
2056 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2058 if (len <= pdu_len) {
2059 sar = L2CAP_SAR_UNSEGMENTED;
/* Start frame carries the SDU length, so it holds less payload. */
2063 sar = L2CAP_SAR_START;
2065 pdu_len -= L2CAP_SDULEN_SIZE;
2069 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2072 __skb_queue_purge(seg_queue);
2073 return PTR_ERR(skb);
2076 bt_cb(skb)->control.sar = sar;
2077 __skb_queue_tail(seg_queue, skb);
/* After the START frame, the SDU-length space is reclaimed. */
2082 pdu_len += L2CAP_SDULEN_SIZE;
2085 if (len <= pdu_len) {
2086 sar = L2CAP_SAR_END;
2089 sar = L2CAP_SAR_CONTINUE;
/* Top-level transmit entry point.  Dispatches on channel type/mode:
 * connectionless channels send a single G-frame; basic mode sends one
 * B-frame; ERTM/streaming segment the SDU first and then hand the
 * segment queue to the tx state machine (ERTM) or the streaming
 * sender.  Outgoing MTU is enforced before any PDU is built.
 */
2096 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2099 struct sk_buff *skb;
2101 struct sk_buff_head seg_queue;
2103 /* Connectionless channel */
2104 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2105 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2107 return PTR_ERR(skb);
2109 l2cap_do_send(chan, skb);
2113 switch (chan->mode) {
2114 case L2CAP_MODE_BASIC:
2115 /* Check outgoing MTU */
2116 if (len > chan->omtu)
2119 /* Create a basic PDU */
2120 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2122 return PTR_ERR(skb);
2124 l2cap_do_send(chan, skb);
2128 case L2CAP_MODE_ERTM:
2129 case L2CAP_MODE_STREAMING:
2130 /* Check outgoing MTU */
2131 if (len > chan->omtu) {
2136 __skb_queue_head_init(&seg_queue);
2138 /* Do segmentation before calling in to the state machine,
2139 * since it's possible to block while waiting for memory
2142 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2144 /* The channel could have been closed while segmenting,
2145 * check that it is still connected.
2147 if (chan->state != BT_CONNECTED) {
2148 __skb_queue_purge(&seg_queue);
2155 if (chan->mode == L2CAP_MODE_ERTM)
2156 err = l2cap_tx(chan, 0, &seg_queue,
2157 L2CAP_EV_DATA_REQUEST);
2159 err = l2cap_streaming_send(chan, &seg_queue);
2164 /* If the skbs were not queued for sending, they'll still be in
2165 * seg_queue and need to be purged.
2167 __skb_queue_purge(&seg_queue);
2171 BT_DBG("bad state %1.1x", chan->mode);
/* Process an acknowledgement: free every tx-queue frame with a
 * sequence number in [expected_ack_seq, reqseq), advance
 * expected_ack_seq and stop the retransmission timer once nothing is
 * outstanding.  No-op when there is nothing unacked or reqseq is
 * unchanged.
 */
2178 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2180 struct sk_buff *acked_skb;
2183 BT_DBG("chan %p, reqseq %d", chan, reqseq);
2185 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2188 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2189 chan->expected_ack_seq, chan->unacked_frames);
/* Walk sequence numbers modulo the window via __next_seq(). */
2191 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2192 ackseq = __next_seq(chan, ackseq)) {
2194 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2196 skb_unlink(acked_skb, &chan->tx_q);
2197 kfree_skb(acked_skb);
2198 chan->unacked_frames--;
2202 chan->expected_ack_seq = reqseq;
2204 if (chan->unacked_frames == 0)
2205 __clear_retrans_timer(chan);
2207 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: discard the out-of-order
 * receive backlog, clear the SREJ bookkeeping, rewind expected_tx_seq
 * to buffer_seq and return the rx state machine to RECV.
 */
2210 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2212 BT_DBG("chan %p", chan);
2214 chan->expected_tx_seq = chan->buffer_seq;
2215 l2cap_seq_list_clear(&chan->srej_list);
2216 skb_queue_purge(&chan->srej_q);
2217 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM transmit state machine, XMIT state: queue and send new data,
 * handle local-busy transitions, process acknowledgements, and move to
 * WAIT_F on an explicit poll or retransmission timeout.
 */
2220 static int l2cap_tx_state_xmit(struct l2cap_chan *chan,
2221 struct l2cap_ctrl *control,
2222 struct sk_buff_head *skbs, u8 event)
2226 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2230 case L2CAP_EV_DATA_REQUEST:
2231 if (chan->tx_send_head == NULL)
2232 chan->tx_send_head = skb_peek(skbs);
2234 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2235 l2cap_ertm_send(chan);
2237 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2238 BT_DBG("Enter LOCAL_BUSY");
2239 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2241 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2242 /* The SREJ_SENT state must be aborted if we are to
2243 * enter the LOCAL_BUSY state.
2245 l2cap_abort_rx_srej_sent(chan);
2248 l2cap_send_ack(chan);
2251 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2252 BT_DBG("Exit LOCAL_BUSY");
2253 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2255 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2256 struct l2cap_ctrl local_control;
/* NOTE(review): local_control is populated but the call below
 * passes 0, and this sframe helper takes a packed control word
 * elsewhere in this file (see __l2cap_send_ack) — looks like a
 * transitional artifact of the struct-based control rework;
 * confirm against the l2cap_send_sframe definition.
 */
2258 memset(&local_control, 0, sizeof(local_control));
2259 local_control.sframe = 1;
2260 local_control.super = L2CAP_SUPER_RR;
2261 local_control.poll = 1;
2262 local_control.reqseq = chan->buffer_seq;
2263 l2cap_send_sframe(chan, 0);
2265 chan->retry_count = 1;
2266 __set_monitor_timer(chan);
2267 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2270 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2271 l2cap_process_reqseq(chan, control->reqseq);
2273 case L2CAP_EV_EXPLICIT_POLL:
2274 l2cap_send_rr_or_rnr(chan, 1);
2275 chan->retry_count = 1;
2276 __set_monitor_timer(chan);
2277 __clear_ack_timer(chan);
2278 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2280 case L2CAP_EV_RETRANS_TO:
2281 l2cap_send_rr_or_rnr(chan, 1);
2282 chan->retry_count = 1;
2283 __set_monitor_timer(chan);
2284 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2286 case L2CAP_EV_RECV_FBIT:
2287 /* Nothing to process */
/* ERTM transmit state machine, WAIT_F state: new data is queued but
 * not sent while we wait for a frame with the Final bit.  Receiving
 * the F bit stops the monitor timer and returns to XMIT; a monitor
 * timeout re-polls until max_tx is exhausted, then disconnects.
 */
2296 static int l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2297 struct l2cap_ctrl *control,
2298 struct sk_buff_head *skbs, u8 event)
2302 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2306 case L2CAP_EV_DATA_REQUEST:
2307 if (chan->tx_send_head == NULL)
2308 chan->tx_send_head = skb_peek(skbs);
2309 /* Queue data, but don't send. */
2310 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2312 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2313 BT_DBG("Enter LOCAL_BUSY");
2314 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2316 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2317 /* The SREJ_SENT state must be aborted if we are to
2318 * enter the LOCAL_BUSY state.
2320 l2cap_abort_rx_srej_sent(chan);
2323 l2cap_send_ack(chan);
2326 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2327 BT_DBG("Exit LOCAL_BUSY");
2328 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2330 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2331 struct l2cap_ctrl local_control;
2332 memset(&local_control, 0, sizeof(local_control));
2333 local_control.sframe = 1;
2334 local_control.super = L2CAP_SUPER_RR;
2335 local_control.poll = 1;
2336 local_control.reqseq = chan->buffer_seq;
2337 l2cap_send_sframe(chan, 0);
2339 chan->retry_count = 1;
2340 __set_monitor_timer(chan);
2341 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2344 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2345 l2cap_process_reqseq(chan, control->reqseq);
2349 case L2CAP_EV_RECV_FBIT:
/* F bit received: leave WAIT_F, restart retransmission timing
 * for anything still unacknowledged.
 */
2350 if (control && control->final) {
2351 __clear_monitor_timer(chan);
2352 if (chan->unacked_frames > 0)
2353 __set_retrans_timer(chan);
2354 chan->retry_count = 0;
2355 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* Fixed format string: was "0x2.2%x", which printed the
 * literal "0x2.2" followed by the value; "%2.2x" is the
 * intended zero-padded two-digit hex conversion.
 */
2356 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2359 case L2CAP_EV_EXPLICIT_POLL:
/* Poll already in progress (max_tx of 0 means retry forever). */
2362 case L2CAP_EV_MONITOR_TO:
2363 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2364 l2cap_send_rr_or_rnr(chan, 1);
2365 __set_monitor_timer(chan);
2366 chan->retry_count++;
2368 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Dispatch a tx-state-machine event to the handler for the channel's
 * current transmit state (XMIT or WAIT_F).
 */
2378 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2379 struct sk_buff_head *skbs, u8 event)
2383 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2384 chan, control, skbs, event, chan->tx_state);
2386 switch (chan->tx_state) {
2387 case L2CAP_TX_STATE_XMIT:
2388 err = l2cap_tx_state_xmit(chan, control, skbs, event);
2390 case L2CAP_TX_STATE_WAIT_F:
2391 err = l2cap_tx_state_wait_f(chan, control, skbs, event);
2401 /* Copy frame to all raw sockets on that connection */
/* Clone @skb to every RAW-type channel on @conn (except the frame's
 * originating socket) and deliver via the channel's recv callback,
 * under conn->chan_lock.
 */
2402 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2404 struct sk_buff *nskb;
2405 struct l2cap_chan *chan;
2407 BT_DBG("conn %p", conn);
2409 mutex_lock(&conn->chan_lock);
2411 list_for_each_entry(chan, &conn->chan_l, list) {
2412 struct sock *sk = chan->sk;
2413 if (chan->chan_type != L2CAP_CHAN_RAW)
2416 /* Don't send frame to the socket it came from */
2419 nskb = skb_clone(skb, GFP_ATOMIC);
2423 if (chan->ops->recv(chan->data, nskb))
2427 mutex_unlock(&conn->chan_lock);
2430 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (CID chosen by
 * link type: LE vs BR/EDR signalling), command header (code, ident,
 * dlen) and @dlen bytes of @data.  Payload beyond the first fragment
 * is chained on frag_list, each fragment at most conn->mtu bytes.
 */
2431 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2432 u8 code, u8 ident, u16 dlen, void *data)
2434 struct sk_buff *skb, **frag;
2435 struct l2cap_cmd_hdr *cmd;
2436 struct l2cap_hdr *lh;
2439 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2440 conn, code, ident, dlen);
2442 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2443 count = min_t(unsigned int, conn->mtu, len);
2445 skb = bt_skb_alloc(count, GFP_ATOMIC);
2449 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2450 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the LE signalling CID, BR/EDR the classic one. */
2452 if (conn->hcon->type == LE_LINK)
2453 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2455 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2457 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2460 cmd->len = cpu_to_le16(dlen);
/* First fragment: whatever payload fits after the two headers. */
2463 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2464 memcpy(skb_put(skb, count), data, count);
2470 /* Continuation fragments (no L2CAP header) */
2471 frag = &skb_shinfo(skb)->frag_list;
2473 count = min_t(unsigned int, conn->mtu, len);
2475 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2479 memcpy(skb_put(*frag, count), data, count);
2484 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: report its type and length,
 * and return its value in *val — widened to unsigned long for 1/2/4
 * byte options, or as a pointer into the buffer for longer ones.
 * Returns the total option size consumed (advancing the caller's
 * parse position).
 */
2494 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2496 struct l2cap_conf_opt *opt = *ptr;
2499 len = L2CAP_CONF_OPT_SIZE + opt->len;
2507 *val = *((u8 *) opt->val);
2511 *val = get_unaligned_le16(opt->val);
2515 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer, not a value. */
2519 *val = (unsigned long) opt->val;
2523 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type, len, value) at *ptr and
 * advance *ptr past it.  1/2/4-byte values are stored inline
 * (unaligned little-endian); longer values treat @val as a pointer to
 * @len bytes to copy.
 */
2527 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2529 struct l2cap_conf_opt *opt = *ptr;
2531 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2538 *((u8 *) opt->val) = val;
2542 put_unaligned_le16(val, opt->val);
2546 put_unaligned_le32(val, opt->val);
/* Long option: val is actually a pointer to the payload. */
2550 memcpy(opt->val, (void *) val, len);
2554 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters; field content depends on the mode
 * (ERTM uses the channel's service type and default latency/flush
 * values, streaming forces best-effort).
 */
2557 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2559 struct l2cap_conf_efs efs;
2561 switch (chan->mode) {
2562 case L2CAP_MODE_ERTM:
2563 efs.id = chan->local_id;
2564 efs.stype = chan->local_stype;
2565 efs.msdu = cpu_to_le16(chan->local_msdu);
2566 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2567 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2568 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2571 case L2CAP_MODE_STREAMING:
2573 efs.stype = L2CAP_SERV_BESTEFFORT;
2574 efs.msdu = cpu_to_le16(chan->local_msdu);
2575 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2584 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2585 (unsigned long) &efs);
/* Delayed-ack timer (delayed work): send the pending acknowledgement
 * and drop the channel reference taken when the timer was armed.
 */
2588 static void l2cap_ack_timeout(struct work_struct *work)
2590 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2593 BT_DBG("chan %p", chan);
2595 l2cap_chan_lock(chan);
2597 __l2cap_send_ack(chan);
2599 l2cap_chan_unlock(chan);
2601 l2cap_chan_put(chan);
/* Reset all ERTM/streaming sequence counters and queues for a freshly
 * configured channel.  For ERTM proper, additionally initialise the
 * rx/tx state machines, the three delayed-work timers and the
 * SREJ/retransmission sequence lists (freeing srej_list if the second
 * allocation fails).
 */
2604 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2608 chan->next_tx_seq = 0;
2609 chan->expected_tx_seq = 0;
2610 chan->expected_ack_seq = 0;
2611 chan->unacked_frames = 0;
2612 chan->buffer_seq = 0;
2613 chan->num_acked = 0;
2614 chan->frames_sent = 0;
2615 chan->last_acked_seq = 0;
2617 chan->sdu_last_frag = NULL;
2620 skb_queue_head_init(&chan->tx_q);
/* Streaming mode needs only the counters/queue above. */
2622 if (chan->mode != L2CAP_MODE_ERTM)
2625 chan->rx_state = L2CAP_RX_STATE_RECV;
2626 chan->tx_state = L2CAP_TX_STATE_XMIT;
2628 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2629 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2630 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2632 skb_queue_head_init(&chan->srej_q);
2634 INIT_LIST_HEAD(&chan->srej_l);
2635 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
/* Undo the first allocation if the second one fails. */
2639 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2641 l2cap_seq_list_free(&chan->srej_list);
/* Keep ERTM/streaming only when the remote's feature mask supports
 * the requested mode; otherwise fall back to basic mode.
 */
2646 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2649 case L2CAP_MODE_STREAMING:
2650 case L2CAP_MODE_ERTM:
2651 if (l2cap_mode_supported(mode, remote_feat_mask))
2655 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with High Speed enabled and the
 * peer advertising the extended-window feature bit.
 */
2659 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2661 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with High Speed enabled
 * and the peer advertising the extended-flow feature bit.
 */
2664 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2666 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Choose the control-field format for the tx window: if the requested
 * window exceeds the default and extended windows are supported,
 * switch to the extended control field; otherwise clamp tx_win to the
 * default window size.
 */
2669 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2671 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2672 __l2cap_ews_supported(chan)) {
2673 /* use extended control field */
2674 set_bit(FLAG_EXT_CTRL, &chan->flags);
2675 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2677 chan->tx_win = min_t(u16, chan->tx_win,
2678 L2CAP_DEFAULT_TX_WINDOW);
2679 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request into @data.  Selects the final
 * channel mode (possibly downgrading via l2cap_select_mode on the
 * first request), then emits MTU, RFC, and — mode permitting — EFS,
 * FCS and EWS options.  Returns the total request length.
 */
2683 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2685 struct l2cap_conf_req *req = data;
2686 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2687 void *ptr = req->data;
2690 BT_DBG("chan %p", chan);
/* Mode is only (re)negotiated on the very first request/response. */
2692 if (chan->num_conf_req || chan->num_conf_rsp)
2695 switch (chan->mode) {
2696 case L2CAP_MODE_STREAMING:
2697 case L2CAP_MODE_ERTM:
2698 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2701 if (__l2cap_efs_supported(chan))
2702 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2706 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise MTU when it differs from the default. */
2711 if (chan->imtu != L2CAP_DEFAULT_MTU)
2712 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2714 switch (chan->mode) {
2715 case L2CAP_MODE_BASIC:
/* Peer knows neither ERTM nor streaming: RFC option is moot. */
2716 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2717 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2720 rfc.mode = L2CAP_MODE_BASIC;
2722 rfc.max_transmit = 0;
2723 rfc.retrans_timeout = 0;
2724 rfc.monitor_timeout = 0;
2725 rfc.max_pdu_size = 0;
2727 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2728 (unsigned long) &rfc);
2731 case L2CAP_MODE_ERTM:
2732 rfc.mode = L2CAP_MODE_ERTM;
2733 rfc.max_transmit = chan->max_tx;
2734 rfc.retrans_timeout = 0;
2735 rfc.monitor_timeout = 0;
/* MPS bounded by HCI MTU minus worst-case L2CAP overhead. */
2737 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2738 L2CAP_EXT_HDR_SIZE -
2741 rfc.max_pdu_size = cpu_to_le16(size);
2743 l2cap_txwin_setup(chan);
2745 rfc.txwin_size = min_t(u16, chan->tx_win,
2746 L2CAP_DEFAULT_TX_WINDOW);
2748 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2749 (unsigned long) &rfc);
2751 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2752 l2cap_add_opt_efs(&ptr, chan);
2754 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Explicitly request "no FCS" when we want to disable it. */
2757 if (chan->fcs == L2CAP_FCS_NONE ||
2758 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2759 chan->fcs = L2CAP_FCS_NONE;
2760 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2763 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2764 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2768 case L2CAP_MODE_STREAMING:
2769 rfc.mode = L2CAP_MODE_STREAMING;
2771 rfc.max_transmit = 0;
2772 rfc.retrans_timeout = 0;
2773 rfc.monitor_timeout = 0;
2775 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2776 L2CAP_EXT_HDR_SIZE -
2779 rfc.max_pdu_size = cpu_to_le16(size);
2781 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2782 (unsigned long) &rfc);
2784 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2785 l2cap_add_opt_efs(&ptr, chan);
2787 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2790 if (chan->fcs == L2CAP_FCS_NONE ||
2791 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2792 chan->fcs = L2CAP_FCS_NONE;
2793 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2798 req->dcid = cpu_to_le16(chan->dcid);
2799 req->flags = cpu_to_le16(0);
/* Parse the peer's buffered Configure Request (chan->conf_req /
 * conf_len) and build our Configure Response into @data.  First pass
 * decodes each option (MTU, flush timeout, RFC, FCS, EFS, EWS),
 * rejecting unknown non-hint options with CONF_UNKNOWN; then the
 * channel mode is reconciled and per-mode output options (RFC, EFS)
 * are written back.  Returns the response length, or -ECONNREFUSED
 * when the modes cannot be reconciled.
 */
2804 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2806 struct l2cap_conf_rsp *rsp = data;
2807 void *ptr = rsp->data;
2808 void *req = chan->conf_req;
2809 int len = chan->conf_len;
2810 int type, hint, olen;
2812 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2813 struct l2cap_conf_efs efs;
2815 u16 mtu = L2CAP_DEFAULT_MTU;
2816 u16 result = L2CAP_CONF_SUCCESS;
2819 BT_DBG("chan %p", chan);
/* First pass: decode every option in the request. */
2821 while (len >= L2CAP_CONF_OPT_SIZE) {
2822 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; others must be known. */
2824 hint = type & L2CAP_CONF_HINT;
2825 type &= L2CAP_CONF_MASK;
2828 case L2CAP_CONF_MTU:
2832 case L2CAP_CONF_FLUSH_TO:
2833 chan->flush_to = val;
2836 case L2CAP_CONF_QOS:
2839 case L2CAP_CONF_RFC:
2840 if (olen == sizeof(rfc))
2841 memcpy(&rfc, (void *) val, olen);
2844 case L2CAP_CONF_FCS:
2845 if (val == L2CAP_FCS_NONE)
2846 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2849 case L2CAP_CONF_EFS:
2851 if (olen == sizeof(efs))
2852 memcpy(&efs, (void *) val, olen);
2855 case L2CAP_CONF_EWS:
2857 return -ECONNREFUSED;
2859 set_bit(FLAG_EXT_CTRL, &chan->flags);
2860 set_bit(CONF_EWS_RECV, &chan->conf_state);
2861 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2862 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type in the response. */
2869 result = L2CAP_CONF_UNKNOWN;
2870 *((u8 *) ptr++) = type;
/* Mode reconciliation happens only on the first exchange. */
2875 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2878 switch (chan->mode) {
2879 case L2CAP_MODE_STREAMING:
2880 case L2CAP_MODE_ERTM:
2881 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2882 chan->mode = l2cap_select_mode(rfc.mode,
2883 chan->conn->feat_mask);
2888 if (__l2cap_efs_supported(chan))
2889 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2891 return -ECONNREFUSED;
2894 if (chan->mode != rfc.mode)
2895 return -ECONNREFUSED;
/* Modes still disagree: unaccept once, refuse on the retry. */
2901 if (chan->mode != rfc.mode) {
2902 result = L2CAP_CONF_UNACCEPT;
2903 rfc.mode = chan->mode;
2905 if (chan->num_conf_rsp == 1)
2906 return -ECONNREFUSED;
2908 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2909 sizeof(rfc), (unsigned long) &rfc);
2912 if (result == L2CAP_CONF_SUCCESS) {
2913 /* Configure output options and let the other side know
2914 * which ones we don't like. */
2916 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2917 result = L2CAP_CONF_UNACCEPT;
2920 set_bit(CONF_MTU_DONE, &chan->conf_state);
2922 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must match ours unless one side is NO_TRAFFIC. */
2925 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2926 efs.stype != L2CAP_SERV_NOTRAFIC &&
2927 efs.stype != chan->local_stype) {
2929 result = L2CAP_CONF_UNACCEPT;
2931 if (chan->num_conf_req >= 1)
2932 return -ECONNREFUSED;
2934 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2936 (unsigned long) &efs);
2938 /* Send PENDING Conf Rsp */
2939 result = L2CAP_CONF_PENDING;
2940 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2945 case L2CAP_MODE_BASIC:
2946 chan->fcs = L2CAP_FCS_NONE;
2947 set_bit(CONF_MODE_DONE, &chan->conf_state);
2950 case L2CAP_MODE_ERTM:
2951 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2952 chan->remote_tx_win = rfc.txwin_size;
2954 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2956 chan->remote_max_tx = rfc.max_transmit;
/* Clamp remote MPS to what fits in one HCI fragment. */
2958 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2960 L2CAP_EXT_HDR_SIZE -
2963 rfc.max_pdu_size = cpu_to_le16(size);
2964 chan->remote_mps = size;
2966 rfc.retrans_timeout =
2967 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2968 rfc.monitor_timeout =
2969 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2971 set_bit(CONF_MODE_DONE, &chan->conf_state);
2973 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2974 sizeof(rfc), (unsigned long) &rfc);
2976 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2977 chan->remote_id = efs.id;
2978 chan->remote_stype = efs.stype;
2979 chan->remote_msdu = le16_to_cpu(efs.msdu);
2980 chan->remote_flush_to =
2981 le32_to_cpu(efs.flush_to);
2982 chan->remote_acc_lat =
2983 le32_to_cpu(efs.acc_lat);
2984 chan->remote_sdu_itime =
2985 le32_to_cpu(efs.sdu_itime);
2986 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2987 sizeof(efs), (unsigned long) &efs);
2991 case L2CAP_MODE_STREAMING:
2992 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2994 L2CAP_EXT_HDR_SIZE -
2997 rfc.max_pdu_size = cpu_to_le16(size);
2998 chan->remote_mps = size;
3000 set_bit(CONF_MODE_DONE, &chan->conf_state);
3002 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3003 sizeof(rfc), (unsigned long) &rfc);
/* Any other mode at this point is unacceptable. */
3008 result = L2CAP_CONF_UNACCEPT;
3010 memset(&rfc, 0, sizeof(rfc));
3011 rfc.mode = chan->mode;
3014 if (result == L2CAP_CONF_SUCCESS)
3015 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3017 rsp->scid = cpu_to_le16(chan->dcid);
3018 rsp->result = cpu_to_le16(result);
3019 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response and build the follow-up Configure
 * Request into @data.  Each visible option (MTU, flush timeout, RFC,
 * extended window, EFS) is validated, possibly clamped, and echoed back
 * via l2cap_add_conf_opt().  On success/pending results the negotiated
 * ERTM or streaming parameters are committed to @chan.  Returns a
 * negative errno (-ECONNREFUSED) when the response is unacceptable.
 * NOTE(review): this extract is missing source lines (unbalanced braces,
 * absent switch headers); comments describe only the visible code.
 */
3024 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3026 struct l2cap_conf_req *req = data;
3027 void *ptr = req->data;
3030 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3031 struct l2cap_conf_efs efs;
3033 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
/* Walk the option list in the response buffer. */
3035 while (len >= L2CAP_CONF_OPT_SIZE) {
3036 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3039 case L2CAP_CONF_MTU:
/* Refuse MTUs below the spec minimum; propose the minimum instead. */
3040 if (val < L2CAP_DEFAULT_MIN_MTU) {
3041 *result = L2CAP_CONF_UNACCEPT;
3042 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3045 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3048 case L2CAP_CONF_FLUSH_TO:
3049 chan->flush_to = val;
3050 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3054 case L2CAP_CONF_RFC:
3055 if (olen == sizeof(rfc))
3056 memcpy(&rfc, (void *)val, olen);
/* A state-2 device may not change mode once chosen. */
3058 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3059 rfc.mode != chan->mode)
3060 return -ECONNREFUSED;
3064 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3065 sizeof(rfc), (unsigned long) &rfc);
3068 case L2CAP_CONF_EWS:
/* Clamp the extended window to our supported maximum. */
3069 chan->tx_win = min_t(u16, val,
3070 L2CAP_DEFAULT_EXT_WINDOW);
3071 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3075 case L2CAP_CONF_EFS:
3076 if (olen == sizeof(efs))
3077 memcpy(&efs, (void *)val, olen);
/* EFS service type must match ours unless one side is no-traffic. */
3079 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3080 efs.stype != L2CAP_SERV_NOTRAFIC &&
3081 efs.stype != chan->local_stype)
3082 return -ECONNREFUSED;
3084 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3085 sizeof(efs), (unsigned long) &efs);
/* Basic-mode channels cannot be renegotiated to another mode. */
3090 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3091 return -ECONNREFUSED;
3093 chan->mode = rfc.mode;
/* Commit the negotiated parameters when the peer accepted (or pended). */
3095 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3097 case L2CAP_MODE_ERTM:
3098 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3099 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3100 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3102 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3103 chan->local_msdu = le16_to_cpu(efs.msdu);
3104 chan->local_sdu_itime =
3105 le32_to_cpu(efs.sdu_itime);
3106 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3107 chan->local_flush_to =
3108 le32_to_cpu(efs.flush_to);
3112 case L2CAP_MODE_STREAMING:
3113 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3117 req->dcid = cpu_to_le16(chan->dcid);
3118 req->flags = cpu_to_le16(0x0000);
/* Fill @data with a Configure Response for @chan carrying @result and
 * @flags; the scid field is the peer's id for this channel (our dcid).
 * NOTE(review): return statement not visible in this extract.
 */
3123 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3125 struct l2cap_conf_rsp *rsp = data;
3126 void *ptr = rsp->data;
3128 BT_DBG("chan %p", chan);
3130 rsp->scid = cpu_to_le16(chan->dcid);
3131 rsp->result = cpu_to_le16(result);
3132 rsp->flags = cpu_to_le16(flags);
/* Send the deferred successful Connection Response for @chan, then kick
 * off configuration by sending our first Configure Request (unless one
 * was already sent, tracked by CONF_REQ_SENT).
 */
3137 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3139 struct l2cap_conn_rsp rsp;
3140 struct l2cap_conn *conn = chan->conn;
3143 rsp.scid = cpu_to_le16(chan->dcid);
3144 rsp.dcid = cpu_to_le16(chan->scid);
3145 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3146 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3147 l2cap_send_cmd(conn, chan->ident,
3148 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only the first caller past this test sends the config request. */
3150 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3153 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3154 l2cap_build_conf_req(chan, buf), buf);
3155 chan->num_conf_req++;
/* Extract the RFC option from a successful Configure Response and apply
 * its timeouts/MPS to @chan (ERTM and streaming modes only).  If the
 * remote omitted the RFC option, sane defaults are substituted.
 * NOTE(review): extract is missing lines (switch header, early return);
 * comments cover only visible code.
 */
3158 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3162 struct l2cap_conf_rfc rfc;
3164 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM/streaming channels carry RFC timing parameters. */
3166 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3169 while (len >= L2CAP_CONF_OPT_SIZE) {
3170 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val)
3173 case L2CAP_CONF_RFC:
3174 if (olen == sizeof(rfc))
3175 memcpy(&rfc, (void *)val, olen);
3180 /* Use sane default values in case a misbehaving remote device
3181 * did not send an RFC option.
3183 rfc.mode = chan->mode;
3184 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3185 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3186 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3188 BT_ERR("Expected RFC option was not found, using defaults");
3192 case L2CAP_MODE_ERTM:
3193 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3194 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3195 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3197 case L2CAP_MODE_STREAMING:
3198 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our pending
 * information request (matching ident), cancel the info timer, mark the
 * feature-mask exchange done, and resume connection startup.
 */
3202 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3204 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3206 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3209 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3210 cmd->ident == conn->info_ident) {
3211 cancel_delayed_work(&conn->info_timer);
3213 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3214 conn->info_ident = 0;
3216 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for
 * the PSM, enforce link security (SDP exempt), check backlog and
 * duplicate dcid, create and register the new channel, then reply with
 * success / pending / error.  On pending-with-no-info it also starts
 * the feature-mask information exchange; on success it sends the first
 * Configure Request.
 * NOTE(review): extract is missing lines (gotos, braces, dcid
 * assignment); comments describe only the visible code.
 */
3222 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3224 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3225 struct l2cap_conn_rsp rsp;
3226 struct l2cap_chan *chan = NULL, *pchan;
3227 struct sock *parent, *sk = NULL;
3228 int result, status = L2CAP_CS_NO_INFO;
3230 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3231 __le16 psm = req->psm;
3233 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3235 /* Check if we have socket listening on psm */
3236 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3238 result = L2CAP_CR_BAD_PSM;
3244 mutex_lock(&conn->chan_lock);
3247 /* Check if the ACL is secure enough (if not SDP) */
3248 if (psm != cpu_to_le16(0x0001) &&
3249 !hci_conn_check_link_mode(conn->hcon)) {
3250 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3251 result = L2CAP_CR_SEC_BLOCK;
3255 result = L2CAP_CR_NO_MEM;
3257 /* Check for backlog size */
3258 if (sk_acceptq_is_full(parent)) {
3259 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3263 chan = pchan->ops->new_connection(pchan->data);
3269 /* Check if we already have channel with that dcid */
3270 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3271 sock_set_flag(sk, SOCK_ZAPPED);
3272 chan->ops->close(chan->data);
3276 hci_conn_hold(conn->hcon);
3278 bacpy(&bt_sk(sk)->src, conn->src);
3279 bacpy(&bt_sk(sk)->dst, conn->dst);
3283 bt_accept_enqueue(parent, sk);
3285 __l2cap_chan_add(conn, chan);
3289 __set_chan_timer(chan, sk->sk_sndtimeo);
3291 chan->ident = cmd->ident;
/* Decide result/status based on feature exchange and security state. */
3293 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3294 if (l2cap_chan_check_security(chan)) {
3295 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3296 __l2cap_state_change(chan, BT_CONNECT2);
3297 result = L2CAP_CR_PEND;
3298 status = L2CAP_CS_AUTHOR_PEND;
3299 parent->sk_data_ready(parent, 0);
3301 __l2cap_state_change(chan, BT_CONFIG);
3302 result = L2CAP_CR_SUCCESS;
3303 status = L2CAP_CS_NO_INFO;
3306 __l2cap_state_change(chan, BT_CONNECT2);
3307 result = L2CAP_CR_PEND;
3308 status = L2CAP_CS_AUTHEN_PEND;
3311 __l2cap_state_change(chan, BT_CONNECT2);
3312 result = L2CAP_CR_PEND;
3313 status = L2CAP_CS_NO_INFO;
3317 release_sock(parent);
3318 mutex_unlock(&conn->chan_lock);
3321 rsp.scid = cpu_to_le16(scid);
3322 rsp.dcid = cpu_to_le16(dcid);
3323 rsp.result = cpu_to_le16(result);
3324 rsp.status = cpu_to_le16(status);
3325 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* If still pending and the peer needs no info, query the feature mask. */
3327 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3328 struct l2cap_info_req info;
3329 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3331 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3332 conn->info_ident = l2cap_get_ident(conn);
3334 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3336 l2cap_send_cmd(conn, conn->info_ident,
3337 L2CAP_INFO_REQ, sizeof(info), &info);
/* Accepted: start configuration if we have not yet. */
3340 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3341 result == L2CAP_CR_SUCCESS) {
3343 set_bit(CONF_REQ_SENT, &chan->conf_state);
3344 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3345 l2cap_build_conf_req(chan, buf), buf);
3346 chan->num_conf_req++;
/* Handle a Connection Response: locate the channel by scid (or by the
 * command ident while the peer has not yet assigned one), then on
 * success enter BT_CONFIG and send our Configure Request; on pending
 * mark the channel connect-pending; otherwise tear the channel down.
 * NOTE(review): extract is missing lines (switch header, error paths).
 */
3352 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3354 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3355 u16 scid, dcid, result, status;
3356 struct l2cap_chan *chan;
3360 scid = __le16_to_cpu(rsp->scid);
3361 dcid = __le16_to_cpu(rsp->dcid);
3362 result = __le16_to_cpu(rsp->result);
3363 status = __le16_to_cpu(rsp->status);
3365 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3366 dcid, scid, result, status);
3368 mutex_lock(&conn->chan_lock);
3371 chan = __l2cap_get_chan_by_scid(conn, scid);
/* Fallback lookup by pending command ident. */
3377 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3386 l2cap_chan_lock(chan);
3389 case L2CAP_CR_SUCCESS:
3390 l2cap_state_change(chan, BT_CONFIG);
3393 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3395 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3398 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3399 l2cap_build_conf_req(chan, req), req);
3400 chan->num_conf_req++;
3404 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3408 l2cap_chan_del(chan, ECONNREFUSED);
3412 l2cap_chan_unlock(chan);
3415 mutex_unlock(&conn->chan_lock);
/* Pick the channel's FCS setting after configuration: no FCS outside
 * ERTM/streaming; CRC16 otherwise, unless the peer opted out
 * (CONF_NO_FCS_RECV).
 */
3420 static inline void set_default_fcs(struct l2cap_chan *chan)
3422 /* FCS is enabled only in ERTM or streaming mode, if one or both
3425 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3426 chan->fcs = L2CAP_FCS_NONE;
3427 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3428 chan->fcs = L2CAP_FCS_CRC16;
/* Handle a Configure Request: validate channel state, accumulate
 * (possibly fragmented) options into chan->conf_req, and once complete
 * parse them, answer with a Configure Response, and — when both
 * directions are done — finish ERTM/streaming init and mark the channel
 * connected.  Also handles the lockstep case where both sides answered
 * PENDING.
 * NOTE(review): extract is missing lines (gotos, unlock paths, buffer
 * reset); comments describe only the visible code.
 */
3431 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3433 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3436 struct l2cap_chan *chan;
3439 dcid = __le16_to_cpu(req->dcid);
3440 flags = __le16_to_cpu(req->flags);
3442 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3444 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject CID. */
3448 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3449 struct l2cap_cmd_rej_cid rej;
3451 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3452 rej.scid = cpu_to_le16(chan->scid);
3453 rej.dcid = cpu_to_le16(chan->dcid);
3455 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3460 /* Reject if config buffer is too small. */
3461 len = cmd_len - sizeof(*req);
3462 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3463 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3464 l2cap_build_conf_rsp(chan, rsp,
3465 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the option list. */
3470 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3471 chan->conf_len += len;
3473 if (flags & 0x0001) {
3474 /* Incomplete config. Send empty response. */
3475 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3476 l2cap_build_conf_rsp(chan, rsp,
3477 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3481 /* Complete config. */
3482 len = l2cap_parse_conf_req(chan, rsp);
3484 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3488 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3489 chan->num_conf_rsp++;
3491 /* Reset config buffer. */
3494 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finalize FCS, ERTM, and go connected. */
3497 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3498 set_default_fcs(chan);
3500 l2cap_state_change(chan, BT_CONNECTED);
3502 if (chan->mode == L2CAP_MODE_ERTM ||
3503 chan->mode == L2CAP_MODE_STREAMING)
3504 err = l2cap_ertm_init(chan);
3507 l2cap_send_disconn_req(chan->conn, chan, -err);
3509 l2cap_chan_ready(chan);
3514 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3516 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3517 l2cap_build_conf_req(chan, buf), buf);
3518 chan->num_conf_req++;
3521 /* Got Conf Rsp PENDING from remote side and asume we sent
3522 Conf Rsp PENDING in the code above */
3523 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3524 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3526 /* check compatibility */
3528 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3529 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3531 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3532 l2cap_build_conf_rsp(chan, rsp,
3533 L2CAP_CONF_SUCCESS, 0x0000), rsp);
3537 l2cap_chan_unlock(chan);
/* Handle a Configure Response.  SUCCESS applies the RFC option;
 * PENDING completes our pending lockstep config; UNACCEPT re-parses the
 * peer's counter-proposal and retries (bounded by
 * L2CAP_CONF_MAX_CONF_RSP); anything else disconnects.  When output is
 * already done, finish ERTM init and mark the channel connected.
 * NOTE(review): extract is missing lines (switch header, gotos, default
 * case); comments describe only the visible code.
 */
3541 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3543 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3544 u16 scid, flags, result;
3545 struct l2cap_chan *chan;
3546 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3549 scid = __le16_to_cpu(rsp->scid);
3550 flags = __le16_to_cpu(rsp->flags);
3551 result = __le16_to_cpu(rsp->result);
3553 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3556 chan = l2cap_get_chan_by_scid(conn, scid);
3561 case L2CAP_CONF_SUCCESS:
3562 l2cap_conf_rfc_get(chan, rsp->data, len);
3563 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3566 case L2CAP_CONF_PENDING:
3567 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3569 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3572 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3575 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3579 /* check compatibility */
3581 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3582 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3584 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3585 l2cap_build_conf_rsp(chan, buf,
3586 L2CAP_CONF_SUCCESS, 0x0000), buf);
3590 case L2CAP_CONF_UNACCEPT:
/* Retry with the peer's counter-proposal, a bounded number of times. */
3591 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3594 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3595 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3599 /* throw out any old stored conf requests */
3600 result = L2CAP_CONF_SUCCESS;
3601 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3604 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3608 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3609 L2CAP_CONF_REQ, len, req);
3610 chan->num_conf_req++;
3611 if (result != L2CAP_CONF_SUCCESS)
3617 l2cap_chan_set_err(chan, ECONNRESET);
3619 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3620 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3627 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* Input+output both done: finalize and go connected. */
3629 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3630 set_default_fcs(chan);
3632 l2cap_state_change(chan, BT_CONNECTED);
3633 if (chan->mode == L2CAP_MODE_ERTM ||
3634 chan->mode == L2CAP_MODE_STREAMING)
3635 err = l2cap_ertm_init(chan);
3638 l2cap_send_disconn_req(chan->conn, chan, -err);
3640 l2cap_chan_ready(chan);
3644 l2cap_chan_unlock(chan);
/* Handle a Disconnection Request: look up the channel by our dcid,
 * acknowledge with a Disconnection Response, shut down the socket, and
 * tear the channel down (hold a ref across close to keep it alive).
 */
3648 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3650 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3651 struct l2cap_disconn_rsp rsp;
3653 struct l2cap_chan *chan;
3656 scid = __le16_to_cpu(req->scid);
3657 dcid = __le16_to_cpu(req->dcid);
3659 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3661 mutex_lock(&conn->chan_lock);
3663 chan = __l2cap_get_chan_by_scid(conn, dcid);
3665 mutex_unlock(&conn->chan_lock);
3669 l2cap_chan_lock(chan);
3673 rsp.dcid = cpu_to_le16(chan->scid);
3674 rsp.scid = cpu_to_le16(chan->dcid);
3675 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3678 sk->sk_shutdown = SHUTDOWN_MASK;
/* Hold a reference so close/put below see a live channel after del. */
3681 l2cap_chan_hold(chan);
3682 l2cap_chan_del(chan, ECONNRESET);
3684 l2cap_chan_unlock(chan);
3686 chan->ops->close(chan->data);
3687 l2cap_chan_put(chan);
3689 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnection Response: the peer confirmed our disconnect,
 * so remove the channel (err 0) and close it, with a temporary hold
 * across the teardown.
 */
3694 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3696 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3698 struct l2cap_chan *chan;
3700 scid = __le16_to_cpu(rsp->scid);
3701 dcid = __le16_to_cpu(rsp->dcid);
3703 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3705 mutex_lock(&conn->chan_lock);
3707 chan = __l2cap_get_chan_by_scid(conn, scid);
3709 mutex_unlock(&conn->chan_lock);
3713 l2cap_chan_lock(chan);
3715 l2cap_chan_hold(chan);
3716 l2cap_chan_del(chan, 0);
3718 l2cap_chan_unlock(chan);
3720 chan->ops->close(chan->data);
3721 l2cap_chan_put(chan);
3723 mutex_unlock(&conn->chan_lock);
/* Handle an Information Request: answer feature-mask and fixed-channel
 * queries with our capabilities; any other type gets NOTSUPP.
 */
3728 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3730 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3733 type = __le16_to_cpu(req->type);
3735 BT_DBG("type 0x%4.4x", type);
3737 if (type == L2CAP_IT_FEAT_MASK) {
3739 u32 feat_mask = l2cap_feat_mask;
3740 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3741 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3742 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming plus extended flow/window support. */
3744 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3747 feat_mask |= L2CAP_FEAT_EXT_FLOW
3748 | L2CAP_FEAT_EXT_WINDOW;
3750 put_unaligned_le32(feat_mask, rsp->data);
3751 l2cap_send_cmd(conn, cmd->ident,
3752 L2CAP_INFO_RSP, sizeof(buf), buf);
3753 } else if (type == L2CAP_IT_FIXED_CHAN) {
3755 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* A2MP fixed channel advertised conditionally. */
3758 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3760 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3762 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3763 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3764 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3765 l2cap_send_cmd(conn, cmd->ident,
3766 L2CAP_INFO_RSP, sizeof(buf), buf);
3768 struct l2cap_info_rsp rsp;
3769 rsp.type = cpu_to_le16(type);
3770 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3771 l2cap_send_cmd(conn, cmd->ident,
3772 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response to our own query.  A feature-mask
 * answer may chain into a fixed-channel query; once the exchange is
 * finished (or failed) the pending connections are started.
 * NOTE(review): extract is missing lines (switch header, returns).
 */
3778 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3780 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3783 type = __le16_to_cpu(rsp->type);
3784 result = __le16_to_cpu(rsp->result);
3786 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3788 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3789 if (cmd->ident != conn->info_ident ||
3790 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3793 cancel_delayed_work(&conn->info_timer);
3795 if (result != L2CAP_IR_SUCCESS) {
3796 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3797 conn->info_ident = 0;
3799 l2cap_conn_start(conn);
3805 case L2CAP_IT_FEAT_MASK:
3806 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask which ones. */
3808 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3809 struct l2cap_info_req req;
3810 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3812 conn->info_ident = l2cap_get_ident(conn);
3814 l2cap_send_cmd(conn, conn->info_ident,
3815 L2CAP_INFO_REQ, sizeof(req), &req);
3817 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3818 conn->info_ident = 0;
3820 l2cap_conn_start(conn);
3824 case L2CAP_IT_FIXED_CHAN:
3825 conn->fixed_chan_mask = rsp->data[0];
3826 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3827 conn->info_ident = 0;
3829 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  Currently a placeholder that
 * validates the command length and always rejects with NO_MEM.
 */
3836 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3837 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3840 struct l2cap_create_chan_req *req = data;
3841 struct l2cap_create_chan_rsp rsp;
3844 if (cmd_len != sizeof(*req))
3850 psm = le16_to_cpu(req->psm);
3851 scid = le16_to_cpu(req->scid);
3853 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3855 /* Placeholder: Always reject */
3857 rsp.scid = cpu_to_le16(scid);
3858 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3859 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3861 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Create Channel Response is handled identically to a Connection
 * Response. */
3867 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3868 struct l2cap_cmd_hdr *cmd, void *data)
3870 BT_DBG("conn %p", conn);
3872 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response with @result for initiator cid @icid
 * using the given command @ident. */
3875 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3876 u16 icid, u16 result)
3878 struct l2cap_move_chan_rsp rsp;
3880 BT_DBG("icid %d, result %d", icid, result);
3882 rsp.icid = cpu_to_le16(icid);
3883 rsp.result = cpu_to_le16(result);
3885 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm for @icid with @result, allocating a new
 * command ident and recording it on @chan.
 * NOTE(review): @chan may be NULL at the visible call site in
 * l2cap_move_channel_rsp(), yet chan->ident is written here — the
 * guard, if any, is not visible in this extract; verify upstream.
 */
3888 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3889 struct l2cap_chan *chan, u16 icid, u16 result)
3891 struct l2cap_move_chan_cfm cfm;
3894 BT_DBG("icid %d, result %d", icid, result);
3896 ident = l2cap_get_ident(conn);
3898 chan->ident = ident;
3900 cfm.icid = cpu_to_le16(icid);
3901 cfm.result = cpu_to_le16(result);
3903 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirm for @icid with the given @ident. */
3906 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3909 struct l2cap_move_chan_cfm_rsp rsp;
3911 BT_DBG("icid %d", icid);
3913 rsp.icid = cpu_to_le16(icid);
3914 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request.  Placeholder: validates length and
 * always refuses with L2CAP_MR_NOT_ALLOWED.
 */
3917 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3918 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3920 struct l2cap_move_chan_req *req = data;
3922 u16 result = L2CAP_MR_NOT_ALLOWED;
3924 if (cmd_len != sizeof(*req))
3927 icid = le16_to_cpu(req->icid);
3929 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3934 /* Placeholder: Always refuse */
3935 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  Placeholder: always answers with an
 * UNCONFIRMED Move Channel Confirm (no channel associated yet).
 */
3940 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3941 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3943 struct l2cap_move_chan_rsp *rsp = data;
3946 if (cmd_len != sizeof(*rsp))
3949 icid = le16_to_cpu(rsp->icid);
3950 result = le16_to_cpu(rsp->result);
3952 BT_DBG("icid %d, result %d", icid, result);
3954 /* Placeholder: Always unconfirmed */
3955 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirm: validate length and acknowledge it
 * with a Confirm Response. */
3960 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3961 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3963 struct l2cap_move_chan_cfm *cfm = data;
3966 if (cmd_len != sizeof(*cfm))
3969 icid = le16_to_cpu(cfm->icid);
3970 result = le16_to_cpu(cfm->result);
3972 BT_DBG("icid %d, result %d", icid, result);
3974 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirm Response: length check and debug log
 * only in this extract (no further action visible). */
3979 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3980 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3982 struct l2cap_move_chan_cfm_rsp *rsp = data;
3985 if (cmd_len != sizeof(*rsp))
3988 icid = le16_to_cpu(rsp->icid);
3990 BT_DBG("icid %d", icid);
/* Validate LE connection parameters: interval bounds (min 6..max 3200,
 * min <= max), supervision timeout multiplier 10..3200, timeout must
 * exceed 8*max interval, latency capped at 499 and at the derived
 * maximum.  NOTE(review): return statements are not visible here.
 */
3995 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4000 if (min > max || min < 6 || max > 3200)
4003 if (to_multiplier < 10 || to_multiplier > 3200)
4006 if (max >= to_multiplier * 8)
4009 max_latency = (to_multiplier * 8 / max) - 1;
4010 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master side only):
 * validate lengths and parameters, answer accepted/rejected, and on
 * acceptance push the new parameters to the controller.
 */
4016 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4017 struct l2cap_cmd_hdr *cmd, u8 *data)
4019 struct hci_conn *hcon = conn->hcon;
4020 struct l2cap_conn_param_update_req *req;
4021 struct l2cap_conn_param_update_rsp rsp;
4022 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may apply a slave's parameter update request. */
4025 if (!(hcon->link_mode & HCI_LM_MASTER))
4028 cmd_len = __le16_to_cpu(cmd->len);
4029 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4032 req = (struct l2cap_conn_param_update_req *) data;
4033 min = __le16_to_cpu(req->min);
4034 max = __le16_to_cpu(req->max);
4035 latency = __le16_to_cpu(req->latency);
4036 to_multiplier = __le16_to_cpu(req->to_multiplier);
4038 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4039 min, max, latency, to_multiplier);
4041 memset(&rsp, 0, sizeof(rsp));
4043 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4045 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4047 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4049 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4053 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signaling command to its handler by opcode.
 * Echo requests are answered inline; unknown opcodes are logged as
 * errors.  Returns the handler's status in err.
 */
4058 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4059 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4063 switch (cmd->code) {
4064 case L2CAP_COMMAND_REJ:
4065 l2cap_command_rej(conn, cmd, data);
4068 case L2CAP_CONN_REQ:
4069 err = l2cap_connect_req(conn, cmd, data);
4072 case L2CAP_CONN_RSP:
4073 err = l2cap_connect_rsp(conn, cmd, data);
4076 case L2CAP_CONF_REQ:
4077 err = l2cap_config_req(conn, cmd, cmd_len, data);
4080 case L2CAP_CONF_RSP:
4081 err = l2cap_config_rsp(conn, cmd, data);
4084 case L2CAP_DISCONN_REQ:
4085 err = l2cap_disconnect_req(conn, cmd, data);
4088 case L2CAP_DISCONN_RSP:
4089 err = l2cap_disconnect_rsp(conn, cmd, data);
4092 case L2CAP_ECHO_REQ:
4093 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4096 case L2CAP_ECHO_RSP:
4099 case L2CAP_INFO_REQ:
4100 err = l2cap_information_req(conn, cmd, data);
4103 case L2CAP_INFO_RSP:
4104 err = l2cap_information_rsp(conn, cmd, data);
4107 case L2CAP_CREATE_CHAN_REQ:
4108 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4111 case L2CAP_CREATE_CHAN_RSP:
4112 err = l2cap_create_channel_rsp(conn, cmd, data);
4115 case L2CAP_MOVE_CHAN_REQ:
4116 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4119 case L2CAP_MOVE_CHAN_RSP:
4120 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4123 case L2CAP_MOVE_CHAN_CFM:
4124 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4127 case L2CAP_MOVE_CHAN_CFM_RSP:
4128 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4132 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signaling command.  Only connection-parameter
 * updates are processed; unknown opcodes are logged.
 */
4140 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4141 struct l2cap_cmd_hdr *cmd, u8 *data)
4143 switch (cmd->code) {
4144 case L2CAP_COMMAND_REJ:
4147 case L2CAP_CONN_PARAM_UPDATE_REQ:
4148 return l2cap_conn_param_update_req(conn, cmd, data);
4150 case L2CAP_CONN_PARAM_UPDATE_RSP:
4154 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb from the signaling channel: mirror it to raw sockets,
 * then iterate over each embedded command header, sanity-check the
 * length and ident, and route to the LE or BR/EDR dispatcher.  Failed
 * commands are answered with a generic Command Reject.
 */
4159 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4160 struct sk_buff *skb)
4162 u8 *data = skb->data;
4164 struct l2cap_cmd_hdr cmd;
4167 l2cap_raw_recv(conn, skb);
4169 while (len >= L2CAP_CMD_HDR_SIZE) {
4171 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4172 data += L2CAP_CMD_HDR_SIZE;
4173 len -= L2CAP_CMD_HDR_SIZE;
4175 cmd_len = le16_to_cpu(cmd.len);
4177 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command may not overrun the buffer, and ident 0 is illegal. */
4179 if (cmd_len > len || !cmd.ident) {
4180 BT_DBG("corrupted command");
4184 if (conn->hcon->type == LE_LINK)
4185 err = l2cap_le_sig_cmd(conn, &cmd, data);
4187 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4190 struct l2cap_cmd_rej_unk rej;
4192 BT_ERR("Wrong link type (%d)", err);
4194 /* FIXME: Map err to a valid reason */
4195 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4196 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailer on a received frame (when the channel
 * uses FCS): strip the trailer, recompute over header+payload, and
 * compare.  Header size depends on whether extended control is in use.
 * NOTE(review): return statements are not visible in this extract.
 */
4206 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4208 u16 our_fcs, rcv_fcs;
4211 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4212 hdr_size = L2CAP_EXT_HDR_SIZE;
4214 hdr_size = L2CAP_ENH_HDR_SIZE;
4216 if (chan->fcs == L2CAP_FCS_CRC16) {
4217 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4218 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4219 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4221 if (our_fcs != rcv_fcs)
/* Acknowledge received frames: send RNR when locally busy, otherwise
 * (re)transmit pending I-frames, and fall back to an RR if nothing was
 * sent so the peer still gets an acknowledgement.
 */
4227 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4231 chan->frames_sent = 0;
4233 control |= __set_reqseq(chan, chan->buffer_seq);
4235 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4236 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
4237 l2cap_send_sframe(chan, control);
4238 set_bit(CONN_RNR_SENT, &chan->conn_state);
4241 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
4242 l2cap_retransmit_frames(chan);
4244 l2cap_ertm_send(chan);
/* No I-frame carried the ack: send an explicit RR. */
4246 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4247 chan->frames_sent == 0) {
4248 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4249 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ queue, kept sorted by
 * tx_seq offset from buffer_seq.  Duplicate sequence numbers are
 * detected (visible early-exit on match); otherwise the skb is placed
 * before the first later frame, or appended at the tail.
 * NOTE(review): loop header and return statements are missing from
 * this extract.
 */
4253 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
4255 struct sk_buff *next_skb;
4256 int tx_seq_offset, next_tx_seq_offset;
4258 bt_cb(skb)->control.txseq = tx_seq;
4259 bt_cb(skb)->control.sar = sar;
4261 next_skb = skb_peek(&chan->srej_q);
4263 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4266 if (bt_cb(next_skb)->control.txseq == tx_seq)
4269 next_tx_seq_offset = __seq_offset(chan,
4270 bt_cb(next_skb)->control.txseq, chan->buffer_seq);
4272 if (next_tx_seq_offset > tx_seq_offset) {
4273 __skb_queue_before(&chan->srej_q, next_skb, skb);
4277 if (skb_queue_is_last(&chan->srej_q, next_skb))
4280 next_skb = skb_queue_next(&chan->srej_q, next_skb);
4283 __skb_queue_tail(&chan->srej_q, skb);
/* Append @new_frag to @skb's frag_list (creating the list if empty),
 * update *last_frag to the new tail, and account the added length and
 * truesize on the head skb.
 */
4288 static void append_skb_frag(struct sk_buff *skb,
4289 struct sk_buff *new_frag, struct sk_buff **last_frag)
4291 /* skb->len reflects data in skb as well as all fragments
4292 * skb->data_len reflects only data in fragments
4294 if (!skb_has_frag_list(skb))
4295 skb_shinfo(skb)->frag_list = new_frag;
4297 new_frag->next = NULL;
4299 (*last_frag)->next = new_frag;
4300 *last_frag = new_frag;
4302 skb->len += new_frag->len;
4303 skb->data_len += new_frag->len;
4304 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to the SAR bits in
 * @control: unsegmented frames go straight to the channel's recv op;
 * START frames record the SDU length and begin accumulation; CONTINUE
 * and END frames are appended via append_skb_frag(), with the complete
 * SDU delivered (and state reset) on END.  Error paths free the partial
 * SDU.
 * NOTE(review): extract is missing lines (END case label, length-check
 * branches, error gotos); comments cover only visible code.
 */
4307 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
4311 switch (__get_ctrl_sar(chan, control)) {
4312 case L2CAP_SAR_UNSEGMENTED:
4316 err = chan->ops->recv(chan->data, skb);
4319 case L2CAP_SAR_START:
/* First fragment carries the total SDU length prefix. */
4323 chan->sdu_len = get_unaligned_le16(skb->data);
4324 skb_pull(skb, L2CAP_SDULEN_SIZE);
4326 if (chan->sdu_len > chan->imtu) {
4331 if (skb->len >= chan->sdu_len)
4335 chan->sdu_last_frag = skb;
4341 case L2CAP_SAR_CONTINUE:
4345 append_skb_frag(chan->sdu, skb,
4346 &chan->sdu_last_frag);
/* Accumulated data may not exceed the announced SDU length. */
4349 if (chan->sdu->len >= chan->sdu_len)
4359 append_skb_frag(chan->sdu, skb,
4360 &chan->sdu_last_frag);
4363 if (chan->sdu->len != chan->sdu_len)
4366 err = chan->ops->recv(chan->data, chan->sdu);
4369 /* Reassembly complete */
4371 chan->sdu_last_frag = NULL;
4379 kfree_skb(chan->sdu);
4381 chan->sdu_last_frag = NULL;
/* Enter local-busy state: flag it, drop the pending SREJ list, and arm
 * the ack timer so the peer is notified. */
4388 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
4390 BT_DBG("chan %p, Enter local busy", chan);
4392 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4393 l2cap_seq_list_clear(&chan->srej_list);
4395 __set_ack_timer(chan);
/* Leave local-busy state.  If we previously sent RNR, poll the peer
 * with an RR (P-bit set), restart the monitor timer, and wait for the
 * F-bit response; then clear the busy/RNR flags.
 */
4398 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
4402 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4405 control = __set_reqseq(chan, chan->buffer_seq);
4406 control |= __set_ctrl_poll(chan);
4407 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4408 l2cap_send_sframe(chan, control);
4409 chan->retry_count = 1;
4411 __clear_retrans_timer(chan);
4412 __set_monitor_timer(chan);
4414 set_bit(CONN_WAIT_F, &chan->conn_state);
4417 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4418 clear_bit(CONN_RNR_SENT, &chan->conn_state);
4420 BT_DBG("chan %p, Exit local busy", chan);
/* Public toggle for local-busy handling; only meaningful in ERTM mode. */
4423 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4425 if (chan->mode == L2CAP_MODE_ERTM) {
4427 l2cap_ertm_enter_local_busy(chan);
4429 l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ queue starting at @tx_seq: deliver each in-order
 * queued frame through reassembly (disconnecting on error) and advance
 * buffer_seq_srej, stopping at the first gap or when local-busy sets.
 */
4433 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
4435 struct sk_buff *skb;
4438 while ((skb = skb_peek(&chan->srej_q)) &&
4439 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4442 if (bt_cb(skb)->control.txseq != tx_seq)
4445 skb = skb_dequeue(&chan->srej_q);
4446 control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
4447 err = l2cap_reassemble_sdu(chan, skb, control);
4450 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4454 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
4455 tx_seq = __next_seq(chan, tx_seq);
/* Walk the stored SREJ list up to @tx_seq, resending an SREJ S-frame
 * for each entry and re-queuing it at the tail.
 * NOTE(review): the matching-entry branch body is not visible here.
 */
4459 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4461 struct srej_list *l, *tmp;
4464 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
4465 if (l->tx_seq == tx_seq) {
4470 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4471 control |= __set_reqseq(chan, l->tx_seq);
4472 l2cap_send_sframe(chan, control);
4474 list_add_tail(&l->list, &chan->srej_l);
/* Send SREJ S-frames for every missing sequence number between
 * expected_tx_seq and @tx_seq, recording each in both the seq list and
 * the srej_l list (GFP_ATOMIC allocation per entry), then advance
 * expected_tx_seq past @tx_seq.
 * NOTE(review): the allocation-failure return path is not visible in
 * this extract.
 */
4478 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4480 struct srej_list *new;
4483 while (tx_seq != chan->expected_tx_seq) {
4484 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4485 control |= __set_reqseq(chan, chan->expected_tx_seq);
4486 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
4487 l2cap_send_sframe(chan, control);
4489 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
4493 new->tx_seq = chan->expected_tx_seq;
4495 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4497 list_add_tail(&new->list, &chan->srej_l);
4500 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4505 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4507 u16 tx_seq = __get_txseq(chan, rx_control);
4508 u16 req_seq = __get_reqseq(chan, rx_control);
4509 u8 sar = __get_ctrl_sar(chan, rx_control);
4510 int tx_seq_offset, expected_tx_seq_offset;
4511 int num_to_ack = (chan->tx_win/6) + 1;
4514 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
4515 tx_seq, rx_control);
4517 if (__is_ctrl_final(chan, rx_control) &&
4518 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4519 __clear_monitor_timer(chan);
4520 if (chan->unacked_frames > 0)
4521 __set_retrans_timer(chan);
4522 clear_bit(CONN_WAIT_F, &chan->conn_state);
4525 chan->expected_ack_seq = req_seq;
4526 l2cap_drop_acked_frames(chan);
4528 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4530 /* invalid tx_seq */
4531 if (tx_seq_offset >= chan->tx_win) {
4532 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4536 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4537 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4538 l2cap_send_ack(chan);
4542 if (tx_seq == chan->expected_tx_seq)
4545 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4546 struct srej_list *first;
4548 first = list_first_entry(&chan->srej_l,
4549 struct srej_list, list);
4550 if (tx_seq == first->tx_seq) {
4551 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4552 l2cap_check_srej_gap(chan, tx_seq);
4554 list_del(&first->list);
4557 if (list_empty(&chan->srej_l)) {
4558 chan->buffer_seq = chan->buffer_seq_srej;
4559 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
4560 l2cap_send_ack(chan);
4561 BT_DBG("chan %p, Exit SREJ_SENT", chan);
4564 struct srej_list *l;
4566 /* duplicated tx_seq */
4567 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
4570 list_for_each_entry(l, &chan->srej_l, list) {
4571 if (l->tx_seq == tx_seq) {
4572 l2cap_resend_srejframe(chan, tx_seq);
4577 err = l2cap_send_srejframe(chan, tx_seq);
4579 l2cap_send_disconn_req(chan->conn, chan, -err);
4584 expected_tx_seq_offset = __seq_offset(chan,
4585 chan->expected_tx_seq, chan->buffer_seq);
4587 /* duplicated tx_seq */
4588 if (tx_seq_offset < expected_tx_seq_offset)
4591 set_bit(CONN_SREJ_SENT, &chan->conn_state);
4593 BT_DBG("chan %p, Enter SREJ", chan);
4595 INIT_LIST_HEAD(&chan->srej_l);
4596 chan->buffer_seq_srej = chan->buffer_seq;
4598 __skb_queue_head_init(&chan->srej_q);
4599 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4601 /* Set P-bit only if there are some I-frames to ack. */
4602 if (__clear_ack_timer(chan))
4603 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4605 err = l2cap_send_srejframe(chan, tx_seq);
4607 l2cap_send_disconn_req(chan->conn, chan, -err);
4614 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4616 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4617 bt_cb(skb)->control.txseq = tx_seq;
4618 bt_cb(skb)->control.sar = sar;
4619 __skb_queue_tail(&chan->srej_q, skb);
4623 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4624 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4627 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4631 if (__is_ctrl_final(chan, rx_control)) {
4632 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4633 l2cap_retransmit_frames(chan);
4637 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4638 if (chan->num_acked == num_to_ack - 1)
4639 l2cap_send_ack(chan);
4641 __set_ack_timer(chan);
4650 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4652 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4653 __get_reqseq(chan, rx_control), rx_control);
4655 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4656 l2cap_drop_acked_frames(chan);
4658 if (__is_ctrl_poll(chan, rx_control)) {
4659 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4660 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4661 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4662 (chan->unacked_frames > 0))
4663 __set_retrans_timer(chan);
4665 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4666 l2cap_send_srejtail(chan);
4668 l2cap_send_i_or_rr_or_rnr(chan);
4671 } else if (__is_ctrl_final(chan, rx_control)) {
4672 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4674 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4675 l2cap_retransmit_frames(chan);
4678 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4679 (chan->unacked_frames > 0))
4680 __set_retrans_timer(chan);
4682 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4683 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4684 l2cap_send_ack(chan);
4686 l2cap_ertm_send(chan);
4690 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4692 u16 tx_seq = __get_reqseq(chan, rx_control);
4694 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4696 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4698 chan->expected_ack_seq = tx_seq;
4699 l2cap_drop_acked_frames(chan);
4701 if (__is_ctrl_final(chan, rx_control)) {
4702 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4703 l2cap_retransmit_frames(chan);
4705 l2cap_retransmit_frames(chan);
4707 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4708 set_bit(CONN_REJ_ACT, &chan->conn_state);
4711 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4713 u16 tx_seq = __get_reqseq(chan, rx_control);
4715 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4717 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4719 if (__is_ctrl_poll(chan, rx_control)) {
4720 chan->expected_ack_seq = tx_seq;
4721 l2cap_drop_acked_frames(chan);
4723 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4724 l2cap_retransmit_one_frame(chan, tx_seq);
4726 l2cap_ertm_send(chan);
4728 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4729 chan->srej_save_reqseq = tx_seq;
4730 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4732 } else if (__is_ctrl_final(chan, rx_control)) {
4733 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4734 chan->srej_save_reqseq == tx_seq)
4735 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4737 l2cap_retransmit_one_frame(chan, tx_seq);
4739 l2cap_retransmit_one_frame(chan, tx_seq);
4740 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4741 chan->srej_save_reqseq = tx_seq;
4742 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4747 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4749 u16 tx_seq = __get_reqseq(chan, rx_control);
4751 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4753 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4754 chan->expected_ack_seq = tx_seq;
4755 l2cap_drop_acked_frames(chan);
4757 if (__is_ctrl_poll(chan, rx_control))
4758 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4760 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4761 __clear_retrans_timer(chan);
4762 if (__is_ctrl_poll(chan, rx_control))
4763 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4767 if (__is_ctrl_poll(chan, rx_control)) {
4768 l2cap_send_srejtail(chan);
4770 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4771 l2cap_send_sframe(chan, rx_control);
4775 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4777 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4779 if (__is_ctrl_final(chan, rx_control) &&
4780 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4781 __clear_monitor_timer(chan);
4782 if (chan->unacked_frames > 0)
4783 __set_retrans_timer(chan);
4784 clear_bit(CONN_WAIT_F, &chan->conn_state);
4787 switch (__get_ctrl_super(chan, rx_control)) {
4788 case L2CAP_SUPER_RR:
4789 l2cap_data_channel_rrframe(chan, rx_control);
4792 case L2CAP_SUPER_REJ:
4793 l2cap_data_channel_rejframe(chan, rx_control);
4796 case L2CAP_SUPER_SREJ:
4797 l2cap_data_channel_srejframe(chan, rx_control);
4800 case L2CAP_SUPER_RNR:
4801 l2cap_data_channel_rnrframe(chan, rx_control);
4809 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4813 int len, next_tx_seq_offset, req_seq_offset;
4815 __unpack_control(chan, skb);
4817 control = __get_control(chan, skb->data);
4818 skb_pull(skb, __ctrl_size(chan));
4822 * We can just drop the corrupted I-frame here.
4823 * Receiver will miss it and start proper recovery
4824 * procedures and ask retransmission.
4826 if (l2cap_check_fcs(chan, skb))
4829 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4830 len -= L2CAP_SDULEN_SIZE;
4832 if (chan->fcs == L2CAP_FCS_CRC16)
4833 len -= L2CAP_FCS_SIZE;
4835 if (len > chan->mps) {
4836 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4840 req_seq = __get_reqseq(chan, control);
4842 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4844 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4845 chan->expected_ack_seq);
4847 /* check for invalid req-seq */
4848 if (req_seq_offset > next_tx_seq_offset) {
4849 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4853 if (!__is_sframe(chan, control)) {
4855 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4859 l2cap_data_channel_iframe(chan, control, skb);
4863 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4867 l2cap_data_channel_sframe(chan, control, skb);
4877 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4879 struct l2cap_chan *chan;
4884 chan = l2cap_get_chan_by_scid(conn, cid);
4886 BT_DBG("unknown cid 0x%4.4x", cid);
4887 /* Drop packet and return */
4892 BT_DBG("chan %p, len %d", chan, skb->len);
4894 if (chan->state != BT_CONNECTED)
4897 switch (chan->mode) {
4898 case L2CAP_MODE_BASIC:
4899 /* If socket recv buffers overflows we drop data here
4900 * which is *bad* because L2CAP has to be reliable.
4901 * But we don't have any other choice. L2CAP doesn't
4902 * provide flow control mechanism. */
4904 if (chan->imtu < skb->len)
4907 if (!chan->ops->recv(chan->data, skb))
4911 case L2CAP_MODE_ERTM:
4912 l2cap_ertm_data_rcv(chan, skb);
4916 case L2CAP_MODE_STREAMING:
4917 control = __get_control(chan, skb->data);
4918 skb_pull(skb, __ctrl_size(chan));
4921 if (l2cap_check_fcs(chan, skb))
4924 if (__is_sar_start(chan, control))
4925 len -= L2CAP_SDULEN_SIZE;
4927 if (chan->fcs == L2CAP_FCS_CRC16)
4928 len -= L2CAP_FCS_SIZE;
4930 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4933 tx_seq = __get_txseq(chan, control);
4935 if (chan->expected_tx_seq != tx_seq) {
4936 /* Frame(s) missing - must discard partial SDU */
4937 kfree_skb(chan->sdu);
4939 chan->sdu_last_frag = NULL;
4942 /* TODO: Notify userland of missing data */
4945 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4947 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4948 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4953 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4961 l2cap_chan_unlock(chan);
4966 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4968 struct l2cap_chan *chan;
4970 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
4974 BT_DBG("chan %p, len %d", chan, skb->len);
4976 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4979 if (chan->imtu < skb->len)
4982 if (!chan->ops->recv(chan->data, skb))
4991 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4992 struct sk_buff *skb)
4994 struct l2cap_chan *chan;
4996 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5000 BT_DBG("chan %p, len %d", chan, skb->len);
5002 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5005 if (chan->imtu < skb->len)
5008 if (!chan->ops->recv(chan->data, skb))
5017 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5019 struct l2cap_hdr *lh = (void *) skb->data;
5023 skb_pull(skb, L2CAP_HDR_SIZE);
5024 cid = __le16_to_cpu(lh->cid);
5025 len = __le16_to_cpu(lh->len);
5027 if (len != skb->len) {
5032 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5035 case L2CAP_CID_LE_SIGNALING:
5036 case L2CAP_CID_SIGNALING:
5037 l2cap_sig_channel(conn, skb);
5040 case L2CAP_CID_CONN_LESS:
5041 psm = get_unaligned((__le16 *) skb->data);
5043 l2cap_conless_channel(conn, psm, skb);
5046 case L2CAP_CID_LE_DATA:
5047 l2cap_att_channel(conn, cid, skb);
5051 if (smp_sig_channel(conn, skb))
5052 l2cap_conn_del(conn->hcon, EACCES);
5056 l2cap_data_channel(conn, cid, skb);
5061 /* ---- L2CAP interface with lower layer (HCI) ---- */
5063 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5065 int exact = 0, lm1 = 0, lm2 = 0;
5066 struct l2cap_chan *c;
5068 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5070 /* Find listening sockets and check their link_mode */
5071 read_lock(&chan_list_lock);
5072 list_for_each_entry(c, &chan_list, global_l) {
5073 struct sock *sk = c->sk;
5075 if (c->state != BT_LISTEN)
5078 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5079 lm1 |= HCI_LM_ACCEPT;
5080 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5081 lm1 |= HCI_LM_MASTER;
5083 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5084 lm2 |= HCI_LM_ACCEPT;
5085 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5086 lm2 |= HCI_LM_MASTER;
5089 read_unlock(&chan_list_lock);
5091 return exact ? lm1 : lm2;
5094 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5096 struct l2cap_conn *conn;
5098 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5101 conn = l2cap_conn_add(hcon, status);
5103 l2cap_conn_ready(conn);
5105 l2cap_conn_del(hcon, bt_to_errno(status));
5110 int l2cap_disconn_ind(struct hci_conn *hcon)
5112 struct l2cap_conn *conn = hcon->l2cap_data;
5114 BT_DBG("hcon %p", hcon);
5117 return HCI_ERROR_REMOTE_USER_TERM;
5118 return conn->disc_reason;
5121 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5123 BT_DBG("hcon %p reason %d", hcon, reason);
5125 l2cap_conn_del(hcon, bt_to_errno(reason));
5129 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5131 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5134 if (encrypt == 0x00) {
5135 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5136 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5137 } else if (chan->sec_level == BT_SECURITY_HIGH)
5138 l2cap_chan_close(chan, ECONNREFUSED);
5140 if (chan->sec_level == BT_SECURITY_MEDIUM)
5141 __clear_chan_timer(chan);
5145 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5147 struct l2cap_conn *conn = hcon->l2cap_data;
5148 struct l2cap_chan *chan;
5153 BT_DBG("conn %p", conn);
5155 if (hcon->type == LE_LINK) {
5156 if (!status && encrypt)
5157 smp_distribute_keys(conn, 0);
5158 cancel_delayed_work(&conn->security_timer);
5161 mutex_lock(&conn->chan_lock);
5163 list_for_each_entry(chan, &conn->chan_l, list) {
5164 l2cap_chan_lock(chan);
5166 BT_DBG("chan->scid %d", chan->scid);
5168 if (chan->scid == L2CAP_CID_LE_DATA) {
5169 if (!status && encrypt) {
5170 chan->sec_level = hcon->sec_level;
5171 l2cap_chan_ready(chan);
5174 l2cap_chan_unlock(chan);
5178 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5179 l2cap_chan_unlock(chan);
5183 if (!status && (chan->state == BT_CONNECTED ||
5184 chan->state == BT_CONFIG)) {
5185 struct sock *sk = chan->sk;
5187 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5188 sk->sk_state_change(sk);
5190 l2cap_check_encryption(chan, encrypt);
5191 l2cap_chan_unlock(chan);
5195 if (chan->state == BT_CONNECT) {
5197 l2cap_send_conn_req(chan);
5199 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5201 } else if (chan->state == BT_CONNECT2) {
5202 struct sock *sk = chan->sk;
5203 struct l2cap_conn_rsp rsp;
5209 if (test_bit(BT_SK_DEFER_SETUP,
5210 &bt_sk(sk)->flags)) {
5211 struct sock *parent = bt_sk(sk)->parent;
5212 res = L2CAP_CR_PEND;
5213 stat = L2CAP_CS_AUTHOR_PEND;
5215 parent->sk_data_ready(parent, 0);
5217 __l2cap_state_change(chan, BT_CONFIG);
5218 res = L2CAP_CR_SUCCESS;
5219 stat = L2CAP_CS_NO_INFO;
5222 __l2cap_state_change(chan, BT_DISCONN);
5223 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5224 res = L2CAP_CR_SEC_BLOCK;
5225 stat = L2CAP_CS_NO_INFO;
5230 rsp.scid = cpu_to_le16(chan->dcid);
5231 rsp.dcid = cpu_to_le16(chan->scid);
5232 rsp.result = cpu_to_le16(res);
5233 rsp.status = cpu_to_le16(stat);
5234 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5238 l2cap_chan_unlock(chan);
5241 mutex_unlock(&conn->chan_lock);
5246 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5248 struct l2cap_conn *conn = hcon->l2cap_data;
5251 conn = l2cap_conn_add(hcon, 0);
5256 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5258 if (!(flags & ACL_CONT)) {
5259 struct l2cap_hdr *hdr;
5263 BT_ERR("Unexpected start frame (len %d)", skb->len);
5264 kfree_skb(conn->rx_skb);
5265 conn->rx_skb = NULL;
5267 l2cap_conn_unreliable(conn, ECOMM);
5270 /* Start fragment always begin with Basic L2CAP header */
5271 if (skb->len < L2CAP_HDR_SIZE) {
5272 BT_ERR("Frame is too short (len %d)", skb->len);
5273 l2cap_conn_unreliable(conn, ECOMM);
5277 hdr = (struct l2cap_hdr *) skb->data;
5278 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5280 if (len == skb->len) {
5281 /* Complete frame received */
5282 l2cap_recv_frame(conn, skb);
5286 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5288 if (skb->len > len) {
5289 BT_ERR("Frame is too long (len %d, expected len %d)",
5291 l2cap_conn_unreliable(conn, ECOMM);
5295 /* Allocate skb for the complete frame (with header) */
5296 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5300 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5302 conn->rx_len = len - skb->len;
5304 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5306 if (!conn->rx_len) {
5307 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5308 l2cap_conn_unreliable(conn, ECOMM);
5312 if (skb->len > conn->rx_len) {
5313 BT_ERR("Fragment is too long (len %d, expected %d)",
5314 skb->len, conn->rx_len);
5315 kfree_skb(conn->rx_skb);
5316 conn->rx_skb = NULL;
5318 l2cap_conn_unreliable(conn, ECOMM);
5322 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5324 conn->rx_len -= skb->len;
5326 if (!conn->rx_len) {
5327 /* Complete frame received */
5328 l2cap_recv_frame(conn, conn->rx_skb);
5329 conn->rx_skb = NULL;
5338 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5340 struct l2cap_chan *c;
5342 read_lock(&chan_list_lock);
5344 list_for_each_entry(c, &chan_list, global_l) {
5345 struct sock *sk = c->sk;
5347 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5348 batostr(&bt_sk(sk)->src),
5349 batostr(&bt_sk(sk)->dst),
5350 c->state, __le16_to_cpu(c->psm),
5351 c->scid, c->dcid, c->imtu, c->omtu,
5352 c->sec_level, c->mode);
5355 read_unlock(&chan_list_lock);
5360 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5362 return single_open(file, l2cap_debugfs_show, inode->i_private);
5365 static const struct file_operations l2cap_debugfs_fops = {
5366 .open = l2cap_debugfs_open,
5368 .llseek = seq_lseek,
5369 .release = single_release,
/* Dentry of the "l2cap" debugfs file; created in l2cap_init() */
static struct dentry *l2cap_debugfs;
5374 int __init l2cap_init(void)
5378 err = l2cap_init_sockets();
5383 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5384 bt_debugfs, NULL, &l2cap_debugfs_fops);
5386 BT_ERR("Failed to create L2CAP debug file");
5392 void l2cap_exit(void)
5394 debugfs_remove(l2cap_debugfs);
5395 l2cap_cleanup_sockets();
5398 module_param(disable_ertm, bool, 0644);
5399 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");