2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
60 bool disable_ertm = 1;
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
77 struct sk_buff_head *skbs, u8 event);
79 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
85 list_for_each_entry(c, &conn->chan_l, list) {
92 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
96 list_for_each_entry(c, &conn->chan_l, list) {
103 /* Find channel with given SCID.
104 * Returns locked channel. */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 struct l2cap_chan *c;
109 mutex_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
113 mutex_unlock(&conn->chan_lock);
118 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
120 struct l2cap_chan *c;
122 list_for_each_entry(c, &conn->chan_l, list) {
123 if (c->ident == ident)
129 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
131 struct l2cap_chan *c;
133 list_for_each_entry(c, &chan_list, global_l) {
134 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
140 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
144 write_lock(&chan_list_lock);
146 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
159 for (p = 0x1001; p < 0x1100; p += 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
161 chan->psm = cpu_to_le16(p);
162 chan->sport = cpu_to_le16(p);
169 write_unlock(&chan_list_lock);
173 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
175 write_lock(&chan_list_lock);
179 write_unlock(&chan_list_lock);
184 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
186 u16 cid = L2CAP_CID_DYN_START;
188 for (; cid < L2CAP_CID_DYN_END; cid++) {
189 if (!__l2cap_get_chan_by_scid(conn, cid))
196 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
198 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
199 state_to_string(state));
202 chan->ops->state_change(chan->data, state);
205 static void l2cap_state_change(struct l2cap_chan *chan, int state)
207 struct sock *sk = chan->sk;
210 __l2cap_state_change(chan, state);
214 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
216 struct sock *sk = chan->sk;
221 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
223 struct sock *sk = chan->sk;
226 __l2cap_chan_set_err(chan, err);
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
256 size_t alloc_size, i;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size = roundup_pow_of_two(size);
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
279 kfree(seq_list->list);
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
285 /* Constant-time check for list membership */
286 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
289 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
291 u16 mask = seq_list->mask;
293 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR;
296 } else if (seq_list->head == seq) {
297 /* Head can be removed in constant time */
298 seq_list->head = seq_list->list[seq & mask];
299 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
301 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
302 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
303 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 /* Walk the list to find the sequence number */
307 u16 prev = seq_list->head;
308 while (seq_list->list[prev & mask] != seq) {
309 prev = seq_list->list[prev & mask];
310 if (prev == L2CAP_SEQ_LIST_TAIL)
311 return L2CAP_SEQ_LIST_CLEAR;
314 /* Unlink the number from the list and clear it */
315 seq_list->list[prev & mask] = seq_list->list[seq & mask];
316 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
317 if (seq_list->tail == seq)
318 seq_list->tail = prev;
323 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list, seq_list->head);
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
343 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
345 u16 mask = seq_list->mask;
347 /* All appends happen in constant time */
349 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
352 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
353 seq_list->head = seq;
355 seq_list->list[seq_list->tail & mask] = seq;
357 seq_list->tail = seq;
358 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
361 static void l2cap_chan_timeout(struct work_struct *work)
363 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
365 struct l2cap_conn *conn = chan->conn;
368 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
370 mutex_lock(&conn->chan_lock);
371 l2cap_chan_lock(chan);
373 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
374 reason = ECONNREFUSED;
375 else if (chan->state == BT_CONNECT &&
376 chan->sec_level != BT_SECURITY_SDP)
377 reason = ECONNREFUSED;
381 l2cap_chan_close(chan, reason);
383 l2cap_chan_unlock(chan);
385 chan->ops->close(chan->data);
386 mutex_unlock(&conn->chan_lock);
388 l2cap_chan_put(chan);
391 struct l2cap_chan *l2cap_chan_create(void)
393 struct l2cap_chan *chan;
395 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
399 mutex_init(&chan->lock);
401 write_lock(&chan_list_lock);
402 list_add(&chan->global_l, &chan_list);
403 write_unlock(&chan_list_lock);
405 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
407 chan->state = BT_OPEN;
409 atomic_set(&chan->refcnt, 1);
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
414 BT_DBG("chan %p", chan);
419 void l2cap_chan_destroy(struct l2cap_chan *chan)
421 write_lock(&chan_list_lock);
422 list_del(&chan->global_l);
423 write_unlock(&chan_list_lock);
425 l2cap_chan_put(chan);
428 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
430 chan->fcs = L2CAP_FCS_CRC16;
431 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
432 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
433 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
434 chan->sec_level = BT_SECURITY_LOW;
436 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
439 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
441 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
442 __le16_to_cpu(chan->psm), chan->dcid);
444 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
448 switch (chan->chan_type) {
449 case L2CAP_CHAN_CONN_ORIENTED:
450 if (conn->hcon->type == LE_LINK) {
452 chan->omtu = L2CAP_LE_DEFAULT_MTU;
453 chan->scid = L2CAP_CID_LE_DATA;
454 chan->dcid = L2CAP_CID_LE_DATA;
456 /* Alloc CID for connection-oriented socket */
457 chan->scid = l2cap_alloc_cid(conn);
458 chan->omtu = L2CAP_DEFAULT_MTU;
462 case L2CAP_CHAN_CONN_LESS:
463 /* Connectionless socket */
464 chan->scid = L2CAP_CID_CONN_LESS;
465 chan->dcid = L2CAP_CID_CONN_LESS;
466 chan->omtu = L2CAP_DEFAULT_MTU;
470 /* Raw socket can send/recv signalling messages only */
471 chan->scid = L2CAP_CID_SIGNALING;
472 chan->dcid = L2CAP_CID_SIGNALING;
473 chan->omtu = L2CAP_DEFAULT_MTU;
476 chan->local_id = L2CAP_BESTEFFORT_ID;
477 chan->local_stype = L2CAP_SERV_BESTEFFORT;
478 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
479 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
480 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
481 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
483 l2cap_chan_hold(chan);
485 list_add(&chan->list, &conn->chan_l);
488 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
490 mutex_lock(&conn->chan_lock);
491 __l2cap_chan_add(conn, chan);
492 mutex_unlock(&conn->chan_lock);
495 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
497 struct sock *sk = chan->sk;
498 struct l2cap_conn *conn = chan->conn;
499 struct sock *parent = bt_sk(sk)->parent;
501 __clear_chan_timer(chan);
503 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
506 /* Delete from channel list */
507 list_del(&chan->list);
509 l2cap_chan_put(chan);
512 hci_conn_put(conn->hcon);
517 __l2cap_state_change(chan, BT_CLOSED);
518 sock_set_flag(sk, SOCK_ZAPPED);
521 __l2cap_chan_set_err(chan, err);
524 bt_accept_unlink(sk);
525 parent->sk_data_ready(parent, 0);
527 sk->sk_state_change(sk);
531 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
534 skb_queue_purge(&chan->tx_q);
536 if (chan->mode == L2CAP_MODE_ERTM) {
537 struct srej_list *l, *tmp;
539 __clear_retrans_timer(chan);
540 __clear_monitor_timer(chan);
541 __clear_ack_timer(chan);
543 skb_queue_purge(&chan->srej_q);
545 l2cap_seq_list_free(&chan->srej_list);
546 l2cap_seq_list_free(&chan->retrans_list);
547 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
554 static void l2cap_chan_cleanup_listen(struct sock *parent)
558 BT_DBG("parent %p", parent);
560 /* Close not yet accepted channels */
561 while ((sk = bt_accept_dequeue(parent, NULL))) {
562 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
564 l2cap_chan_lock(chan);
565 __clear_chan_timer(chan);
566 l2cap_chan_close(chan, ECONNRESET);
567 l2cap_chan_unlock(chan);
569 chan->ops->close(chan->data);
573 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
575 struct l2cap_conn *conn = chan->conn;
576 struct sock *sk = chan->sk;
578 BT_DBG("chan %p state %s sk %p", chan,
579 state_to_string(chan->state), sk);
581 switch (chan->state) {
584 l2cap_chan_cleanup_listen(sk);
586 __l2cap_state_change(chan, BT_CLOSED);
587 sock_set_flag(sk, SOCK_ZAPPED);
593 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
594 conn->hcon->type == ACL_LINK) {
595 __set_chan_timer(chan, sk->sk_sndtimeo);
596 l2cap_send_disconn_req(conn, chan, reason);
598 l2cap_chan_del(chan, reason);
602 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
603 conn->hcon->type == ACL_LINK) {
604 struct l2cap_conn_rsp rsp;
607 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
608 result = L2CAP_CR_SEC_BLOCK;
610 result = L2CAP_CR_BAD_PSM;
611 l2cap_state_change(chan, BT_DISCONN);
613 rsp.scid = cpu_to_le16(chan->dcid);
614 rsp.dcid = cpu_to_le16(chan->scid);
615 rsp.result = cpu_to_le16(result);
616 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
617 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
621 l2cap_chan_del(chan, reason);
626 l2cap_chan_del(chan, reason);
631 sock_set_flag(sk, SOCK_ZAPPED);
637 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
639 if (chan->chan_type == L2CAP_CHAN_RAW) {
640 switch (chan->sec_level) {
641 case BT_SECURITY_HIGH:
642 return HCI_AT_DEDICATED_BONDING_MITM;
643 case BT_SECURITY_MEDIUM:
644 return HCI_AT_DEDICATED_BONDING;
646 return HCI_AT_NO_BONDING;
648 } else if (chan->psm == cpu_to_le16(0x0001)) {
649 if (chan->sec_level == BT_SECURITY_LOW)
650 chan->sec_level = BT_SECURITY_SDP;
652 if (chan->sec_level == BT_SECURITY_HIGH)
653 return HCI_AT_NO_BONDING_MITM;
655 return HCI_AT_NO_BONDING;
657 switch (chan->sec_level) {
658 case BT_SECURITY_HIGH:
659 return HCI_AT_GENERAL_BONDING_MITM;
660 case BT_SECURITY_MEDIUM:
661 return HCI_AT_GENERAL_BONDING;
663 return HCI_AT_NO_BONDING;
668 /* Service level security */
669 int l2cap_chan_check_security(struct l2cap_chan *chan)
671 struct l2cap_conn *conn = chan->conn;
674 auth_type = l2cap_get_auth_type(chan);
676 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
679 static u8 l2cap_get_ident(struct l2cap_conn *conn)
683 /* Get next available identificator.
684 * 1 - 128 are used by kernel.
685 * 129 - 199 are reserved.
686 * 200 - 254 are used by utilities like l2ping, etc.
689 spin_lock(&conn->lock);
691 if (++conn->tx_ident > 128)
696 spin_unlock(&conn->lock);
701 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
703 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
706 BT_DBG("code 0x%2.2x", code);
711 if (lmp_no_flush_capable(conn->hcon->hdev))
712 flags = ACL_START_NO_FLUSH;
716 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
717 skb->priority = HCI_PRIO_MAX;
719 hci_send_acl(conn->hchan, skb, flags);
722 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
724 struct hci_conn *hcon = chan->conn->hcon;
727 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
730 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
731 lmp_no_flush_capable(hcon->hdev))
732 flags = ACL_START_NO_FLUSH;
736 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
737 hci_send_acl(chan->conn->hchan, skb, flags);
740 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
742 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
743 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
745 if (enh & L2CAP_CTRL_FRAME_TYPE) {
748 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
749 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
756 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
757 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
764 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
766 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
767 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
769 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
772 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
773 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
780 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
781 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
788 static inline void __unpack_control(struct l2cap_chan *chan,
791 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
792 __unpack_extended_control(get_unaligned_le32(skb->data),
793 &bt_cb(skb)->control);
795 __unpack_enhanced_control(get_unaligned_le16(skb->data),
796 &bt_cb(skb)->control);
800 static u32 __pack_extended_control(struct l2cap_ctrl *control)
804 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
805 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
807 if (control->sframe) {
808 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
809 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
810 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
812 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
813 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
819 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
823 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
824 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
826 if (control->sframe) {
827 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
828 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
829 packed |= L2CAP_CTRL_FRAME_TYPE;
831 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
832 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
838 static inline void __pack_control(struct l2cap_chan *chan,
839 struct l2cap_ctrl *control,
842 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
843 put_unaligned_le32(__pack_extended_control(control),
844 skb->data + L2CAP_HDR_SIZE);
846 put_unaligned_le16(__pack_enhanced_control(control),
847 skb->data + L2CAP_HDR_SIZE);
851 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
854 struct l2cap_hdr *lh;
855 struct l2cap_conn *conn = chan->conn;
858 if (chan->state != BT_CONNECTED)
861 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
862 hlen = L2CAP_EXT_HDR_SIZE;
864 hlen = L2CAP_ENH_HDR_SIZE;
866 if (chan->fcs == L2CAP_FCS_CRC16)
867 hlen += L2CAP_FCS_SIZE;
869 BT_DBG("chan %p, control 0x%8.8x", chan, control);
871 count = min_t(unsigned int, conn->mtu, hlen);
873 control |= __set_sframe(chan);
875 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
876 control |= __set_ctrl_final(chan);
878 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
879 control |= __set_ctrl_poll(chan);
881 skb = bt_skb_alloc(count, GFP_ATOMIC);
885 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
886 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
887 lh->cid = cpu_to_le16(chan->dcid);
889 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
891 if (chan->fcs == L2CAP_FCS_CRC16) {
892 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
893 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
896 skb->priority = HCI_PRIO_MAX;
897 l2cap_do_send(chan, skb);
900 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
902 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
903 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
904 set_bit(CONN_RNR_SENT, &chan->conn_state);
906 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
908 control |= __set_reqseq(chan, chan->buffer_seq);
910 l2cap_send_sframe(chan, control);
913 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
915 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
918 static void l2cap_send_conn_req(struct l2cap_chan *chan)
920 struct l2cap_conn *conn = chan->conn;
921 struct l2cap_conn_req req;
923 req.scid = cpu_to_le16(chan->scid);
926 chan->ident = l2cap_get_ident(conn);
928 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
930 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
933 static void l2cap_chan_ready(struct l2cap_chan *chan)
935 struct sock *sk = chan->sk;
940 parent = bt_sk(sk)->parent;
942 BT_DBG("sk %p, parent %p", sk, parent);
944 /* This clears all conf flags, including CONF_NOT_COMPLETE */
945 chan->conf_state = 0;
946 __clear_chan_timer(chan);
948 __l2cap_state_change(chan, BT_CONNECTED);
949 sk->sk_state_change(sk);
952 parent->sk_data_ready(parent, 0);
957 static void l2cap_do_start(struct l2cap_chan *chan)
959 struct l2cap_conn *conn = chan->conn;
961 if (conn->hcon->type == LE_LINK) {
962 l2cap_chan_ready(chan);
966 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
967 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
970 if (l2cap_chan_check_security(chan) &&
971 __l2cap_no_conn_pending(chan))
972 l2cap_send_conn_req(chan);
974 struct l2cap_info_req req;
975 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
977 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
978 conn->info_ident = l2cap_get_ident(conn);
980 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
982 l2cap_send_cmd(conn, conn->info_ident,
983 L2CAP_INFO_REQ, sizeof(req), &req);
987 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
989 u32 local_feat_mask = l2cap_feat_mask;
991 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
994 case L2CAP_MODE_ERTM:
995 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
996 case L2CAP_MODE_STREAMING:
997 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1003 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1005 struct sock *sk = chan->sk;
1006 struct l2cap_disconn_req req;
1011 if (chan->mode == L2CAP_MODE_ERTM) {
1012 __clear_retrans_timer(chan);
1013 __clear_monitor_timer(chan);
1014 __clear_ack_timer(chan);
1017 req.dcid = cpu_to_le16(chan->dcid);
1018 req.scid = cpu_to_le16(chan->scid);
1019 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1020 L2CAP_DISCONN_REQ, sizeof(req), &req);
1023 __l2cap_state_change(chan, BT_DISCONN);
1024 __l2cap_chan_set_err(chan, err);
1028 /* ---- L2CAP connections ---- */
1029 static void l2cap_conn_start(struct l2cap_conn *conn)
1031 struct l2cap_chan *chan, *tmp;
1033 BT_DBG("conn %p", conn);
1035 mutex_lock(&conn->chan_lock);
1037 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1038 struct sock *sk = chan->sk;
1040 l2cap_chan_lock(chan);
1042 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1043 l2cap_chan_unlock(chan);
1047 if (chan->state == BT_CONNECT) {
1048 if (!l2cap_chan_check_security(chan) ||
1049 !__l2cap_no_conn_pending(chan)) {
1050 l2cap_chan_unlock(chan);
1054 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1055 && test_bit(CONF_STATE2_DEVICE,
1056 &chan->conf_state)) {
1057 l2cap_chan_close(chan, ECONNRESET);
1058 l2cap_chan_unlock(chan);
1062 l2cap_send_conn_req(chan);
1064 } else if (chan->state == BT_CONNECT2) {
1065 struct l2cap_conn_rsp rsp;
1067 rsp.scid = cpu_to_le16(chan->dcid);
1068 rsp.dcid = cpu_to_le16(chan->scid);
1070 if (l2cap_chan_check_security(chan)) {
1072 if (test_bit(BT_SK_DEFER_SETUP,
1073 &bt_sk(sk)->flags)) {
1074 struct sock *parent = bt_sk(sk)->parent;
1075 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1076 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1078 parent->sk_data_ready(parent, 0);
1081 __l2cap_state_change(chan, BT_CONFIG);
1082 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1083 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1087 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1088 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1091 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1094 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1095 rsp.result != L2CAP_CR_SUCCESS) {
1096 l2cap_chan_unlock(chan);
1100 set_bit(CONF_REQ_SENT, &chan->conf_state);
1101 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1102 l2cap_build_conf_req(chan, buf), buf);
1103 chan->num_conf_req++;
1106 l2cap_chan_unlock(chan);
1109 mutex_unlock(&conn->chan_lock);
1112 /* Find socket with cid and source/destination bdaddr.
1113 * Returns closest match, locked.
1115 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1119 struct l2cap_chan *c, *c1 = NULL;
1121 read_lock(&chan_list_lock);
1123 list_for_each_entry(c, &chan_list, global_l) {
1124 struct sock *sk = c->sk;
1126 if (state && c->state != state)
1129 if (c->scid == cid) {
1130 int src_match, dst_match;
1131 int src_any, dst_any;
1134 src_match = !bacmp(&bt_sk(sk)->src, src);
1135 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1136 if (src_match && dst_match) {
1137 read_unlock(&chan_list_lock);
1142 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1143 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1144 if ((src_match && dst_any) || (src_any && dst_match) ||
1145 (src_any && dst_any))
1150 read_unlock(&chan_list_lock);
1155 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1157 struct sock *parent, *sk;
1158 struct l2cap_chan *chan, *pchan;
1162 /* Check if we have socket listening on cid */
1163 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1164 conn->src, conn->dst);
1172 /* Check for backlog size */
1173 if (sk_acceptq_is_full(parent)) {
1174 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1178 chan = pchan->ops->new_connection(pchan->data);
1184 hci_conn_hold(conn->hcon);
1186 bacpy(&bt_sk(sk)->src, conn->src);
1187 bacpy(&bt_sk(sk)->dst, conn->dst);
1189 bt_accept_enqueue(parent, sk);
1191 l2cap_chan_add(conn, chan);
1193 __set_chan_timer(chan, sk->sk_sndtimeo);
1195 __l2cap_state_change(chan, BT_CONNECTED);
1196 parent->sk_data_ready(parent, 0);
1199 release_sock(parent);
1202 static void l2cap_conn_ready(struct l2cap_conn *conn)
1204 struct l2cap_chan *chan;
1206 BT_DBG("conn %p", conn);
1208 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1209 l2cap_le_conn_ready(conn);
1211 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1212 smp_conn_security(conn, conn->hcon->pending_sec_level);
1214 mutex_lock(&conn->chan_lock);
1216 list_for_each_entry(chan, &conn->chan_l, list) {
1218 l2cap_chan_lock(chan);
1220 if (conn->hcon->type == LE_LINK) {
1221 if (smp_conn_security(conn, chan->sec_level))
1222 l2cap_chan_ready(chan);
1224 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1225 struct sock *sk = chan->sk;
1226 __clear_chan_timer(chan);
1228 __l2cap_state_change(chan, BT_CONNECTED);
1229 sk->sk_state_change(sk);
1232 } else if (chan->state == BT_CONNECT)
1233 l2cap_do_start(chan);
1235 l2cap_chan_unlock(chan);
1238 mutex_unlock(&conn->chan_lock);
1241 /* Notify sockets that we cannot guaranty reliability anymore */
1242 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1244 struct l2cap_chan *chan;
1246 BT_DBG("conn %p", conn);
1248 mutex_lock(&conn->chan_lock);
1250 list_for_each_entry(chan, &conn->chan_l, list) {
1251 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1252 __l2cap_chan_set_err(chan, err);
1255 mutex_unlock(&conn->chan_lock);
1258 static void l2cap_info_timeout(struct work_struct *work)
1260 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1263 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1264 conn->info_ident = 0;
1266 l2cap_conn_start(conn);
1269 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1271 struct l2cap_conn *conn = hcon->l2cap_data;
1272 struct l2cap_chan *chan, *l;
1277 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1279 kfree_skb(conn->rx_skb);
1281 mutex_lock(&conn->chan_lock);
1284 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1285 l2cap_chan_hold(chan);
1286 l2cap_chan_lock(chan);
1288 l2cap_chan_del(chan, err);
1290 l2cap_chan_unlock(chan);
1292 chan->ops->close(chan->data);
1293 l2cap_chan_put(chan);
1296 mutex_unlock(&conn->chan_lock);
1298 hci_chan_del(conn->hchan);
1300 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1301 cancel_delayed_work_sync(&conn->info_timer);
1303 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1304 cancel_delayed_work_sync(&conn->security_timer);
1305 smp_chan_destroy(conn);
1308 hcon->l2cap_data = NULL;
1312 static void security_timeout(struct work_struct *work)
1314 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1315 security_timer.work);
1317 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1320 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1322 struct l2cap_conn *conn = hcon->l2cap_data;
1323 struct hci_chan *hchan;
1328 hchan = hci_chan_create(hcon);
1332 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1334 hci_chan_del(hchan);
1338 hcon->l2cap_data = conn;
1340 conn->hchan = hchan;
1342 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1344 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1345 conn->mtu = hcon->hdev->le_mtu;
1347 conn->mtu = hcon->hdev->acl_mtu;
1349 conn->src = &hcon->hdev->bdaddr;
1350 conn->dst = &hcon->dst;
1352 conn->feat_mask = 0;
1354 spin_lock_init(&conn->lock);
1355 mutex_init(&conn->chan_lock);
1357 INIT_LIST_HEAD(&conn->chan_l);
1359 if (hcon->type == LE_LINK)
1360 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1362 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1364 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1369 /* ---- Socket interface ---- */
1371 /* Find socket with psm and source / destination bdaddr.
1372 * Returns closest match.
1374 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1378 struct l2cap_chan *c, *c1 = NULL;
1380 read_lock(&chan_list_lock);
1382 list_for_each_entry(c, &chan_list, global_l) {
1383 struct sock *sk = c->sk;
1385 if (state && c->state != state)
1388 if (c->psm == psm) {
1389 int src_match, dst_match;
1390 int src_any, dst_any;
1393 src_match = !bacmp(&bt_sk(sk)->src, src);
1394 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1395 if (src_match && dst_match) {
1396 read_unlock(&chan_list_lock);
1401 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1402 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1403 if ((src_match && dst_any) || (src_any && dst_match) ||
1404 (src_any && dst_any))
1409 read_unlock(&chan_list_lock);
1414 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1415 bdaddr_t *dst, u8 dst_type)
1417 struct sock *sk = chan->sk;
1418 bdaddr_t *src = &bt_sk(sk)->src;
1419 struct l2cap_conn *conn;
1420 struct hci_conn *hcon;
1421 struct hci_dev *hdev;
1425 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1426 dst_type, __le16_to_cpu(chan->psm));
1428 hdev = hci_get_route(dst, src);
1430 return -EHOSTUNREACH;
1434 l2cap_chan_lock(chan);
1436 /* PSM must be odd and lsb of upper byte must be 0 */
1437 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1438 chan->chan_type != L2CAP_CHAN_RAW) {
1443 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1448 switch (chan->mode) {
1449 case L2CAP_MODE_BASIC:
1451 case L2CAP_MODE_ERTM:
1452 case L2CAP_MODE_STREAMING:
1463 switch (sk->sk_state) {
1467 /* Already connecting */
1473 /* Already connected */
1489 /* Set destination address and psm */
1490 bacpy(&bt_sk(sk)->dst, dst);
1497 auth_type = l2cap_get_auth_type(chan);
1499 if (chan->dcid == L2CAP_CID_LE_DATA)
1500 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1501 chan->sec_level, auth_type);
1503 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1504 chan->sec_level, auth_type);
1507 err = PTR_ERR(hcon);
1511 conn = l2cap_conn_add(hcon, 0);
1518 if (hcon->type == LE_LINK) {
1521 if (!list_empty(&conn->chan_l)) {
1530 /* Update source addr of the socket */
1531 bacpy(src, conn->src);
1533 l2cap_chan_unlock(chan);
1534 l2cap_chan_add(conn, chan);
1535 l2cap_chan_lock(chan);
1537 l2cap_state_change(chan, BT_CONNECT);
1538 __set_chan_timer(chan, sk->sk_sndtimeo);
1540 if (hcon->state == BT_CONNECTED) {
1541 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1542 __clear_chan_timer(chan);
1543 if (l2cap_chan_check_security(chan))
1544 l2cap_state_change(chan, BT_CONNECTED);
1546 l2cap_do_start(chan);
1552 l2cap_chan_unlock(chan);
1553 hci_dev_unlock(hdev);
1558 int __l2cap_wait_ack(struct sock *sk)
1560 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1561 DECLARE_WAITQUEUE(wait, current);
1565 add_wait_queue(sk_sleep(sk), &wait);
1566 set_current_state(TASK_INTERRUPTIBLE);
1567 while (chan->unacked_frames > 0 && chan->conn) {
1571 if (signal_pending(current)) {
1572 err = sock_intr_errno(timeo);
1577 timeo = schedule_timeout(timeo);
1579 set_current_state(TASK_INTERRUPTIBLE);
1581 err = sock_error(sk);
1585 set_current_state(TASK_RUNNING);
1586 remove_wait_queue(sk_sleep(sk), &wait);
1590 static void l2cap_monitor_timeout(struct work_struct *work)
1592 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1593 monitor_timer.work);
1595 BT_DBG("chan %p", chan);
1597 l2cap_chan_lock(chan);
1599 if (chan->retry_count >= chan->remote_max_tx) {
1600 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1601 l2cap_chan_unlock(chan);
1602 l2cap_chan_put(chan);
1606 chan->retry_count++;
1607 __set_monitor_timer(chan);
1609 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1610 l2cap_chan_unlock(chan);
1611 l2cap_chan_put(chan);
1614 static void l2cap_retrans_timeout(struct work_struct *work)
1616 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1617 retrans_timer.work);
1619 BT_DBG("chan %p", chan);
1621 l2cap_chan_lock(chan);
1623 chan->retry_count = 1;
1624 __set_monitor_timer(chan);
1626 set_bit(CONN_WAIT_F, &chan->conn_state);
1628 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1630 l2cap_chan_unlock(chan);
1631 l2cap_chan_put(chan);
1634 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1636 struct sk_buff *skb;
1638 while ((skb = skb_peek(&chan->tx_q)) &&
1639 chan->unacked_frames) {
1640 if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
1643 skb = skb_dequeue(&chan->tx_q);
1646 chan->unacked_frames--;
1649 if (!chan->unacked_frames)
1650 __clear_retrans_timer(chan);
1653 static void l2cap_streaming_send(struct l2cap_chan *chan)
1655 struct sk_buff *skb;
1659 while ((skb = skb_dequeue(&chan->tx_q))) {
1660 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1661 control |= __set_txseq(chan, chan->next_tx_seq);
1662 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1663 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1665 if (chan->fcs == L2CAP_FCS_CRC16) {
1666 fcs = crc16(0, (u8 *)skb->data,
1667 skb->len - L2CAP_FCS_SIZE);
1668 put_unaligned_le16(fcs,
1669 skb->data + skb->len - L2CAP_FCS_SIZE);
1672 l2cap_do_send(chan, skb);
1674 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1678 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1680 struct sk_buff *skb, *tx_skb;
1684 skb = skb_peek(&chan->tx_q);
1688 while (bt_cb(skb)->control.txseq != tx_seq) {
1689 if (skb_queue_is_last(&chan->tx_q, skb))
1692 skb = skb_queue_next(&chan->tx_q, skb);
1695 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1696 chan->remote_max_tx) {
1697 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1701 tx_skb = skb_clone(skb, GFP_ATOMIC);
1702 bt_cb(skb)->control.retries++;
1704 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1705 control &= __get_sar_mask(chan);
1707 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1708 control |= __set_ctrl_final(chan);
1710 control |= __set_reqseq(chan, chan->buffer_seq);
1711 control |= __set_txseq(chan, tx_seq);
1713 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1715 if (chan->fcs == L2CAP_FCS_CRC16) {
1716 fcs = crc16(0, (u8 *)tx_skb->data,
1717 tx_skb->len - L2CAP_FCS_SIZE);
1718 put_unaligned_le16(fcs,
1719 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1722 l2cap_do_send(chan, tx_skb);
1725 static int l2cap_ertm_send(struct l2cap_chan *chan)
1727 struct sk_buff *skb, *tx_skb;
1732 if (chan->state != BT_CONNECTED)
1735 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1738 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1740 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1741 chan->remote_max_tx) {
1742 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1746 tx_skb = skb_clone(skb, GFP_ATOMIC);
1748 bt_cb(skb)->control.retries++;
1750 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1751 control &= __get_sar_mask(chan);
1753 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1754 control |= __set_ctrl_final(chan);
1756 control |= __set_reqseq(chan, chan->buffer_seq);
1757 control |= __set_txseq(chan, chan->next_tx_seq);
1758 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1760 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1762 if (chan->fcs == L2CAP_FCS_CRC16) {
1763 fcs = crc16(0, (u8 *)skb->data,
1764 tx_skb->len - L2CAP_FCS_SIZE);
1765 put_unaligned_le16(fcs, skb->data +
1766 tx_skb->len - L2CAP_FCS_SIZE);
1769 l2cap_do_send(chan, tx_skb);
1771 __set_retrans_timer(chan);
1773 bt_cb(skb)->control.txseq = chan->next_tx_seq;
1775 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1777 if (bt_cb(skb)->control.retries == 1) {
1778 chan->unacked_frames++;
1781 __clear_ack_timer(chan);
1784 chan->frames_sent++;
1786 if (skb_queue_is_last(&chan->tx_q, skb))
1787 chan->tx_send_head = NULL;
1789 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1795 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1799 if (!skb_queue_empty(&chan->tx_q))
1800 chan->tx_send_head = chan->tx_q.next;
1802 chan->next_tx_seq = chan->expected_ack_seq;
1803 ret = l2cap_ertm_send(chan);
1807 static void __l2cap_send_ack(struct l2cap_chan *chan)
1811 control |= __set_reqseq(chan, chan->buffer_seq);
1813 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1814 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1815 set_bit(CONN_RNR_SENT, &chan->conn_state);
1816 l2cap_send_sframe(chan, control);
1820 if (l2cap_ertm_send(chan) > 0)
1823 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1824 l2cap_send_sframe(chan, control);
1827 static void l2cap_send_ack(struct l2cap_chan *chan)
1829 __clear_ack_timer(chan);
1830 __l2cap_send_ack(chan);
1833 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1835 struct srej_list *tail;
1838 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1839 control |= __set_ctrl_final(chan);
1841 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1842 control |= __set_reqseq(chan, tail->tx_seq);
1844 l2cap_send_sframe(chan, control);
1847 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1848 struct msghdr *msg, int len,
1849 int count, struct sk_buff *skb)
1851 struct l2cap_conn *conn = chan->conn;
1852 struct sk_buff **frag;
1855 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1861 /* Continuation fragments (no L2CAP header) */
1862 frag = &skb_shinfo(skb)->frag_list;
1864 struct sk_buff *tmp;
1866 count = min_t(unsigned int, conn->mtu, len);
1868 tmp = chan->ops->alloc_skb(chan, count,
1869 msg->msg_flags & MSG_DONTWAIT);
1871 return PTR_ERR(tmp);
1875 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1878 (*frag)->priority = skb->priority;
1883 skb->len += (*frag)->len;
1884 skb->data_len += (*frag)->len;
1886 frag = &(*frag)->next;
1892 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1893 struct msghdr *msg, size_t len,
1896 struct l2cap_conn *conn = chan->conn;
1897 struct sk_buff *skb;
1898 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1899 struct l2cap_hdr *lh;
1901 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1903 count = min_t(unsigned int, (conn->mtu - hlen), len);
1905 skb = chan->ops->alloc_skb(chan, count + hlen,
1906 msg->msg_flags & MSG_DONTWAIT);
1910 skb->priority = priority;
1912 /* Create L2CAP header */
1913 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1914 lh->cid = cpu_to_le16(chan->dcid);
1915 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1916 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1918 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1919 if (unlikely(err < 0)) {
1921 return ERR_PTR(err);
1926 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1927 struct msghdr *msg, size_t len,
1930 struct l2cap_conn *conn = chan->conn;
1931 struct sk_buff *skb;
1933 struct l2cap_hdr *lh;
1935 BT_DBG("chan %p len %d", chan, (int)len);
1937 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1939 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1940 msg->msg_flags & MSG_DONTWAIT);
1944 skb->priority = priority;
1946 /* Create L2CAP header */
1947 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1948 lh->cid = cpu_to_le16(chan->dcid);
1949 lh->len = cpu_to_le16(len);
1951 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1952 if (unlikely(err < 0)) {
1954 return ERR_PTR(err);
1959 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1960 struct msghdr *msg, size_t len,
1963 struct l2cap_conn *conn = chan->conn;
1964 struct sk_buff *skb;
1965 int err, count, hlen;
1966 struct l2cap_hdr *lh;
1968 BT_DBG("chan %p len %d", chan, (int)len);
1971 return ERR_PTR(-ENOTCONN);
1973 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1974 hlen = L2CAP_EXT_HDR_SIZE;
1976 hlen = L2CAP_ENH_HDR_SIZE;
1979 hlen += L2CAP_SDULEN_SIZE;
1981 if (chan->fcs == L2CAP_FCS_CRC16)
1982 hlen += L2CAP_FCS_SIZE;
1984 count = min_t(unsigned int, (conn->mtu - hlen), len);
1986 skb = chan->ops->alloc_skb(chan, count + hlen,
1987 msg->msg_flags & MSG_DONTWAIT);
1991 /* Create L2CAP header */
1992 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1993 lh->cid = cpu_to_le16(chan->dcid);
1994 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1996 __put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));
1999 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2001 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2002 if (unlikely(err < 0)) {
2004 return ERR_PTR(err);
2007 if (chan->fcs == L2CAP_FCS_CRC16)
2008 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
2010 bt_cb(skb)->control.retries = 0;
2014 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2015 struct sk_buff_head *seg_queue,
2016 struct msghdr *msg, size_t len)
2018 struct sk_buff *skb;
2024 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2026 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2027 * so fragmented skbs are not used. The HCI layer's handling
2028 * of fragmented skbs is not compatible with ERTM's queueing.
2031 /* PDU size is derived from the HCI MTU */
2032 pdu_len = chan->conn->mtu;
2034 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2036 /* Adjust for largest possible L2CAP overhead. */
2037 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
2039 /* Remote device may have requested smaller PDUs */
2040 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2042 if (len <= pdu_len) {
2043 sar = L2CAP_SAR_UNSEGMENTED;
2047 sar = L2CAP_SAR_START;
2049 pdu_len -= L2CAP_SDULEN_SIZE;
2053 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2056 __skb_queue_purge(seg_queue);
2057 return PTR_ERR(skb);
2060 bt_cb(skb)->control.sar = sar;
2061 __skb_queue_tail(seg_queue, skb);
2066 pdu_len += L2CAP_SDULEN_SIZE;
2069 if (len <= pdu_len) {
2070 sar = L2CAP_SAR_END;
2073 sar = L2CAP_SAR_CONTINUE;
2080 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2083 struct sk_buff *skb;
2085 struct sk_buff_head seg_queue;
2087 /* Connectionless channel */
2088 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2089 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2091 return PTR_ERR(skb);
2093 l2cap_do_send(chan, skb);
2097 switch (chan->mode) {
2098 case L2CAP_MODE_BASIC:
2099 /* Check outgoing MTU */
2100 if (len > chan->omtu)
2103 /* Create a basic PDU */
2104 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2106 return PTR_ERR(skb);
2108 l2cap_do_send(chan, skb);
2112 case L2CAP_MODE_ERTM:
2113 case L2CAP_MODE_STREAMING:
2114 /* Check outgoing MTU */
2115 if (len > chan->omtu) {
2120 __skb_queue_head_init(&seg_queue);
2122 /* Do segmentation before calling in to the state machine,
2123 * since it's possible to block while waiting for memory
2126 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2128 /* The channel could have been closed while segmenting,
2129 * check that it is still connected.
2131 if (chan->state != BT_CONNECTED) {
2132 __skb_queue_purge(&seg_queue);
2139 if (chan->mode == L2CAP_MODE_ERTM) {
2140 err = l2cap_tx(chan, 0, &seg_queue,
2141 L2CAP_EV_DATA_REQUEST);
2143 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2144 l2cap_streaming_send(chan);
2150 /* If the skbs were not queued for sending, they'll still be in
2151 * seg_queue and need to be purged.
2153 __skb_queue_purge(&seg_queue);
2157 BT_DBG("bad state %1.1x", chan->mode);
2164 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2166 struct sk_buff *acked_skb;
2169 BT_DBG("chan %p, reqseq %d", chan, reqseq);
2171 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2174 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2175 chan->expected_ack_seq, chan->unacked_frames);
2177 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2178 ackseq = __next_seq(chan, ackseq)) {
2180 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2182 skb_unlink(acked_skb, &chan->tx_q);
2183 kfree_skb(acked_skb);
2184 chan->unacked_frames--;
2188 chan->expected_ack_seq = reqseq;
2190 if (chan->unacked_frames == 0)
2191 __clear_retrans_timer(chan);
2193 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
2196 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2198 BT_DBG("chan %p", chan);
2200 chan->expected_tx_seq = chan->buffer_seq;
2201 l2cap_seq_list_clear(&chan->srej_list);
2202 skb_queue_purge(&chan->srej_q);
2203 chan->rx_state = L2CAP_RX_STATE_RECV;
2206 static int l2cap_tx_state_xmit(struct l2cap_chan *chan,
2207 struct l2cap_ctrl *control,
2208 struct sk_buff_head *skbs, u8 event)
2212 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2216 case L2CAP_EV_DATA_REQUEST:
2217 if (chan->tx_send_head == NULL)
2218 chan->tx_send_head = skb_peek(skbs);
2220 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2221 l2cap_ertm_send(chan);
2223 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2224 BT_DBG("Enter LOCAL_BUSY");
2225 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2227 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2228 /* The SREJ_SENT state must be aborted if we are to
2229 * enter the LOCAL_BUSY state.
2231 l2cap_abort_rx_srej_sent(chan);
2234 l2cap_send_ack(chan);
2237 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2238 BT_DBG("Exit LOCAL_BUSY");
2239 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2241 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2242 struct l2cap_ctrl local_control;
2244 memset(&local_control, 0, sizeof(local_control));
2245 local_control.sframe = 1;
2246 local_control.super = L2CAP_SUPER_RR;
2247 local_control.poll = 1;
2248 local_control.reqseq = chan->buffer_seq;
2249 l2cap_send_sframe(chan, 0);
2251 chan->retry_count = 1;
2252 __set_monitor_timer(chan);
2253 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2256 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2257 l2cap_process_reqseq(chan, control->reqseq);
2259 case L2CAP_EV_EXPLICIT_POLL:
2260 l2cap_send_rr_or_rnr(chan, 1);
2261 chan->retry_count = 1;
2262 __set_monitor_timer(chan);
2263 __clear_ack_timer(chan);
2264 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2266 case L2CAP_EV_RETRANS_TO:
2267 l2cap_send_rr_or_rnr(chan, 1);
2268 chan->retry_count = 1;
2269 __set_monitor_timer(chan);
2270 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2272 case L2CAP_EV_RECV_FBIT:
2273 /* Nothing to process */
2282 static int l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2283 struct l2cap_ctrl *control,
2284 struct sk_buff_head *skbs, u8 event)
2288 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2292 case L2CAP_EV_DATA_REQUEST:
2293 if (chan->tx_send_head == NULL)
2294 chan->tx_send_head = skb_peek(skbs);
2295 /* Queue data, but don't send. */
2296 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2298 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2299 BT_DBG("Enter LOCAL_BUSY");
2300 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2302 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2303 /* The SREJ_SENT state must be aborted if we are to
2304 * enter the LOCAL_BUSY state.
2306 l2cap_abort_rx_srej_sent(chan);
2309 l2cap_send_ack(chan);
2312 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2313 BT_DBG("Exit LOCAL_BUSY");
2314 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2316 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2317 struct l2cap_ctrl local_control;
2318 memset(&local_control, 0, sizeof(local_control));
2319 local_control.sframe = 1;
2320 local_control.super = L2CAP_SUPER_RR;
2321 local_control.poll = 1;
2322 local_control.reqseq = chan->buffer_seq;
2323 l2cap_send_sframe(chan, 0);
2325 chan->retry_count = 1;
2326 __set_monitor_timer(chan);
2327 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2330 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2331 l2cap_process_reqseq(chan, control->reqseq);
2335 case L2CAP_EV_RECV_FBIT:
2336 if (control && control->final) {
2337 __clear_monitor_timer(chan);
2338 if (chan->unacked_frames > 0)
2339 __set_retrans_timer(chan);
2340 chan->retry_count = 0;
2341 chan->tx_state = L2CAP_TX_STATE_XMIT;
2342 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2345 case L2CAP_EV_EXPLICIT_POLL:
2348 case L2CAP_EV_MONITOR_TO:
2349 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2350 l2cap_send_rr_or_rnr(chan, 1);
2351 __set_monitor_timer(chan);
2352 chan->retry_count++;
2354 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2364 static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2365 struct sk_buff_head *skbs, u8 event)
2369 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2370 chan, control, skbs, event, chan->tx_state);
2372 switch (chan->tx_state) {
2373 case L2CAP_TX_STATE_XMIT:
2374 err = l2cap_tx_state_xmit(chan, control, skbs, event);
2376 case L2CAP_TX_STATE_WAIT_F:
2377 err = l2cap_tx_state_wait_f(chan, control, skbs, event);
2387 /* Copy frame to all raw sockets on that connection */
2388 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2390 struct sk_buff *nskb;
2391 struct l2cap_chan *chan;
2393 BT_DBG("conn %p", conn);
2395 mutex_lock(&conn->chan_lock);
2397 list_for_each_entry(chan, &conn->chan_l, list) {
2398 struct sock *sk = chan->sk;
2399 if (chan->chan_type != L2CAP_CHAN_RAW)
2402 /* Don't send frame to the socket it came from */
2405 nskb = skb_clone(skb, GFP_ATOMIC);
2409 if (chan->ops->recv(chan->data, nskb))
2413 mutex_unlock(&conn->chan_lock);
2416 /* ---- L2CAP signalling commands ---- */
2417 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2418 u8 code, u8 ident, u16 dlen, void *data)
2420 struct sk_buff *skb, **frag;
2421 struct l2cap_cmd_hdr *cmd;
2422 struct l2cap_hdr *lh;
2425 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2426 conn, code, ident, dlen);
2428 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2429 count = min_t(unsigned int, conn->mtu, len);
2431 skb = bt_skb_alloc(count, GFP_ATOMIC);
2435 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2436 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2438 if (conn->hcon->type == LE_LINK)
2439 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2441 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2443 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2446 cmd->len = cpu_to_le16(dlen);
2449 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2450 memcpy(skb_put(skb, count), data, count);
2456 /* Continuation fragments (no L2CAP header) */
2457 frag = &skb_shinfo(skb)->frag_list;
2459 count = min_t(unsigned int, conn->mtu, len);
2461 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2465 memcpy(skb_put(*frag, count), data, count);
2470 frag = &(*frag)->next;
2480 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2482 struct l2cap_conf_opt *opt = *ptr;
2485 len = L2CAP_CONF_OPT_SIZE + opt->len;
2493 *val = *((u8 *) opt->val);
2497 *val = get_unaligned_le16(opt->val);
2501 *val = get_unaligned_le32(opt->val);
2505 *val = (unsigned long) opt->val;
2509 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2513 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2515 struct l2cap_conf_opt *opt = *ptr;
2517 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2524 *((u8 *) opt->val) = val;
2528 put_unaligned_le16(val, opt->val);
2532 put_unaligned_le32(val, opt->val);
2536 memcpy(opt->val, (void *) val, len);
2540 *ptr += L2CAP_CONF_OPT_SIZE + len;
2543 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2545 struct l2cap_conf_efs efs;
2547 switch (chan->mode) {
2548 case L2CAP_MODE_ERTM:
2549 efs.id = chan->local_id;
2550 efs.stype = chan->local_stype;
2551 efs.msdu = cpu_to_le16(chan->local_msdu);
2552 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2553 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2554 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2557 case L2CAP_MODE_STREAMING:
2559 efs.stype = L2CAP_SERV_BESTEFFORT;
2560 efs.msdu = cpu_to_le16(chan->local_msdu);
2561 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2570 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2571 (unsigned long) &efs);
2574 static void l2cap_ack_timeout(struct work_struct *work)
2576 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2579 BT_DBG("chan %p", chan);
2581 l2cap_chan_lock(chan);
2583 __l2cap_send_ack(chan);
2585 l2cap_chan_unlock(chan);
2587 l2cap_chan_put(chan);
2590 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2594 chan->next_tx_seq = 0;
2595 chan->expected_tx_seq = 0;
2596 chan->expected_ack_seq = 0;
2597 chan->unacked_frames = 0;
2598 chan->buffer_seq = 0;
2599 chan->num_acked = 0;
2600 chan->frames_sent = 0;
2601 chan->last_acked_seq = 0;
2603 chan->sdu_last_frag = NULL;
2606 skb_queue_head_init(&chan->tx_q);
2608 if (chan->mode != L2CAP_MODE_ERTM)
2611 chan->rx_state = L2CAP_RX_STATE_RECV;
2612 chan->tx_state = L2CAP_TX_STATE_XMIT;
2614 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2615 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2616 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2618 skb_queue_head_init(&chan->srej_q);
2620 INIT_LIST_HEAD(&chan->srej_l);
2621 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2625 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2627 l2cap_seq_list_free(&chan->srej_list);
2632 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2635 case L2CAP_MODE_STREAMING:
2636 case L2CAP_MODE_ERTM:
2637 if (l2cap_mode_supported(mode, remote_feat_mask))
2641 return L2CAP_MODE_BASIC;
2645 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2647 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2650 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2652 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2655 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2657 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2658 __l2cap_ews_supported(chan)) {
2659 /* use extended control field */
2660 set_bit(FLAG_EXT_CTRL, &chan->flags);
2661 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2663 chan->tx_win = min_t(u16, chan->tx_win,
2664 L2CAP_DEFAULT_TX_WINDOW);
2665 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2669 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2671 struct l2cap_conf_req *req = data;
2672 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2673 void *ptr = req->data;
2676 BT_DBG("chan %p", chan);
2678 if (chan->num_conf_req || chan->num_conf_rsp)
2681 switch (chan->mode) {
2682 case L2CAP_MODE_STREAMING:
2683 case L2CAP_MODE_ERTM:
2684 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2687 if (__l2cap_efs_supported(chan))
2688 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2692 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2697 if (chan->imtu != L2CAP_DEFAULT_MTU)
2698 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2700 switch (chan->mode) {
2701 case L2CAP_MODE_BASIC:
2702 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2703 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2706 rfc.mode = L2CAP_MODE_BASIC;
2708 rfc.max_transmit = 0;
2709 rfc.retrans_timeout = 0;
2710 rfc.monitor_timeout = 0;
2711 rfc.max_pdu_size = 0;
2713 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2714 (unsigned long) &rfc);
2717 case L2CAP_MODE_ERTM:
2718 rfc.mode = L2CAP_MODE_ERTM;
2719 rfc.max_transmit = chan->max_tx;
2720 rfc.retrans_timeout = 0;
2721 rfc.monitor_timeout = 0;
2723 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2724 L2CAP_EXT_HDR_SIZE -
2727 rfc.max_pdu_size = cpu_to_le16(size);
2729 l2cap_txwin_setup(chan);
2731 rfc.txwin_size = min_t(u16, chan->tx_win,
2732 L2CAP_DEFAULT_TX_WINDOW);
2734 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2735 (unsigned long) &rfc);
2737 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2738 l2cap_add_opt_efs(&ptr, chan);
2740 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2743 if (chan->fcs == L2CAP_FCS_NONE ||
2744 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2745 chan->fcs = L2CAP_FCS_NONE;
2746 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2749 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2750 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2754 case L2CAP_MODE_STREAMING:
2755 rfc.mode = L2CAP_MODE_STREAMING;
2757 rfc.max_transmit = 0;
2758 rfc.retrans_timeout = 0;
2759 rfc.monitor_timeout = 0;
2761 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2762 L2CAP_EXT_HDR_SIZE -
2765 rfc.max_pdu_size = cpu_to_le16(size);
2767 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2768 (unsigned long) &rfc);
2770 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2771 l2cap_add_opt_efs(&ptr, chan);
2773 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2776 if (chan->fcs == L2CAP_FCS_NONE ||
2777 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2778 chan->fcs = L2CAP_FCS_NONE;
2779 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2784 req->dcid = cpu_to_le16(chan->dcid);
2785 req->flags = cpu_to_le16(0);
2790 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2792 struct l2cap_conf_rsp *rsp = data;
2793 void *ptr = rsp->data;
2794 void *req = chan->conf_req;
2795 int len = chan->conf_len;
2796 int type, hint, olen;
2798 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2799 struct l2cap_conf_efs efs;
2801 u16 mtu = L2CAP_DEFAULT_MTU;
2802 u16 result = L2CAP_CONF_SUCCESS;
2805 BT_DBG("chan %p", chan);
2807 while (len >= L2CAP_CONF_OPT_SIZE) {
2808 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2810 hint = type & L2CAP_CONF_HINT;
2811 type &= L2CAP_CONF_MASK;
2814 case L2CAP_CONF_MTU:
2818 case L2CAP_CONF_FLUSH_TO:
2819 chan->flush_to = val;
2822 case L2CAP_CONF_QOS:
2825 case L2CAP_CONF_RFC:
2826 if (olen == sizeof(rfc))
2827 memcpy(&rfc, (void *) val, olen);
2830 case L2CAP_CONF_FCS:
2831 if (val == L2CAP_FCS_NONE)
2832 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2835 case L2CAP_CONF_EFS:
2837 if (olen == sizeof(efs))
2838 memcpy(&efs, (void *) val, olen);
2841 case L2CAP_CONF_EWS:
2843 return -ECONNREFUSED;
2845 set_bit(FLAG_EXT_CTRL, &chan->flags);
2846 set_bit(CONF_EWS_RECV, &chan->conf_state);
2847 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2848 chan->remote_tx_win = val;
2855 result = L2CAP_CONF_UNKNOWN;
2856 *((u8 *) ptr++) = type;
2861 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2864 switch (chan->mode) {
2865 case L2CAP_MODE_STREAMING:
2866 case L2CAP_MODE_ERTM:
2867 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2868 chan->mode = l2cap_select_mode(rfc.mode,
2869 chan->conn->feat_mask);
2874 if (__l2cap_efs_supported(chan))
2875 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2877 return -ECONNREFUSED;
2880 if (chan->mode != rfc.mode)
2881 return -ECONNREFUSED;
2887 if (chan->mode != rfc.mode) {
2888 result = L2CAP_CONF_UNACCEPT;
2889 rfc.mode = chan->mode;
2891 if (chan->num_conf_rsp == 1)
2892 return -ECONNREFUSED;
2894 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2895 sizeof(rfc), (unsigned long) &rfc);
2898 if (result == L2CAP_CONF_SUCCESS) {
2899 /* Configure output options and let the other side know
2900 * which ones we don't like. */
2902 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2903 result = L2CAP_CONF_UNACCEPT;
2906 set_bit(CONF_MTU_DONE, &chan->conf_state);
2908 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2911 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2912 efs.stype != L2CAP_SERV_NOTRAFIC &&
2913 efs.stype != chan->local_stype) {
2915 result = L2CAP_CONF_UNACCEPT;
2917 if (chan->num_conf_req >= 1)
2918 return -ECONNREFUSED;
2920 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2922 (unsigned long) &efs);
2924 /* Send PENDING Conf Rsp */
2925 result = L2CAP_CONF_PENDING;
2926 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2931 case L2CAP_MODE_BASIC:
2932 chan->fcs = L2CAP_FCS_NONE;
2933 set_bit(CONF_MODE_DONE, &chan->conf_state);
2936 case L2CAP_MODE_ERTM:
2937 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2938 chan->remote_tx_win = rfc.txwin_size;
2940 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2942 chan->remote_max_tx = rfc.max_transmit;
2944 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2946 L2CAP_EXT_HDR_SIZE -
2949 rfc.max_pdu_size = cpu_to_le16(size);
2950 chan->remote_mps = size;
2952 rfc.retrans_timeout =
2953 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2954 rfc.monitor_timeout =
2955 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2957 set_bit(CONF_MODE_DONE, &chan->conf_state);
2959 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2960 sizeof(rfc), (unsigned long) &rfc);
2962 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2963 chan->remote_id = efs.id;
2964 chan->remote_stype = efs.stype;
2965 chan->remote_msdu = le16_to_cpu(efs.msdu);
2966 chan->remote_flush_to =
2967 le32_to_cpu(efs.flush_to);
2968 chan->remote_acc_lat =
2969 le32_to_cpu(efs.acc_lat);
2970 chan->remote_sdu_itime =
2971 le32_to_cpu(efs.sdu_itime);
2972 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2973 sizeof(efs), (unsigned long) &efs);
2977 case L2CAP_MODE_STREAMING:
2978 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2980 L2CAP_EXT_HDR_SIZE -
2983 rfc.max_pdu_size = cpu_to_le16(size);
2984 chan->remote_mps = size;
2986 set_bit(CONF_MODE_DONE, &chan->conf_state);
2988 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2989 sizeof(rfc), (unsigned long) &rfc);
2994 result = L2CAP_CONF_UNACCEPT;
2996 memset(&rfc, 0, sizeof(rfc));
2997 rfc.mode = chan->mode;
3000 if (result == L2CAP_CONF_SUCCESS)
3001 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3003 rsp->scid = cpu_to_le16(chan->dcid);
3004 rsp->result = cpu_to_le16(result);
3005 rsp->flags = cpu_to_le16(0x0000);
3010 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3012 struct l2cap_conf_req *req = data;
3013 void *ptr = req->data;
3016 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3017 struct l2cap_conf_efs efs;
3019 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3021 while (len >= L2CAP_CONF_OPT_SIZE) {
3022 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3025 case L2CAP_CONF_MTU:
3026 if (val < L2CAP_DEFAULT_MIN_MTU) {
3027 *result = L2CAP_CONF_UNACCEPT;
3028 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3031 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3034 case L2CAP_CONF_FLUSH_TO:
3035 chan->flush_to = val;
3036 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3040 case L2CAP_CONF_RFC:
3041 if (olen == sizeof(rfc))
3042 memcpy(&rfc, (void *)val, olen);
3044 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3045 rfc.mode != chan->mode)
3046 return -ECONNREFUSED;
3050 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3051 sizeof(rfc), (unsigned long) &rfc);
3054 case L2CAP_CONF_EWS:
3055 chan->tx_win = min_t(u16, val,
3056 L2CAP_DEFAULT_EXT_WINDOW);
3057 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3061 case L2CAP_CONF_EFS:
3062 if (olen == sizeof(efs))
3063 memcpy(&efs, (void *)val, olen);
3065 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3066 efs.stype != L2CAP_SERV_NOTRAFIC &&
3067 efs.stype != chan->local_stype)
3068 return -ECONNREFUSED;
3070 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3071 sizeof(efs), (unsigned long) &efs);
3076 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3077 return -ECONNREFUSED;
3079 chan->mode = rfc.mode;
3081 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3083 case L2CAP_MODE_ERTM:
3084 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3085 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3086 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3088 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3089 chan->local_msdu = le16_to_cpu(efs.msdu);
3090 chan->local_sdu_itime =
3091 le32_to_cpu(efs.sdu_itime);
3092 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3093 chan->local_flush_to =
3094 le32_to_cpu(efs.flush_to);
3098 case L2CAP_MODE_STREAMING:
3099 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3103 req->dcid = cpu_to_le16(chan->dcid);
3104 req->flags = cpu_to_le16(0x0000);
3109 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3111 struct l2cap_conf_rsp *rsp = data;
3112 void *ptr = rsp->data;
3114 BT_DBG("chan %p", chan);
3116 rsp->scid = cpu_to_le16(chan->dcid);
3117 rsp->result = cpu_to_le16(result);
3118 rsp->flags = cpu_to_le16(flags);
3123 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3125 struct l2cap_conn_rsp rsp;
3126 struct l2cap_conn *conn = chan->conn;
3129 rsp.scid = cpu_to_le16(chan->dcid);
3130 rsp.dcid = cpu_to_le16(chan->scid);
3131 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3132 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3133 l2cap_send_cmd(conn, chan->ident,
3134 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3136 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3139 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3140 l2cap_build_conf_req(chan, buf), buf);
3141 chan->num_conf_req++;
3144 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3148 struct l2cap_conf_rfc rfc;
3150 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3152 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3155 while (len >= L2CAP_CONF_OPT_SIZE) {
3156 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3159 case L2CAP_CONF_RFC:
3160 if (olen == sizeof(rfc))
3161 memcpy(&rfc, (void *)val, olen);
3166 /* Use sane default values in case a misbehaving remote device
3167 * did not send an RFC option.
3169 rfc.mode = chan->mode;
3170 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3171 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3172 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3174 BT_ERR("Expected RFC option was not found, using defaults");
3178 case L2CAP_MODE_ERTM:
3179 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3180 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3181 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3183 case L2CAP_MODE_STREAMING:
3184 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3188 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3190 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3192 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3195 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3196 cmd->ident == conn->info_ident) {
3197 cancel_delayed_work(&conn->info_timer);
3199 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3200 conn->info_ident = 0;
3202 l2cap_conn_start(conn);
3208 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3210 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3211 struct l2cap_conn_rsp rsp;
3212 struct l2cap_chan *chan = NULL, *pchan;
3213 struct sock *parent, *sk = NULL;
3214 int result, status = L2CAP_CS_NO_INFO;
3216 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3217 __le16 psm = req->psm;
3219 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3221 /* Check if we have socket listening on psm */
3222 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3224 result = L2CAP_CR_BAD_PSM;
3230 mutex_lock(&conn->chan_lock);
3233 /* Check if the ACL is secure enough (if not SDP) */
3234 if (psm != cpu_to_le16(0x0001) &&
3235 !hci_conn_check_link_mode(conn->hcon)) {
3236 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3237 result = L2CAP_CR_SEC_BLOCK;
3241 result = L2CAP_CR_NO_MEM;
3243 /* Check for backlog size */
3244 if (sk_acceptq_is_full(parent)) {
3245 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3249 chan = pchan->ops->new_connection(pchan->data);
3255 /* Check if we already have channel with that dcid */
3256 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3257 sock_set_flag(sk, SOCK_ZAPPED);
3258 chan->ops->close(chan->data);
3262 hci_conn_hold(conn->hcon);
3264 bacpy(&bt_sk(sk)->src, conn->src);
3265 bacpy(&bt_sk(sk)->dst, conn->dst);
3269 bt_accept_enqueue(parent, sk);
3271 __l2cap_chan_add(conn, chan);
3275 __set_chan_timer(chan, sk->sk_sndtimeo);
3277 chan->ident = cmd->ident;
3279 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3280 if (l2cap_chan_check_security(chan)) {
3281 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3282 __l2cap_state_change(chan, BT_CONNECT2);
3283 result = L2CAP_CR_PEND;
3284 status = L2CAP_CS_AUTHOR_PEND;
3285 parent->sk_data_ready(parent, 0);
3287 __l2cap_state_change(chan, BT_CONFIG);
3288 result = L2CAP_CR_SUCCESS;
3289 status = L2CAP_CS_NO_INFO;
3292 __l2cap_state_change(chan, BT_CONNECT2);
3293 result = L2CAP_CR_PEND;
3294 status = L2CAP_CS_AUTHEN_PEND;
3297 __l2cap_state_change(chan, BT_CONNECT2);
3298 result = L2CAP_CR_PEND;
3299 status = L2CAP_CS_NO_INFO;
3303 release_sock(parent);
3304 mutex_unlock(&conn->chan_lock);
3307 rsp.scid = cpu_to_le16(scid);
3308 rsp.dcid = cpu_to_le16(dcid);
3309 rsp.result = cpu_to_le16(result);
3310 rsp.status = cpu_to_le16(status);
3311 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3313 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3314 struct l2cap_info_req info;
3315 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3317 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3318 conn->info_ident = l2cap_get_ident(conn);
3320 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3322 l2cap_send_cmd(conn, conn->info_ident,
3323 L2CAP_INFO_REQ, sizeof(info), &info);
3326 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3327 result == L2CAP_CR_SUCCESS) {
3329 set_bit(CONF_REQ_SENT, &chan->conf_state);
3330 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3331 l2cap_build_conf_req(chan, buf), buf);
3332 chan->num_conf_req++;
3338 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3340 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3341 u16 scid, dcid, result, status;
3342 struct l2cap_chan *chan;
3346 scid = __le16_to_cpu(rsp->scid);
3347 dcid = __le16_to_cpu(rsp->dcid);
3348 result = __le16_to_cpu(rsp->result);
3349 status = __le16_to_cpu(rsp->status);
3351 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3352 dcid, scid, result, status);
3354 mutex_lock(&conn->chan_lock);
3357 chan = __l2cap_get_chan_by_scid(conn, scid);
3363 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3372 l2cap_chan_lock(chan);
3375 case L2CAP_CR_SUCCESS:
3376 l2cap_state_change(chan, BT_CONFIG);
3379 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3381 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3384 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3385 l2cap_build_conf_req(chan, req), req);
3386 chan->num_conf_req++;
3390 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3394 l2cap_chan_del(chan, ECONNREFUSED);
3398 l2cap_chan_unlock(chan);
3401 mutex_unlock(&conn->chan_lock);
3406 static inline void set_default_fcs(struct l2cap_chan *chan)
3408 /* FCS is enabled only in ERTM or streaming mode, if one or both
3411 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3412 chan->fcs = L2CAP_FCS_NONE;
3413 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3414 chan->fcs = L2CAP_FCS_CRC16;
3417 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3419 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3422 struct l2cap_chan *chan;
3425 dcid = __le16_to_cpu(req->dcid);
3426 flags = __le16_to_cpu(req->flags);
3428 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3430 chan = l2cap_get_chan_by_scid(conn, dcid);
3434 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3435 struct l2cap_cmd_rej_cid rej;
3437 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3438 rej.scid = cpu_to_le16(chan->scid);
3439 rej.dcid = cpu_to_le16(chan->dcid);
3441 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3446 /* Reject if config buffer is too small. */
3447 len = cmd_len - sizeof(*req);
3448 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3449 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3450 l2cap_build_conf_rsp(chan, rsp,
3451 L2CAP_CONF_REJECT, flags), rsp);
3456 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3457 chan->conf_len += len;
3459 if (flags & 0x0001) {
3460 /* Incomplete config. Send empty response. */
3461 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3462 l2cap_build_conf_rsp(chan, rsp,
3463 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3467 /* Complete config. */
3468 len = l2cap_parse_conf_req(chan, rsp);
3470 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3474 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3475 chan->num_conf_rsp++;
3477 /* Reset config buffer. */
3480 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3483 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3484 set_default_fcs(chan);
3486 l2cap_state_change(chan, BT_CONNECTED);
3488 if (chan->mode == L2CAP_MODE_ERTM ||
3489 chan->mode == L2CAP_MODE_STREAMING)
3490 err = l2cap_ertm_init(chan);
3493 l2cap_send_disconn_req(chan->conn, chan, -err);
3495 l2cap_chan_ready(chan);
3500 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3502 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3503 l2cap_build_conf_req(chan, buf), buf);
3504 chan->num_conf_req++;
3507 /* Got Conf Rsp PENDING from remote side and asume we sent
3508 Conf Rsp PENDING in the code above */
3509 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3510 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3512 /* check compatibility */
3514 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3515 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3517 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3518 l2cap_build_conf_rsp(chan, rsp,
3519 L2CAP_CONF_SUCCESS, 0x0000), rsp);
3523 l2cap_chan_unlock(chan);
3527 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3529 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3530 u16 scid, flags, result;
3531 struct l2cap_chan *chan;
3532 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3535 scid = __le16_to_cpu(rsp->scid);
3536 flags = __le16_to_cpu(rsp->flags);
3537 result = __le16_to_cpu(rsp->result);
3539 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3542 chan = l2cap_get_chan_by_scid(conn, scid);
3547 case L2CAP_CONF_SUCCESS:
3548 l2cap_conf_rfc_get(chan, rsp->data, len);
3549 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3552 case L2CAP_CONF_PENDING:
3553 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3555 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3558 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3561 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3565 /* check compatibility */
3567 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3568 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3570 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3571 l2cap_build_conf_rsp(chan, buf,
3572 L2CAP_CONF_SUCCESS, 0x0000), buf);
3576 case L2CAP_CONF_UNACCEPT:
3577 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3580 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3581 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3585 /* throw out any old stored conf requests */
3586 result = L2CAP_CONF_SUCCESS;
3587 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3590 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3594 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3595 L2CAP_CONF_REQ, len, req);
3596 chan->num_conf_req++;
3597 if (result != L2CAP_CONF_SUCCESS)
3603 l2cap_chan_set_err(chan, ECONNRESET);
3605 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3606 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3613 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3615 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3616 set_default_fcs(chan);
3618 l2cap_state_change(chan, BT_CONNECTED);
3619 if (chan->mode == L2CAP_MODE_ERTM ||
3620 chan->mode == L2CAP_MODE_STREAMING)
3621 err = l2cap_ertm_init(chan);
3624 l2cap_send_disconn_req(chan->conn, chan, -err);
3626 l2cap_chan_ready(chan);
3630 l2cap_chan_unlock(chan);
3634 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3636 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3637 struct l2cap_disconn_rsp rsp;
3639 struct l2cap_chan *chan;
3642 scid = __le16_to_cpu(req->scid);
3643 dcid = __le16_to_cpu(req->dcid);
3645 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3647 mutex_lock(&conn->chan_lock);
3649 chan = __l2cap_get_chan_by_scid(conn, dcid);
3651 mutex_unlock(&conn->chan_lock);
3655 l2cap_chan_lock(chan);
3659 rsp.dcid = cpu_to_le16(chan->scid);
3660 rsp.scid = cpu_to_le16(chan->dcid);
3661 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3664 sk->sk_shutdown = SHUTDOWN_MASK;
3667 l2cap_chan_hold(chan);
3668 l2cap_chan_del(chan, ECONNRESET);
3670 l2cap_chan_unlock(chan);
3672 chan->ops->close(chan->data);
3673 l2cap_chan_put(chan);
3675 mutex_unlock(&conn->chan_lock);
3680 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3682 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3684 struct l2cap_chan *chan;
3686 scid = __le16_to_cpu(rsp->scid);
3687 dcid = __le16_to_cpu(rsp->dcid);
3689 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3691 mutex_lock(&conn->chan_lock);
3693 chan = __l2cap_get_chan_by_scid(conn, scid);
3695 mutex_unlock(&conn->chan_lock);
3699 l2cap_chan_lock(chan);
3701 l2cap_chan_hold(chan);
3702 l2cap_chan_del(chan, 0);
3704 l2cap_chan_unlock(chan);
3706 chan->ops->close(chan->data);
3707 l2cap_chan_put(chan);
3709 mutex_unlock(&conn->chan_lock);
3714 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3716 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3719 type = __le16_to_cpu(req->type);
3721 BT_DBG("type 0x%4.4x", type);
3723 if (type == L2CAP_IT_FEAT_MASK) {
3725 u32 feat_mask = l2cap_feat_mask;
3726 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3727 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3728 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3730 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3733 feat_mask |= L2CAP_FEAT_EXT_FLOW
3734 | L2CAP_FEAT_EXT_WINDOW;
3736 put_unaligned_le32(feat_mask, rsp->data);
3737 l2cap_send_cmd(conn, cmd->ident,
3738 L2CAP_INFO_RSP, sizeof(buf), buf);
3739 } else if (type == L2CAP_IT_FIXED_CHAN) {
3741 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3744 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3746 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3748 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3749 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3750 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3751 l2cap_send_cmd(conn, cmd->ident,
3752 L2CAP_INFO_RSP, sizeof(buf), buf);
3754 struct l2cap_info_rsp rsp;
3755 rsp.type = cpu_to_le16(type);
3756 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3757 l2cap_send_cmd(conn, cmd->ident,
3758 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3764 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3766 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3769 type = __le16_to_cpu(rsp->type);
3770 result = __le16_to_cpu(rsp->result);
3772 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3774 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3775 if (cmd->ident != conn->info_ident ||
3776 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3779 cancel_delayed_work(&conn->info_timer);
3781 if (result != L2CAP_IR_SUCCESS) {
3782 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3783 conn->info_ident = 0;
3785 l2cap_conn_start(conn);
3791 case L2CAP_IT_FEAT_MASK:
3792 conn->feat_mask = get_unaligned_le32(rsp->data);
3794 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3795 struct l2cap_info_req req;
3796 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3798 conn->info_ident = l2cap_get_ident(conn);
3800 l2cap_send_cmd(conn, conn->info_ident,
3801 L2CAP_INFO_REQ, sizeof(req), &req);
3803 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3804 conn->info_ident = 0;
3806 l2cap_conn_start(conn);
3810 case L2CAP_IT_FIXED_CHAN:
3811 conn->fixed_chan_mask = rsp->data[0];
3812 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3813 conn->info_ident = 0;
3815 l2cap_conn_start(conn);
3822 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3823 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3826 struct l2cap_create_chan_req *req = data;
3827 struct l2cap_create_chan_rsp rsp;
3830 if (cmd_len != sizeof(*req))
3836 psm = le16_to_cpu(req->psm);
3837 scid = le16_to_cpu(req->scid);
3839 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3841 /* Placeholder: Always reject */
3843 rsp.scid = cpu_to_le16(scid);
3844 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3845 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3847 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3853 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3854 struct l2cap_cmd_hdr *cmd, void *data)
3856 BT_DBG("conn %p", conn);
3858 return l2cap_connect_rsp(conn, cmd, data);
3861 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3862 u16 icid, u16 result)
3864 struct l2cap_move_chan_rsp rsp;
3866 BT_DBG("icid %d, result %d", icid, result);
3868 rsp.icid = cpu_to_le16(icid);
3869 rsp.result = cpu_to_le16(result);
3871 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3874 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3875 struct l2cap_chan *chan, u16 icid, u16 result)
3877 struct l2cap_move_chan_cfm cfm;
3880 BT_DBG("icid %d, result %d", icid, result);
3882 ident = l2cap_get_ident(conn);
3884 chan->ident = ident;
3886 cfm.icid = cpu_to_le16(icid);
3887 cfm.result = cpu_to_le16(result);
3889 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3892 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3895 struct l2cap_move_chan_cfm_rsp rsp;
3897 BT_DBG("icid %d", icid);
3899 rsp.icid = cpu_to_le16(icid);
3900 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3903 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3904 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3906 struct l2cap_move_chan_req *req = data;
3908 u16 result = L2CAP_MR_NOT_ALLOWED;
3910 if (cmd_len != sizeof(*req))
3913 icid = le16_to_cpu(req->icid);
3915 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3920 /* Placeholder: Always refuse */
3921 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3926 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3927 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3929 struct l2cap_move_chan_rsp *rsp = data;
3932 if (cmd_len != sizeof(*rsp))
3935 icid = le16_to_cpu(rsp->icid);
3936 result = le16_to_cpu(rsp->result);
3938 BT_DBG("icid %d, result %d", icid, result);
3940 /* Placeholder: Always unconfirmed */
3941 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3946 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3947 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3949 struct l2cap_move_chan_cfm *cfm = data;
3952 if (cmd_len != sizeof(*cfm))
3955 icid = le16_to_cpu(cfm->icid);
3956 result = le16_to_cpu(cfm->result);
3958 BT_DBG("icid %d, result %d", icid, result);
3960 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3965 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3966 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3968 struct l2cap_move_chan_cfm_rsp *rsp = data;
3971 if (cmd_len != sizeof(*rsp))
3974 icid = le16_to_cpu(rsp->icid);
3976 BT_DBG("icid %d", icid);
3981 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3986 if (min > max || min < 6 || max > 3200)
3989 if (to_multiplier < 10 || to_multiplier > 3200)
3992 if (max >= to_multiplier * 8)
3995 max_latency = (to_multiplier * 8 / max) - 1;
3996 if (latency > 499 || latency > max_latency)
4002 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4003 struct l2cap_cmd_hdr *cmd, u8 *data)
4005 struct hci_conn *hcon = conn->hcon;
4006 struct l2cap_conn_param_update_req *req;
4007 struct l2cap_conn_param_update_rsp rsp;
4008 u16 min, max, latency, to_multiplier, cmd_len;
4011 if (!(hcon->link_mode & HCI_LM_MASTER))
4014 cmd_len = __le16_to_cpu(cmd->len);
4015 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4018 req = (struct l2cap_conn_param_update_req *) data;
4019 min = __le16_to_cpu(req->min);
4020 max = __le16_to_cpu(req->max);
4021 latency = __le16_to_cpu(req->latency);
4022 to_multiplier = __le16_to_cpu(req->to_multiplier);
4024 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4025 min, max, latency, to_multiplier);
4027 memset(&rsp, 0, sizeof(rsp));
4029 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4031 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4033 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4035 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4039 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
4044 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4045 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4049 switch (cmd->code) {
4050 case L2CAP_COMMAND_REJ:
4051 l2cap_command_rej(conn, cmd, data);
4054 case L2CAP_CONN_REQ:
4055 err = l2cap_connect_req(conn, cmd, data);
4058 case L2CAP_CONN_RSP:
4059 err = l2cap_connect_rsp(conn, cmd, data);
4062 case L2CAP_CONF_REQ:
4063 err = l2cap_config_req(conn, cmd, cmd_len, data);
4066 case L2CAP_CONF_RSP:
4067 err = l2cap_config_rsp(conn, cmd, data);
4070 case L2CAP_DISCONN_REQ:
4071 err = l2cap_disconnect_req(conn, cmd, data);
4074 case L2CAP_DISCONN_RSP:
4075 err = l2cap_disconnect_rsp(conn, cmd, data);
4078 case L2CAP_ECHO_REQ:
4079 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4082 case L2CAP_ECHO_RSP:
4085 case L2CAP_INFO_REQ:
4086 err = l2cap_information_req(conn, cmd, data);
4089 case L2CAP_INFO_RSP:
4090 err = l2cap_information_rsp(conn, cmd, data);
4093 case L2CAP_CREATE_CHAN_REQ:
4094 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4097 case L2CAP_CREATE_CHAN_RSP:
4098 err = l2cap_create_channel_rsp(conn, cmd, data);
4101 case L2CAP_MOVE_CHAN_REQ:
4102 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4105 case L2CAP_MOVE_CHAN_RSP:
4106 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4109 case L2CAP_MOVE_CHAN_CFM:
4110 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4113 case L2CAP_MOVE_CHAN_CFM_RSP:
4114 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4118 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4126 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4127 struct l2cap_cmd_hdr *cmd, u8 *data)
4129 switch (cmd->code) {
4130 case L2CAP_COMMAND_REJ:
4133 case L2CAP_CONN_PARAM_UPDATE_REQ:
4134 return l2cap_conn_param_update_req(conn, cmd, data);
4136 case L2CAP_CONN_PARAM_UPDATE_RSP:
4140 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4145 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4146 struct sk_buff *skb)
4148 u8 *data = skb->data;
4150 struct l2cap_cmd_hdr cmd;
4153 l2cap_raw_recv(conn, skb);
4155 while (len >= L2CAP_CMD_HDR_SIZE) {
4157 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4158 data += L2CAP_CMD_HDR_SIZE;
4159 len -= L2CAP_CMD_HDR_SIZE;
4161 cmd_len = le16_to_cpu(cmd.len);
4163 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
4165 if (cmd_len > len || !cmd.ident) {
4166 BT_DBG("corrupted command");
4170 if (conn->hcon->type == LE_LINK)
4171 err = l2cap_le_sig_cmd(conn, &cmd, data);
4173 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4176 struct l2cap_cmd_rej_unk rej;
4178 BT_ERR("Wrong link type (%d)", err);
4180 /* FIXME: Map err to a valid reason */
4181 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4182 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4192 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4194 u16 our_fcs, rcv_fcs;
4197 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4198 hdr_size = L2CAP_EXT_HDR_SIZE;
4200 hdr_size = L2CAP_ENH_HDR_SIZE;
4202 if (chan->fcs == L2CAP_FCS_CRC16) {
4203 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4204 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4205 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4207 if (our_fcs != rcv_fcs)
4213 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4217 chan->frames_sent = 0;
4219 control |= __set_reqseq(chan, chan->buffer_seq);
4221 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4222 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
4223 l2cap_send_sframe(chan, control);
4224 set_bit(CONN_RNR_SENT, &chan->conn_state);
4227 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
4228 l2cap_retransmit_frames(chan);
4230 l2cap_ertm_send(chan);
4232 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4233 chan->frames_sent == 0) {
4234 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4235 l2cap_send_sframe(chan, control);
4239 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
4241 struct sk_buff *next_skb;
4242 int tx_seq_offset, next_tx_seq_offset;
4244 bt_cb(skb)->control.txseq = tx_seq;
4245 bt_cb(skb)->control.sar = sar;
4247 next_skb = skb_peek(&chan->srej_q);
4249 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4252 if (bt_cb(next_skb)->control.txseq == tx_seq)
4255 next_tx_seq_offset = __seq_offset(chan,
4256 bt_cb(next_skb)->control.txseq, chan->buffer_seq);
4258 if (next_tx_seq_offset > tx_seq_offset) {
4259 __skb_queue_before(&chan->srej_q, next_skb, skb);
4263 if (skb_queue_is_last(&chan->srej_q, next_skb))
4266 next_skb = skb_queue_next(&chan->srej_q, next_skb);
4269 __skb_queue_tail(&chan->srej_q, skb);
4274 static void append_skb_frag(struct sk_buff *skb,
4275 struct sk_buff *new_frag, struct sk_buff **last_frag)
4277 /* skb->len reflects data in skb as well as all fragments
4278 * skb->data_len reflects only data in fragments
4280 if (!skb_has_frag_list(skb))
4281 skb_shinfo(skb)->frag_list = new_frag;
4283 new_frag->next = NULL;
4285 (*last_frag)->next = new_frag;
4286 *last_frag = new_frag;
4288 skb->len += new_frag->len;
4289 skb->data_len += new_frag->len;
4290 skb->truesize += new_frag->truesize;
4293 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
4297 switch (__get_ctrl_sar(chan, control)) {
4298 case L2CAP_SAR_UNSEGMENTED:
4302 err = chan->ops->recv(chan->data, skb);
4305 case L2CAP_SAR_START:
4309 chan->sdu_len = get_unaligned_le16(skb->data);
4310 skb_pull(skb, L2CAP_SDULEN_SIZE);
4312 if (chan->sdu_len > chan->imtu) {
4317 if (skb->len >= chan->sdu_len)
4321 chan->sdu_last_frag = skb;
4327 case L2CAP_SAR_CONTINUE:
4331 append_skb_frag(chan->sdu, skb,
4332 &chan->sdu_last_frag);
4335 if (chan->sdu->len >= chan->sdu_len)
4345 append_skb_frag(chan->sdu, skb,
4346 &chan->sdu_last_frag);
4349 if (chan->sdu->len != chan->sdu_len)
4352 err = chan->ops->recv(chan->data, chan->sdu);
4355 /* Reassembly complete */
4357 chan->sdu_last_frag = NULL;
4365 kfree_skb(chan->sdu);
4367 chan->sdu_last_frag = NULL;
4374 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
4376 BT_DBG("chan %p, Enter local busy", chan);
4378 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4379 l2cap_seq_list_clear(&chan->srej_list);
4381 __set_ack_timer(chan);
4384 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
4388 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4391 control = __set_reqseq(chan, chan->buffer_seq);
4392 control |= __set_ctrl_poll(chan);
4393 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4394 l2cap_send_sframe(chan, control);
4395 chan->retry_count = 1;
4397 __clear_retrans_timer(chan);
4398 __set_monitor_timer(chan);
4400 set_bit(CONN_WAIT_F, &chan->conn_state);
4403 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4404 clear_bit(CONN_RNR_SENT, &chan->conn_state);
4406 BT_DBG("chan %p, Exit local busy", chan);
4409 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4411 if (chan->mode == L2CAP_MODE_ERTM) {
4413 l2cap_ertm_enter_local_busy(chan);
4415 l2cap_ertm_exit_local_busy(chan);
4419 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
4421 struct sk_buff *skb;
4424 while ((skb = skb_peek(&chan->srej_q)) &&
4425 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4428 if (bt_cb(skb)->control.txseq != tx_seq)
4431 skb = skb_dequeue(&chan->srej_q);
4432 control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
4433 err = l2cap_reassemble_sdu(chan, skb, control);
4436 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4440 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
4441 tx_seq = __next_seq(chan, tx_seq);
4445 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4447 struct srej_list *l, *tmp;
4450 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
4451 if (l->tx_seq == tx_seq) {
4456 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4457 control |= __set_reqseq(chan, l->tx_seq);
4458 l2cap_send_sframe(chan, control);
4460 list_add_tail(&l->list, &chan->srej_l);
4464 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4466 struct srej_list *new;
4469 while (tx_seq != chan->expected_tx_seq) {
4470 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4471 control |= __set_reqseq(chan, chan->expected_tx_seq);
4472 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
4473 l2cap_send_sframe(chan, control);
4475 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
4479 new->tx_seq = chan->expected_tx_seq;
4481 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4483 list_add_tail(&new->list, &chan->srej_l);
4486 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4491 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4493 u16 tx_seq = __get_txseq(chan, rx_control);
4494 u16 req_seq = __get_reqseq(chan, rx_control);
4495 u8 sar = __get_ctrl_sar(chan, rx_control);
4496 int tx_seq_offset, expected_tx_seq_offset;
4497 int num_to_ack = (chan->tx_win/6) + 1;
4500 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
4501 tx_seq, rx_control);
4503 if (__is_ctrl_final(chan, rx_control) &&
4504 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4505 __clear_monitor_timer(chan);
4506 if (chan->unacked_frames > 0)
4507 __set_retrans_timer(chan);
4508 clear_bit(CONN_WAIT_F, &chan->conn_state);
4511 chan->expected_ack_seq = req_seq;
4512 l2cap_drop_acked_frames(chan);
4514 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4516 /* invalid tx_seq */
4517 if (tx_seq_offset >= chan->tx_win) {
4518 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4522 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4523 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4524 l2cap_send_ack(chan);
4528 if (tx_seq == chan->expected_tx_seq)
4531 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4532 struct srej_list *first;
4534 first = list_first_entry(&chan->srej_l,
4535 struct srej_list, list);
4536 if (tx_seq == first->tx_seq) {
4537 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4538 l2cap_check_srej_gap(chan, tx_seq);
4540 list_del(&first->list);
4543 if (list_empty(&chan->srej_l)) {
4544 chan->buffer_seq = chan->buffer_seq_srej;
4545 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
4546 l2cap_send_ack(chan);
4547 BT_DBG("chan %p, Exit SREJ_SENT", chan);
4550 struct srej_list *l;
4552 /* duplicated tx_seq */
4553 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
4556 list_for_each_entry(l, &chan->srej_l, list) {
4557 if (l->tx_seq == tx_seq) {
4558 l2cap_resend_srejframe(chan, tx_seq);
4563 err = l2cap_send_srejframe(chan, tx_seq);
4565 l2cap_send_disconn_req(chan->conn, chan, -err);
4570 expected_tx_seq_offset = __seq_offset(chan,
4571 chan->expected_tx_seq, chan->buffer_seq);
4573 /* duplicated tx_seq */
4574 if (tx_seq_offset < expected_tx_seq_offset)
4577 set_bit(CONN_SREJ_SENT, &chan->conn_state);
4579 BT_DBG("chan %p, Enter SREJ", chan);
4581 INIT_LIST_HEAD(&chan->srej_l);
4582 chan->buffer_seq_srej = chan->buffer_seq;
4584 __skb_queue_head_init(&chan->srej_q);
4585 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4587 /* Set P-bit only if there are some I-frames to ack. */
4588 if (__clear_ack_timer(chan))
4589 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4591 err = l2cap_send_srejframe(chan, tx_seq);
4593 l2cap_send_disconn_req(chan->conn, chan, -err);
4600 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4602 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4603 bt_cb(skb)->control.txseq = tx_seq;
4604 bt_cb(skb)->control.sar = sar;
4605 __skb_queue_tail(&chan->srej_q, skb);
4609 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4610 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4613 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4617 if (__is_ctrl_final(chan, rx_control)) {
4618 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4619 l2cap_retransmit_frames(chan);
4623 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4624 if (chan->num_acked == num_to_ack - 1)
4625 l2cap_send_ack(chan);
4627 __set_ack_timer(chan);
4636 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4638 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4639 __get_reqseq(chan, rx_control), rx_control);
4641 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4642 l2cap_drop_acked_frames(chan);
4644 if (__is_ctrl_poll(chan, rx_control)) {
4645 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4646 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4647 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4648 (chan->unacked_frames > 0))
4649 __set_retrans_timer(chan);
4651 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4652 l2cap_send_srejtail(chan);
4654 l2cap_send_i_or_rr_or_rnr(chan);
4657 } else if (__is_ctrl_final(chan, rx_control)) {
4658 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4660 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4661 l2cap_retransmit_frames(chan);
4664 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4665 (chan->unacked_frames > 0))
4666 __set_retrans_timer(chan);
4668 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4669 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4670 l2cap_send_ack(chan);
4672 l2cap_ertm_send(chan);
4676 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4678 u16 tx_seq = __get_reqseq(chan, rx_control);
4680 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4682 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4684 chan->expected_ack_seq = tx_seq;
4685 l2cap_drop_acked_frames(chan);
4687 if (__is_ctrl_final(chan, rx_control)) {
4688 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4689 l2cap_retransmit_frames(chan);
4691 l2cap_retransmit_frames(chan);
4693 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4694 set_bit(CONN_REJ_ACT, &chan->conn_state);
4697 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4699 u16 tx_seq = __get_reqseq(chan, rx_control);
4701 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4703 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4705 if (__is_ctrl_poll(chan, rx_control)) {
4706 chan->expected_ack_seq = tx_seq;
4707 l2cap_drop_acked_frames(chan);
4709 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4710 l2cap_retransmit_one_frame(chan, tx_seq);
4712 l2cap_ertm_send(chan);
4714 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4715 chan->srej_save_reqseq = tx_seq;
4716 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4718 } else if (__is_ctrl_final(chan, rx_control)) {
4719 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4720 chan->srej_save_reqseq == tx_seq)
4721 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4723 l2cap_retransmit_one_frame(chan, tx_seq);
4725 l2cap_retransmit_one_frame(chan, tx_seq);
4726 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4727 chan->srej_save_reqseq = tx_seq;
4728 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4733 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4735 u16 tx_seq = __get_reqseq(chan, rx_control);
4737 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4739 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4740 chan->expected_ack_seq = tx_seq;
4741 l2cap_drop_acked_frames(chan);
4743 if (__is_ctrl_poll(chan, rx_control))
4744 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4746 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4747 __clear_retrans_timer(chan);
4748 if (__is_ctrl_poll(chan, rx_control))
4749 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4753 if (__is_ctrl_poll(chan, rx_control)) {
4754 l2cap_send_srejtail(chan);
4756 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4757 l2cap_send_sframe(chan, rx_control);
4761 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4763 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4765 if (__is_ctrl_final(chan, rx_control) &&
4766 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4767 __clear_monitor_timer(chan);
4768 if (chan->unacked_frames > 0)
4769 __set_retrans_timer(chan);
4770 clear_bit(CONN_WAIT_F, &chan->conn_state);
4773 switch (__get_ctrl_super(chan, rx_control)) {
4774 case L2CAP_SUPER_RR:
4775 l2cap_data_channel_rrframe(chan, rx_control);
4778 case L2CAP_SUPER_REJ:
4779 l2cap_data_channel_rejframe(chan, rx_control);
4782 case L2CAP_SUPER_SREJ:
4783 l2cap_data_channel_srejframe(chan, rx_control);
4786 case L2CAP_SUPER_RNR:
4787 l2cap_data_channel_rnrframe(chan, rx_control);
4795 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4799 int len, next_tx_seq_offset, req_seq_offset;
4801 __unpack_control(chan, skb);
4803 control = __get_control(chan, skb->data);
4804 skb_pull(skb, __ctrl_size(chan));
4808 * We can just drop the corrupted I-frame here.
4809 * Receiver will miss it and start proper recovery
4810 * procedures and ask retransmission.
4812 if (l2cap_check_fcs(chan, skb))
4815 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4816 len -= L2CAP_SDULEN_SIZE;
4818 if (chan->fcs == L2CAP_FCS_CRC16)
4819 len -= L2CAP_FCS_SIZE;
4821 if (len > chan->mps) {
4822 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4826 req_seq = __get_reqseq(chan, control);
4828 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4830 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4831 chan->expected_ack_seq);
4833 /* check for invalid req-seq */
4834 if (req_seq_offset > next_tx_seq_offset) {
4835 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4839 if (!__is_sframe(chan, control)) {
4841 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4845 l2cap_data_channel_iframe(chan, control, skb);
4849 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4853 l2cap_data_channel_sframe(chan, control, skb);
4863 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4865 struct l2cap_chan *chan;
4870 chan = l2cap_get_chan_by_scid(conn, cid);
4872 BT_DBG("unknown cid 0x%4.4x", cid);
4873 /* Drop packet and return */
4878 BT_DBG("chan %p, len %d", chan, skb->len);
4880 if (chan->state != BT_CONNECTED)
4883 switch (chan->mode) {
4884 case L2CAP_MODE_BASIC:
4885 /* If socket recv buffers overflows we drop data here
4886 * which is *bad* because L2CAP has to be reliable.
4887 * But we don't have any other choice. L2CAP doesn't
4888 * provide flow control mechanism. */
4890 if (chan->imtu < skb->len)
4893 if (!chan->ops->recv(chan->data, skb))
4897 case L2CAP_MODE_ERTM:
4898 l2cap_ertm_data_rcv(chan, skb);
4902 case L2CAP_MODE_STREAMING:
4903 control = __get_control(chan, skb->data);
4904 skb_pull(skb, __ctrl_size(chan));
4907 if (l2cap_check_fcs(chan, skb))
4910 if (__is_sar_start(chan, control))
4911 len -= L2CAP_SDULEN_SIZE;
4913 if (chan->fcs == L2CAP_FCS_CRC16)
4914 len -= L2CAP_FCS_SIZE;
4916 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4919 tx_seq = __get_txseq(chan, control);
4921 if (chan->expected_tx_seq != tx_seq) {
4922 /* Frame(s) missing - must discard partial SDU */
4923 kfree_skb(chan->sdu);
4925 chan->sdu_last_frag = NULL;
4928 /* TODO: Notify userland of missing data */
4931 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4933 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4934 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4939 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4947 l2cap_chan_unlock(chan);
4952 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4954 struct l2cap_chan *chan;
4956 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
4960 BT_DBG("chan %p, len %d", chan, skb->len);
4962 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4965 if (chan->imtu < skb->len)
4968 if (!chan->ops->recv(chan->data, skb))
4977 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4978 struct sk_buff *skb)
4980 struct l2cap_chan *chan;
4982 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
4986 BT_DBG("chan %p, len %d", chan, skb->len);
4988 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4991 if (chan->imtu < skb->len)
4994 if (!chan->ops->recv(chan->data, skb))
5003 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5005 struct l2cap_hdr *lh = (void *) skb->data;
5009 skb_pull(skb, L2CAP_HDR_SIZE);
5010 cid = __le16_to_cpu(lh->cid);
5011 len = __le16_to_cpu(lh->len);
5013 if (len != skb->len) {
5018 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5021 case L2CAP_CID_LE_SIGNALING:
5022 case L2CAP_CID_SIGNALING:
5023 l2cap_sig_channel(conn, skb);
5026 case L2CAP_CID_CONN_LESS:
5027 psm = get_unaligned((__le16 *) skb->data);
5029 l2cap_conless_channel(conn, psm, skb);
5032 case L2CAP_CID_LE_DATA:
5033 l2cap_att_channel(conn, cid, skb);
5037 if (smp_sig_channel(conn, skb))
5038 l2cap_conn_del(conn->hcon, EACCES);
5042 l2cap_data_channel(conn, cid, skb);
5047 /* ---- L2CAP interface with lower layer (HCI) ---- */
5049 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5051 int exact = 0, lm1 = 0, lm2 = 0;
5052 struct l2cap_chan *c;
5054 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5056 /* Find listening sockets and check their link_mode */
5057 read_lock(&chan_list_lock);
5058 list_for_each_entry(c, &chan_list, global_l) {
5059 struct sock *sk = c->sk;
5061 if (c->state != BT_LISTEN)
5064 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5065 lm1 |= HCI_LM_ACCEPT;
5066 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5067 lm1 |= HCI_LM_MASTER;
5069 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5070 lm2 |= HCI_LM_ACCEPT;
5071 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5072 lm2 |= HCI_LM_MASTER;
5075 read_unlock(&chan_list_lock);
5077 return exact ? lm1 : lm2;
5080 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5082 struct l2cap_conn *conn;
5084 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5087 conn = l2cap_conn_add(hcon, status);
5089 l2cap_conn_ready(conn);
5091 l2cap_conn_del(hcon, bt_to_errno(status));
5096 int l2cap_disconn_ind(struct hci_conn *hcon)
5098 struct l2cap_conn *conn = hcon->l2cap_data;
5100 BT_DBG("hcon %p", hcon);
5103 return HCI_ERROR_REMOTE_USER_TERM;
5104 return conn->disc_reason;
5107 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5109 BT_DBG("hcon %p reason %d", hcon, reason);
5111 l2cap_conn_del(hcon, bt_to_errno(reason));
5115 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5117 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5120 if (encrypt == 0x00) {
5121 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5122 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5123 } else if (chan->sec_level == BT_SECURITY_HIGH)
5124 l2cap_chan_close(chan, ECONNREFUSED);
5126 if (chan->sec_level == BT_SECURITY_MEDIUM)
5127 __clear_chan_timer(chan);
5131 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5133 struct l2cap_conn *conn = hcon->l2cap_data;
5134 struct l2cap_chan *chan;
5139 BT_DBG("conn %p", conn);
5141 if (hcon->type == LE_LINK) {
5142 if (!status && encrypt)
5143 smp_distribute_keys(conn, 0);
5144 cancel_delayed_work(&conn->security_timer);
5147 mutex_lock(&conn->chan_lock);
5149 list_for_each_entry(chan, &conn->chan_l, list) {
5150 l2cap_chan_lock(chan);
5152 BT_DBG("chan->scid %d", chan->scid);
5154 if (chan->scid == L2CAP_CID_LE_DATA) {
5155 if (!status && encrypt) {
5156 chan->sec_level = hcon->sec_level;
5157 l2cap_chan_ready(chan);
5160 l2cap_chan_unlock(chan);
5164 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5165 l2cap_chan_unlock(chan);
5169 if (!status && (chan->state == BT_CONNECTED ||
5170 chan->state == BT_CONFIG)) {
5171 struct sock *sk = chan->sk;
5173 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5174 sk->sk_state_change(sk);
5176 l2cap_check_encryption(chan, encrypt);
5177 l2cap_chan_unlock(chan);
5181 if (chan->state == BT_CONNECT) {
5183 l2cap_send_conn_req(chan);
5185 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5187 } else if (chan->state == BT_CONNECT2) {
5188 struct sock *sk = chan->sk;
5189 struct l2cap_conn_rsp rsp;
5195 if (test_bit(BT_SK_DEFER_SETUP,
5196 &bt_sk(sk)->flags)) {
5197 struct sock *parent = bt_sk(sk)->parent;
5198 res = L2CAP_CR_PEND;
5199 stat = L2CAP_CS_AUTHOR_PEND;
5201 parent->sk_data_ready(parent, 0);
5203 __l2cap_state_change(chan, BT_CONFIG);
5204 res = L2CAP_CR_SUCCESS;
5205 stat = L2CAP_CS_NO_INFO;
5208 __l2cap_state_change(chan, BT_DISCONN);
5209 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5210 res = L2CAP_CR_SEC_BLOCK;
5211 stat = L2CAP_CS_NO_INFO;
5216 rsp.scid = cpu_to_le16(chan->dcid);
5217 rsp.dcid = cpu_to_le16(chan->scid);
5218 rsp.result = cpu_to_le16(res);
5219 rsp.status = cpu_to_le16(stat);
5220 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5224 l2cap_chan_unlock(chan);
5227 mutex_unlock(&conn->chan_lock);
5232 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5234 struct l2cap_conn *conn = hcon->l2cap_data;
5237 conn = l2cap_conn_add(hcon, 0);
5242 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5244 if (!(flags & ACL_CONT)) {
5245 struct l2cap_hdr *hdr;
5249 BT_ERR("Unexpected start frame (len %d)", skb->len);
5250 kfree_skb(conn->rx_skb);
5251 conn->rx_skb = NULL;
5253 l2cap_conn_unreliable(conn, ECOMM);
5256 /* Start fragment always begin with Basic L2CAP header */
5257 if (skb->len < L2CAP_HDR_SIZE) {
5258 BT_ERR("Frame is too short (len %d)", skb->len);
5259 l2cap_conn_unreliable(conn, ECOMM);
5263 hdr = (struct l2cap_hdr *) skb->data;
5264 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5266 if (len == skb->len) {
5267 /* Complete frame received */
5268 l2cap_recv_frame(conn, skb);
5272 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5274 if (skb->len > len) {
5275 BT_ERR("Frame is too long (len %d, expected len %d)",
5277 l2cap_conn_unreliable(conn, ECOMM);
5281 /* Allocate skb for the complete frame (with header) */
5282 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5286 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5288 conn->rx_len = len - skb->len;
5290 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5292 if (!conn->rx_len) {
5293 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5294 l2cap_conn_unreliable(conn, ECOMM);
5298 if (skb->len > conn->rx_len) {
5299 BT_ERR("Fragment is too long (len %d, expected %d)",
5300 skb->len, conn->rx_len);
5301 kfree_skb(conn->rx_skb);
5302 conn->rx_skb = NULL;
5304 l2cap_conn_unreliable(conn, ECOMM);
5308 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5310 conn->rx_len -= skb->len;
5312 if (!conn->rx_len) {
5313 /* Complete frame received */
5314 l2cap_recv_frame(conn, conn->rx_skb);
5315 conn->rx_skb = NULL;
5324 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5326 struct l2cap_chan *c;
5328 read_lock(&chan_list_lock);
5330 list_for_each_entry(c, &chan_list, global_l) {
5331 struct sock *sk = c->sk;
5333 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5334 batostr(&bt_sk(sk)->src),
5335 batostr(&bt_sk(sk)->dst),
5336 c->state, __le16_to_cpu(c->psm),
5337 c->scid, c->dcid, c->imtu, c->omtu,
5338 c->sec_level, c->mode);
5341 read_unlock(&chan_list_lock);
5346 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5348 return single_open(file, l2cap_debugfs_show, inode->i_private);
5351 static const struct file_operations l2cap_debugfs_fops = {
5352 .open = l2cap_debugfs_open,
5354 .llseek = seq_lseek,
5355 .release = single_release,
5358 static struct dentry *l2cap_debugfs;
5360 int __init l2cap_init(void)
5364 err = l2cap_init_sockets();
5369 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5370 bt_debugfs, NULL, &l2cap_debugfs_fops);
5372 BT_ERR("Failed to create L2CAP debug file");
5378 void l2cap_exit(void)
5380 debugfs_remove(l2cap_debugfs);
5381 l2cap_cleanup_sockets();
5384 module_param(disable_ertm, bool, 0644);
5385 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");