2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
/* Feature mask advertised to peers; fixed-channel support is always set. */
43 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Bitmap of supported fixed channels (signalling channel always present). */
44 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of every L2CAP channel in the system, guarded by the
 * chan_list_lock rwlock (readers may search concurrently). */
46 static LIST_HEAD(chan_list);
47 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for helpers defined later in this file. */
49 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
50 u8 code, u8 ident, u16 dlen, void *data);
51 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
53 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
54 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
55 struct l2cap_chan *chan, int err);
57 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
58 struct sk_buff_head *skbs, u8 event);
60 /* ---- L2CAP channels ---- */
/* Look up a channel on this connection by destination CID.
 * Walks conn->chan_l without taking conn->chan_lock; the "__" prefix
 * marks the caller-must-hold-lock variant (see l2cap_get_chan_by_scid). */
62 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
66 list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on this connection by source (local) CID.
 * Same locking contract as __l2cap_get_chan_by_dcid above. */
73 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
77 list_for_each_entry(c, &conn->chan_l, list) {
84 /* Find channel with given SCID.
85 * Returns locked channel. */
/* Locked wrapper: takes conn->chan_lock around the raw SCID lookup. */
86 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
90 mutex_lock(&conn->chan_lock);
91 c = __l2cap_get_chan_by_scid(conn, cid);
94 mutex_unlock(&conn->chan_lock);
/* Find the channel whose pending signalling command used @ident.
 * Unlocked list walk; caller is expected to hold conn->chan_lock. */
99 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
101 struct l2cap_chan *c;
103 list_for_each_entry(c, &conn->chan_l, list) {
104 if (c->ident == ident)
/* Find a channel bound to @psm on source address @src in the global list.
 * Caller must hold chan_list_lock (see l2cap_add_psm). */
110 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
112 struct l2cap_chan *c;
114 list_for_each_entry(c, &chan_list, global_l) {
115 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on @src. If @psm is zero, auto-allocate a dynamic
 * PSM from the 0x1001-0x10ff range; only odd values are valid PSMs,
 * hence the step of 2. Duplicate (psm, src) bindings are rejected. */
121 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
125 write_lock(&chan_list_lock);
127 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
140 for (p = 0x1001; p < 0x1100; p += 2)
141 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
142 chan->psm = cpu_to_le16(p);
143 chan->sport = cpu_to_le16(p);
150 write_unlock(&chan_list_lock);
/* Bind @chan to a fixed SCID (takes the global list write lock). */
154 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
156 write_lock(&chan_list_lock);
160 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic CID on @conn by linear scan of the
 * dynamic range. Caller must hold conn->chan_lock (uses the unlocked
 * SCID lookup). */
165 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
167 u16 cid = L2CAP_CID_DYN_START;
169 for (; cid < L2CAP_CID_DYN_END; cid++) {
170 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition @chan to @state and notify the channel ops; the "__"
 * variant assumes the caller already holds the needed socket lock. */
177 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
179 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
180 state_to_string(state));
183 chan->ops->state_change(chan, state);
/* Locked wrapper around __l2cap_state_change (locking lines elided in
 * this view — presumably lock_sock/release_sock around the call). */
186 static void l2cap_state_change(struct l2cap_chan *chan, int state)
188 struct sock *sk = chan->sk;
191 __l2cap_state_change(chan, state);
/* Record an error on the channel's socket; unlocked variant. */
195 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
197 struct sock *sk = chan->sk;
/* Locked wrapper around __l2cap_chan_set_err. */
202 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
204 struct sock *sk = chan->sk;
207 __l2cap_chan_set_err(chan, err);
/* Arm the ERTM retransmission timer, but not while the monitor timer is
 * already pending (monitor supersedes retrans per the ERTM state machine). */
211 static void __set_retrans_timer(struct l2cap_chan *chan)
213 if (!delayed_work_pending(&chan->monitor_timer) &&
214 chan->retrans_timeout) {
215 l2cap_set_timer(chan, &chan->retrans_timer,
216 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the ERTM monitor timer; the retrans timer is stopped first since
 * the two are mutually exclusive. */
220 static void __set_monitor_timer(struct l2cap_chan *chan)
222 __clear_retrans_timer(chan);
223 if (chan->monitor_timeout) {
224 l2cap_set_timer(chan, &chan->monitor_timer,
225 msecs_to_jiffies(chan->monitor_timeout));
/* Linear search of @head for the skb whose ERTM control txseq == @seq. */
229 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
234 skb_queue_walk(head, skb) {
235 if (bt_cb(skb)->control.txseq == seq)
242 /* ---- L2CAP sequence number lists ---- */
244 /* For ERTM, ordered lists of sequence numbers must be tracked for
245 * SREJ requests that are received and for frames that are to be
246 * retransmitted. These seq_list functions implement a singly-linked
247 * list in an array, where membership in the list can also be checked
248 * in constant time. Items can also be added to the tail of the list
249 * and removed from the head in constant time, without further memory
/* Initialize a seq_list: a singly-linked list of sequence numbers stored
 * in a flat array indexed by (seq & mask), giving O(1) membership tests
 * and O(1) append/pop. Returns 0 on success (error path elided here,
 * presumably -ENOMEM when kmalloc fails — confirm against full source). */
253 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
255 size_t alloc_size, i;
257 /* Allocated size is a power of 2 to map sequence numbers
258 * (which may be up to 14 bits) in to a smaller array that is
259 * sized for the negotiated ERTM transmit windows.
261 alloc_size = roundup_pow_of_two(size);
263 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
/* Power-of-two size makes (seq & mask) a cheap modulo. */
267 seq_list->mask = alloc_size - 1;
268 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
269 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
270 for (i = 0; i < alloc_size; i++)
271 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array (kfree(NULL) is a no-op). */
276 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
278 kfree(seq_list->list);
281 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
284 /* Constant-time check for list membership */
285 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove @seq from the list. Removing the head is O(1); removing an
 * interior element walks the chain to find the predecessor. Returns
 * L2CAP_SEQ_LIST_CLEAR when @seq is absent or the list is empty. */
288 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
290 u16 mask = seq_list->mask;
292 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
293 /* In case someone tries to pop the head of an empty list */
294 return L2CAP_SEQ_LIST_CLEAR;
295 } else if (seq_list->head == seq) {
296 /* Head can be removed in constant time */
297 seq_list->head = seq_list->list[seq & mask];
298 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
300 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
301 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
302 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
305 /* Walk the list to find the sequence number */
306 u16 prev = seq_list->head;
307 while (seq_list->list[prev & mask] != seq) {
308 prev = seq_list->list[prev & mask];
309 if (prev == L2CAP_SEQ_LIST_TAIL)
310 return L2CAP_SEQ_LIST_CLEAR;
313 /* Unlink the number from the list and clear it */
314 seq_list->list[prev & mask] = seq_list->list[seq & mask];
315 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
316 if (seq_list->tail == seq)
317 seq_list->tail = prev;
322 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
324 /* Remove the head in constant time */
325 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Reset the list to empty; O(size) because every slot is re-cleared. */
328 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
332 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
335 for (i = 0; i <= seq_list->mask; i++)
336 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq at the tail in O(1); duplicates are silently ignored
 * (membership slot already set). */
342 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
344 u16 mask = seq_list->mask;
346 /* All appends happen in constant time */
348 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
351 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
352 seq_list->head = seq;
354 seq_list->list[seq_list->tail & mask] = seq;
356 seq_list->tail = seq;
357 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with a
 * reason derived from its current state, then drop the reference the
 * timer held. Takes conn->chan_lock and the channel lock, in that order. */
360 static void l2cap_chan_timeout(struct work_struct *work)
362 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
364 struct l2cap_conn *conn = chan->conn;
367 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
369 mutex_lock(&conn->chan_lock);
370 l2cap_chan_lock(chan);
/* Established or configuring channels time out as refused connections. */
372 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
373 reason = ECONNREFUSED;
374 else if (chan->state == BT_CONNECT &&
375 chan->sec_level != BT_SECURITY_SDP)
376 reason = ECONNREFUSED;
380 l2cap_chan_close(chan, reason);
382 l2cap_chan_unlock(chan);
/* ops->close is called after dropping the channel lock. */
384 chan->ops->close(chan);
385 mutex_unlock(&conn->chan_lock);
/* Drop the reference taken when the timer was armed. */
387 l2cap_chan_put(chan);
/* Allocate and initialize a new channel, link it into the global channel
 * list, and return it with one reference held. Returns NULL path elided
 * in this view (presumably on allocation failure — confirm). */
390 struct l2cap_chan *l2cap_chan_create(void)
392 struct l2cap_chan *chan;
394 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
398 mutex_init(&chan->lock);
400 write_lock(&chan_list_lock);
401 list_add(&chan->global_l, &chan_list);
402 write_unlock(&chan_list_lock);
404 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
406 chan->state = BT_OPEN;
/* Initial reference; released by l2cap_chan_destroy/put. */
408 atomic_set(&chan->refcnt, 1);
410 /* This flag is cleared in l2cap_chan_ready() */
411 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
413 BT_DBG("chan %p", chan);
/* Unlink the channel from the global list and drop the creation ref. */
418 void l2cap_chan_destroy(struct l2cap_chan *chan)
420 write_lock(&chan_list_lock);
421 list_del(&chan->global_l);
422 write_unlock(&chan_list_lock);
424 l2cap_chan_put(chan);
/* Apply protocol default parameters to a freshly created channel:
 * CRC16 FCS, default ERTM tx window/max-tx, low security, force-active. */
427 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
429 chan->fcs = L2CAP_FCS_CRC16;
430 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
431 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
432 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
433 chan->sec_level = BT_SECURITY_LOW;
435 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach @chan to @conn: assign CIDs and default MTUs according to the
 * channel type (connection-oriented, connectionless, or raw), then take
 * a channel reference and link it onto conn->chan_l. Caller must hold
 * conn->chan_lock (see l2cap_chan_add below). */
438 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
440 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
441 __le16_to_cpu(chan->psm), chan->dcid);
443 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
447 switch (chan->chan_type) {
448 case L2CAP_CHAN_CONN_ORIENTED:
/* LE links use the fixed LE data CID on both ends. */
449 if (conn->hcon->type == LE_LINK) {
451 chan->omtu = L2CAP_LE_DEFAULT_MTU;
452 chan->scid = L2CAP_CID_LE_DATA;
453 chan->dcid = L2CAP_CID_LE_DATA;
455 /* Alloc CID for connection-oriented socket */
456 chan->scid = l2cap_alloc_cid(conn);
457 chan->omtu = L2CAP_DEFAULT_MTU;
461 case L2CAP_CHAN_CONN_LESS:
462 /* Connectionless socket */
463 chan->scid = L2CAP_CID_CONN_LESS;
464 chan->dcid = L2CAP_CID_CONN_LESS;
465 chan->omtu = L2CAP_DEFAULT_MTU;
469 /* Raw socket can send/recv signalling messages only */
470 chan->scid = L2CAP_CID_SIGNALING;
471 chan->dcid = L2CAP_CID_SIGNALING;
472 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort flow spec for the local side. */
475 chan->local_id = L2CAP_BESTEFFORT_ID;
476 chan->local_stype = L2CAP_SERV_BESTEFFORT;
477 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
478 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
479 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
480 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Reference held for membership on conn->chan_l; dropped in chan_del. */
482 l2cap_chan_hold(chan);
484 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add. */
487 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
489 mutex_lock(&conn->chan_lock);
490 __l2cap_chan_add(conn, chan);
491 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection: stop the channel timer, unlink from
 * conn->chan_l, drop the list reference and the hci_conn reference, run
 * the teardown op, and release mode-specific state (ERTM timers/queues,
 * streaming tx queue). */
494 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
496 struct l2cap_conn *conn = chan->conn;
498 __clear_chan_timer(chan);
500 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
503 /* Delete from channel list */
504 list_del(&chan->list);
/* Drop the reference taken in __l2cap_chan_add. */
506 l2cap_chan_put(chan);
509 hci_conn_put(conn->hcon);
512 if (chan->ops->teardown)
513 chan->ops->teardown(chan, err);
/* Nothing more to free if configuration never completed. */
515 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
519 case L2CAP_MODE_BASIC:
522 case L2CAP_MODE_ERTM:
523 __clear_retrans_timer(chan);
524 __clear_monitor_timer(chan);
525 __clear_ack_timer(chan);
527 skb_queue_purge(&chan->srej_q);
529 l2cap_seq_list_free(&chan->srej_list);
530 l2cap_seq_list_free(&chan->retrans_list);
/* fallthrough into streaming cleanup (shared tx queue purge). */
534 case L2CAP_MODE_STREAMING:
535 skb_queue_purge(&chan->tx_q);
/* Close @chan with @reason, following the signalling required by its
 * current state: connected/config channels on ACL send a disconnect
 * request; a BT_CONNECT2 channel (incoming, not yet accepted) answers
 * the pending connect request with a reject before being deleted. */
542 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
544 struct l2cap_conn *conn = chan->conn;
545 struct sock *sk = chan->sk;
547 BT_DBG("chan %p state %s sk %p", chan,
548 state_to_string(chan->state), sk);
550 switch (chan->state) {
552 if (chan->ops->teardown)
553 chan->ops->teardown(chan, 0);
/* Connected/config on ACL: send L2CAP disconnect and wait (timer). */
558 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
559 conn->hcon->type == ACL_LINK) {
560 __set_chan_timer(chan, sk->sk_sndtimeo);
561 l2cap_send_disconn_req(conn, chan, reason);
563 l2cap_chan_del(chan, reason);
/* BT_CONNECT2: reject the still-pending connection request. */
567 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
568 conn->hcon->type == ACL_LINK) {
569 struct l2cap_conn_rsp rsp;
/* Deferred setup is rejected as a security block, else bad PSM. */
572 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
573 result = L2CAP_CR_SEC_BLOCK;
575 result = L2CAP_CR_BAD_PSM;
576 l2cap_state_change(chan, BT_DISCONN);
/* Note swap: our dcid is the peer's scid and vice versa. */
578 rsp.scid = cpu_to_le16(chan->dcid);
579 rsp.dcid = cpu_to_le16(chan->scid);
580 rsp.result = cpu_to_le16(result);
581 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
582 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
586 l2cap_chan_del(chan, reason);
591 l2cap_chan_del(chan, reason);
595 if (chan->ops->teardown)
596 chan->ops->teardown(chan, 0);
/* Map the channel type and security level to an HCI authentication
 * requirement: raw channels request dedicated bonding, SDP requests no
 * bonding (and is demoted from LOW to the SDP pseudo-level), everything
 * else uses general bonding with MITM for high security. */
601 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
603 if (chan->chan_type == L2CAP_CHAN_RAW) {
604 switch (chan->sec_level) {
605 case BT_SECURITY_HIGH:
606 return HCI_AT_DEDICATED_BONDING_MITM;
607 case BT_SECURITY_MEDIUM:
608 return HCI_AT_DEDICATED_BONDING;
610 return HCI_AT_NO_BONDING;
612 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
613 if (chan->sec_level == BT_SECURITY_LOW)
614 chan->sec_level = BT_SECURITY_SDP;
616 if (chan->sec_level == BT_SECURITY_HIGH)
617 return HCI_AT_NO_BONDING_MITM;
619 return HCI_AT_NO_BONDING;
621 switch (chan->sec_level) {
622 case BT_SECURITY_HIGH:
623 return HCI_AT_GENERAL_BONDING_MITM;
624 case BT_SECURITY_MEDIUM:
625 return HCI_AT_GENERAL_BONDING;
627 return HCI_AT_NO_BONDING;
632 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level. */
633 int l2cap_chan_check_security(struct l2cap_chan *chan)
635 struct l2cap_conn *conn = chan->conn;
638 auth_type = l2cap_get_auth_type(chan);
640 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for @conn, wrapping
 * within the kernel-reserved 1-128 range under conn->lock. */
643 static u8 l2cap_get_ident(struct l2cap_conn *conn)
647 /* Get next available identificator.
648 * 1 - 128 are used by kernel.
649 * 129 - 199 are reserved.
650 * 200 - 254 are used by utilities like l2ping, etc.
653 spin_lock(&conn->lock);
655 if (++conn->tx_ident > 128)
660 spin_unlock(&conn->lock);
/* Build and transmit a signalling command on the connection's HCI
 * channel at maximum priority; no-flush ACL is used when the controller
 * supports it. */
665 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
667 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
670 BT_DBG("code 0x%2.2x", code);
675 if (lmp_no_flush_capable(conn->hcon->hdev))
676 flags = ACL_START_NO_FLUSH;
680 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
681 skb->priority = HCI_PRIO_MAX;
683 hci_send_acl(conn->hchan, skb, flags);
/* Transmit a data skb for @chan over ACL, honouring the channel's
 * flushable and force-active flags. */
686 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
688 struct hci_conn *hcon = chan->conn->hcon;
691 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
/* Use no-flush packets unless the channel explicitly allows flushing. */
694 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
695 lmp_no_flush_capable(hcon->hdev))
696 flags = ACL_START_NO_FLUSH;
700 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
701 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit ERTM enhanced control field into l2cap_ctrl:
 * S-frames carry poll/supervise bits, I-frames carry SAR/txseq. */
704 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
706 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
707 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
709 if (enh & L2CAP_CTRL_FRAME_TYPE) {
712 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
713 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
720 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
721 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode the 32-bit extended control field (same layout logic, wider
 * sequence numbers). */
728 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
730 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
731 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
733 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
736 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
737 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
744 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
745 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field from an incoming skb, choosing the
 * extended or enhanced form based on the channel's EXT_CTRL flag. */
752 static inline void __unpack_control(struct l2cap_chan *chan,
755 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
756 __unpack_extended_control(get_unaligned_le32(skb->data),
757 &bt_cb(skb)->control);
758 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
760 __unpack_enhanced_control(get_unaligned_le16(skb->data),
761 &bt_cb(skb)->control);
762 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode an l2cap_ctrl into the 32-bit extended control field
 * (inverse of __unpack_extended_control). */
766 static u32 __pack_extended_control(struct l2cap_ctrl *control)
770 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
771 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
773 if (control->sframe) {
774 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
775 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
776 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
778 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
779 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode into the 16-bit enhanced control field. */
785 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
789 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
790 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
792 if (control->sframe) {
793 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
794 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
795 packed |= L2CAP_CTRL_FRAME_TYPE;
797 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
798 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into the skb just after the basic
 * L2CAP header, choosing width from the channel's EXT_CTRL flag. */
804 static inline void __pack_control(struct l2cap_chan *chan,
805 struct l2cap_ctrl *control,
808 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
809 put_unaligned_le32(__pack_extended_control(control),
810 skb->data + L2CAP_HDR_SIZE);
812 put_unaligned_le16(__pack_enhanced_control(control),
813 skb->data + L2CAP_HDR_SIZE);
/* Build an ERTM S-frame PDU: basic header + (enhanced or extended)
 * control field + optional CRC16 FCS over the whole frame so far.
 * Returns ERR_PTR(-ENOMEM) on allocation failure. */
817 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
821 struct l2cap_hdr *lh;
824 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
825 hlen = L2CAP_EXT_HDR_SIZE;
827 hlen = L2CAP_ENH_HDR_SIZE;
829 if (chan->fcs == L2CAP_FCS_CRC16)
830 hlen += L2CAP_FCS_SIZE;
832 skb = bt_skb_alloc(hlen, GFP_KERNEL);
835 return ERR_PTR(-ENOMEM);
837 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
/* len field excludes the basic header itself. */
838 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
839 lh->cid = cpu_to_le16(chan->dcid);
841 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
842 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
844 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
846 if (chan->fcs == L2CAP_FCS_CRC16) {
847 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
848 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
/* S-frames go out at maximum priority. */
851 skb->priority = HCI_PRIO_MAX;
/* Send an ERTM supervisory frame described by @control, updating the
 * channel's F-bit, RNR-sent, and last-acked bookkeeping along the way. */
855 static void l2cap_send_sframe(struct l2cap_chan *chan,
856 struct l2cap_ctrl *control)
861 BT_DBG("chan %p, control %p", chan, control);
863 if (!control->sframe)
/* Consume a pending F-bit if one was requested. */
866 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
870 if (control->super == L2CAP_SUPER_RR)
871 clear_bit(CONN_RNR_SENT, &chan->conn_state);
872 else if (control->super == L2CAP_SUPER_RNR)
873 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* RR/RNR acknowledge reqseq, so the pending ack timer can be cleared;
 * SREJ does not acknowledge. */
875 if (control->super != L2CAP_SUPER_SREJ) {
876 chan->last_acked_seq = control->reqseq;
877 __clear_ack_timer(chan);
880 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
881 control->final, control->poll, control->super);
883 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
884 control_field = __pack_extended_control(control);
886 control_field = __pack_enhanced_control(control);
888 skb = l2cap_create_sframe_pdu(chan, control_field);
890 l2cap_do_send(chan, skb);
/* Send RR (receiver ready) or RNR (receiver not ready) depending on the
 * local-busy state, acknowledging up to buffer_seq; @poll sets the
 * P-bit (assignment line elided in this view). */
893 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
895 struct l2cap_ctrl control;
897 BT_DBG("chan %p, poll %d", chan, poll);
899 memset(&control, 0, sizeof(control));
903 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
904 control.super = L2CAP_SUPER_RNR;
906 control.super = L2CAP_SUPER_RR;
908 control.reqseq = chan->buffer_seq;
909 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding for this channel. */
912 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
914 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send an L2CAP connection request for @chan, allocating a fresh
 * signalling ident and marking the connect as pending. */
917 static void l2cap_send_conn_req(struct l2cap_chan *chan)
919 struct l2cap_conn *conn = chan->conn;
920 struct l2cap_conn_req req;
922 req.scid = cpu_to_le16(chan->scid);
925 chan->ident = l2cap_get_ident(conn);
927 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
929 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Move the channel to BT_CONNECTED and notify the owner. */
932 static void l2cap_chan_ready(struct l2cap_chan *chan)
934 /* This clears all conf flags, including CONF_NOT_COMPLETE */
935 chan->conf_state = 0;
936 __clear_chan_timer(chan);
938 chan->state = BT_CONNECTED;
940 if (chan->ops->ready)
941 chan->ops->ready(chan);
/* Kick off connection establishment for @chan. LE links are ready
 * immediately; on BR/EDR we either send the connect request (if the
 * feature-mask exchange already completed and security passes) or first
 * issue an information request for the peer's feature mask. */
944 static void l2cap_do_start(struct l2cap_chan *chan)
946 struct l2cap_conn *conn = chan->conn;
948 if (conn->hcon->type == LE_LINK) {
949 l2cap_chan_ready(chan);
953 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
954 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
957 if (l2cap_chan_check_security(chan) &&
958 __l2cap_no_conn_pending(chan))
959 l2cap_send_conn_req(chan);
961 struct l2cap_info_req req;
962 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
964 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
965 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the info response (see l2cap_info_timeout). */
967 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
969 l2cap_send_cmd(conn, conn->info_ident,
970 L2CAP_INFO_REQ, sizeof(req), &req);
/* True when @mode is usable given both local and remote feature masks. */
974 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
976 u32 local_feat_mask = l2cap_feat_mask;
978 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
981 case L2CAP_MODE_ERTM:
982 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
983 case L2CAP_MODE_STREAMING:
984 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP disconnect request for @chan, stopping any running ERTM
 * timers first, then move the channel to BT_DISCONN with @err recorded
 * on its socket. */
990 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
992 struct sock *sk = chan->sk;
993 struct l2cap_disconn_req req;
998 if (chan->mode == L2CAP_MODE_ERTM) {
999 __clear_retrans_timer(chan);
1000 __clear_monitor_timer(chan);
1001 __clear_ack_timer(chan);
1004 req.dcid = cpu_to_le16(chan->dcid);
1005 req.scid = cpu_to_le16(chan->scid);
1006 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1007 L2CAP_DISCONN_REQ, sizeof(req), &req);
1010 __l2cap_state_change(chan, BT_DISCONN);
1011 __l2cap_chan_set_err(chan, err);
1015 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on @conn forward once the
 * feature-mask exchange has completed: outgoing BT_CONNECT channels get
 * a connect request (or are closed if their mode is unsupported), and
 * incoming BT_CONNECT2 channels get their pending connect response,
 * followed by the first configuration request on success. */
1016 static void l2cap_conn_start(struct l2cap_conn *conn)
1018 struct l2cap_chan *chan, *tmp;
1020 BT_DBG("conn %p", conn);
1022 mutex_lock(&conn->chan_lock);
/* _safe walk: l2cap_chan_close below may unlink entries. */
1024 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1025 struct sock *sk = chan->sk;
1027 l2cap_chan_lock(chan);
1029 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1030 l2cap_chan_unlock(chan);
1034 if (chan->state == BT_CONNECT) {
1035 if (!l2cap_chan_check_security(chan) ||
1036 !__l2cap_no_conn_pending(chan)) {
1037 l2cap_chan_unlock(chan);
/* State-2 devices cannot fall back to basic mode: close instead. */
1041 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1042 && test_bit(CONF_STATE2_DEVICE,
1043 &chan->conf_state)) {
1044 l2cap_chan_close(chan, ECONNRESET);
1045 l2cap_chan_unlock(chan);
1049 l2cap_send_conn_req(chan);
1051 } else if (chan->state == BT_CONNECT2) {
1052 struct l2cap_conn_rsp rsp;
1054 rsp.scid = cpu_to_le16(chan->dcid);
1055 rsp.dcid = cpu_to_le16(chan->scid);
1057 if (l2cap_chan_check_security(chan)) {
/* Deferred setup: report pending and wake the listener. */
1059 if (test_bit(BT_SK_DEFER_SETUP,
1060 &bt_sk(sk)->flags)) {
1061 struct sock *parent = bt_sk(sk)->parent;
1062 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1063 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1065 parent->sk_data_ready(parent, 0);
1068 __l2cap_state_change(chan, BT_CONFIG);
1069 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1070 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1074 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1075 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1078 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only start configuration once, and only on success. */
1081 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1082 rsp.result != L2CAP_CR_SUCCESS) {
1083 l2cap_chan_unlock(chan);
1087 set_bit(CONF_REQ_SENT, &chan->conf_state);
1088 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1089 l2cap_build_conf_req(chan, buf), buf);
1090 chan->num_conf_req++;
1093 l2cap_chan_unlock(chan);
1096 mutex_unlock(&conn->chan_lock);
1099 /* Find socket with cid and source/destination bdaddr.
1100 * Returns closest match, locked.
/* Search the global channel list for a channel in @state bound to fixed
 * CID @cid. An exact src/dst address match wins immediately; otherwise
 * the best wildcard (BDADDR_ANY) match found is returned. */
1102 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1106 struct l2cap_chan *c, *c1 = NULL;
1108 read_lock(&chan_list_lock);
1110 list_for_each_entry(c, &chan_list, global_l) {
1111 struct sock *sk = c->sk;
1113 if (state && c->state != state)
1116 if (c->scid == cid) {
1117 int src_match, dst_match;
1118 int src_any, dst_any;
1121 src_match = !bacmp(&bt_sk(sk)->src, src);
1122 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1123 if (src_match && dst_match) {
1124 read_unlock(&chan_list_lock);
/* Remember the closest wildcard candidate for later. */
1129 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1130 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1131 if ((src_match && dst_any) || (src_any && dst_match) ||
1132 (src_any && dst_any))
1137 read_unlock(&chan_list_lock);
/* Handle an incoming LE connection: if a socket is listening on the LE
 * data CID, clone it into a new child channel, attach it to @conn, and
 * mark it ready immediately (no configuration phase on LE). */
1142 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1144 struct sock *parent, *sk;
1145 struct l2cap_chan *chan, *pchan;
1149 /* Check if we have socket listening on cid */
1150 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1151 conn->src, conn->dst);
1159 chan = pchan->ops->new_connection(pchan);
1165 hci_conn_hold(conn->hcon);
1167 bacpy(&bt_sk(sk)->src, conn->src);
1168 bacpy(&bt_sk(sk)->dst, conn->dst);
/* Queue the child on the listening socket's accept queue. */
1170 bt_accept_enqueue(parent, sk);
1172 l2cap_chan_add(conn, chan);
1174 l2cap_chan_ready(chan);
1177 release_sock(parent);
/* Called when the underlying link comes up: accept incoming LE
 * connections, start SMP security on outgoing LE links, then walk every
 * channel and either mark it ready (LE), mark non-connection-oriented
 * channels connected, or continue BR/EDR establishment via do_start. */
1180 static void l2cap_conn_ready(struct l2cap_conn *conn)
1182 struct l2cap_chan *chan;
1184 BT_DBG("conn %p", conn);
1186 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1187 l2cap_le_conn_ready(conn);
1189 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1190 smp_conn_security(conn, conn->hcon->pending_sec_level);
1192 mutex_lock(&conn->chan_lock);
1194 list_for_each_entry(chan, &conn->chan_l, list) {
1196 l2cap_chan_lock(chan);
1198 if (conn->hcon->type == LE_LINK) {
1199 if (smp_conn_security(conn, chan->sec_level))
1200 l2cap_chan_ready(chan);
1202 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1203 struct sock *sk = chan->sk;
1204 __clear_chan_timer(chan);
1206 __l2cap_state_change(chan, BT_CONNECTED);
1207 sk->sk_state_change(sk);
1210 } else if (chan->state == BT_CONNECT)
1211 l2cap_do_start(chan);
1213 l2cap_chan_unlock(chan);
1216 mutex_unlock(&conn->chan_lock);
1219 /* Notify sockets that we cannot guaranty reliability anymore */
/* Report @err on every channel that demanded reliable delivery
 * (FLAG_FORCE_RELIABLE) when the link can no longer guarantee it. */
1220 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1222 struct l2cap_chan *chan;
1224 BT_DBG("conn %p", conn);
1226 mutex_lock(&conn->chan_lock);
1228 list_for_each_entry(chan, &conn->chan_l, list) {
1229 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1230 __l2cap_chan_set_err(chan, err);
1233 mutex_unlock(&conn->chan_lock);
/* Info-request timeout: give up on the feature-mask exchange and start
 * the pending channels anyway with what we have. */
1236 static void l2cap_info_timeout(struct work_struct *work)
1238 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1241 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1242 conn->info_ident = 0;
1244 l2cap_conn_start(conn);
/* Tear down the L2CAP state attached to @hcon: delete every channel
 * (holding an extra ref across the close callback), release the HCI
 * channel, cancel pending timers, and destroy any in-flight SMP data. */
1247 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1249 struct l2cap_conn *conn = hcon->l2cap_data;
1250 struct l2cap_chan *chan, *l;
1255 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled frame. */
1257 kfree_skb(conn->rx_skb);
1259 mutex_lock(&conn->chan_lock);
1262 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
/* Hold an extra ref so the channel survives chan_del for ops->close. */
1263 l2cap_chan_hold(chan);
1264 l2cap_chan_lock(chan);
1266 l2cap_chan_del(chan, err);
1268 l2cap_chan_unlock(chan);
1270 chan->ops->close(chan);
1271 l2cap_chan_put(chan);
1274 mutex_unlock(&conn->chan_lock);
1276 hci_chan_del(conn->hchan);
1278 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1279 cancel_delayed_work_sync(&conn->info_timer);
1281 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1282 cancel_delayed_work_sync(&conn->security_timer);
1283 smp_chan_destroy(conn);
1286 hcon->l2cap_data = NULL;
/* SMP security timeout: drop the whole connection. */
1290 static void security_timeout(struct work_struct *work)
1292 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1293 security_timer.work);
1295 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon: allocate the
 * HCI channel and conn struct, pick the MTU for the link type, and set
 * up locks, the channel list, and the per-link-type delayed work. */
1298 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1300 struct l2cap_conn *conn = hcon->l2cap_data;
1301 struct hci_chan *hchan;
1306 hchan = hci_chan_create(hcon);
/* Allocation failed: undo the hci_chan before bailing out. */
1310 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1312 hci_chan_del(hchan);
1316 hcon->l2cap_data = conn;
1318 conn->hchan = hchan;
1320 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1322 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1323 conn->mtu = hcon->hdev->le_mtu;
1325 conn->mtu = hcon->hdev->acl_mtu;
1327 conn->src = &hcon->hdev->bdaddr;
1328 conn->dst = &hcon->dst;
1330 conn->feat_mask = 0;
1332 spin_lock_init(&conn->lock);
1333 mutex_init(&conn->chan_lock);
1335 INIT_LIST_HEAD(&conn->chan_l);
/* LE uses the SMP security timer; BR/EDR uses the info-request timer. */
1337 if (hcon->type == LE_LINK)
1338 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1340 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1342 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1347 /* ---- Socket interface ---- */
1349 /* Find socket with psm and source / destination bdaddr.
1350 * Returns closest match.
/* Search the global channel list for a channel in @state bound to @psm.
 * Same matching policy as l2cap_global_chan_by_scid: exact src/dst wins,
 * otherwise the closest wildcard (BDADDR_ANY) match is returned. */
1352 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1356 struct l2cap_chan *c, *c1 = NULL;
1358 read_lock(&chan_list_lock);
1360 list_for_each_entry(c, &chan_list, global_l) {
1361 struct sock *sk = c->sk;
1363 if (state && c->state != state)
1366 if (c->psm == psm) {
1367 int src_match, dst_match;
1368 int src_any, dst_any;
1371 src_match = !bacmp(&bt_sk(sk)->src, src);
1372 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1373 if (src_match && dst_match) {
1374 read_unlock(&chan_list_lock);
/* Track the best wildcard candidate seen so far. */
1379 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1380 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1381 if ((src_match && dst_any) || (src_any && dst_match) ||
1382 (src_any && dst_any))
1387 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for @chan to @dst on PSM @psm or
 * fixed CID @cid: validates PSM/mode/state, creates or reuses the HCI
 * connection (LE vs ACL chosen by the LE data CID), attaches the channel
 * to the resulting l2cap_conn, and starts establishment if the link is
 * already up. Returns 0 or a negative errno. */
1392 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1393 bdaddr_t *dst, u8 dst_type)
1395 struct sock *sk = chan->sk;
1396 bdaddr_t *src = &bt_sk(sk)->src;
1397 struct l2cap_conn *conn;
1398 struct hci_conn *hcon;
1399 struct hci_dev *hdev;
1403 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1404 dst_type, __le16_to_cpu(chan->psm));
1406 hdev = hci_get_route(dst, src);
1408 return -EHOSTUNREACH;
1412 l2cap_chan_lock(chan);
1414 /* PSM must be odd and lsb of upper byte must be 0 */
1415 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1416 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID. */
1421 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1426 switch (chan->mode) {
1427 case L2CAP_MODE_BASIC:
1429 case L2CAP_MODE_ERTM:
1430 case L2CAP_MODE_STREAMING:
1439 switch (chan->state) {
1443 /* Already connecting */
1448 /* Already connected */
1462 /* Set destination address and psm */
1464 bacpy(&bt_sk(sk)->dst, dst);
1470 auth_type = l2cap_get_auth_type(chan);
/* The LE data CID selects an LE link; anything else is ACL. */
1472 if (chan->dcid == L2CAP_CID_LE_DATA)
1473 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1474 chan->sec_level, auth_type);
1476 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1477 chan->sec_level, auth_type);
1480 err = PTR_ERR(hcon);
1484 conn = l2cap_conn_add(hcon, 0);
/* Only one LE data channel per connection is allowed. */
1491 if (hcon->type == LE_LINK) {
1494 if (!list_empty(&conn->chan_l)) {
1503 /* Update source addr of the socket */
1504 bacpy(src, conn->src);
/* chan_add takes conn->chan_lock, so drop the channel lock around it
 * to keep lock ordering (chan_lock then channel lock) consistent. */
1506 l2cap_chan_unlock(chan);
1507 l2cap_chan_add(conn, chan);
1508 l2cap_chan_lock(chan);
1510 l2cap_state_change(chan, BT_CONNECT);
1511 __set_chan_timer(chan, sk->sk_sndtimeo);
1513 if (hcon->state == BT_CONNECTED) {
1514 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1515 __clear_chan_timer(chan);
1516 if (l2cap_chan_check_security(chan))
1517 l2cap_state_change(chan, BT_CONNECTED);
1519 l2cap_do_start(chan);
1525 l2cap_chan_unlock(chan);
1526 hci_dev_unlock(hdev);
/* Block (interruptibly) until every outstanding ERTM I-frame on the
 * socket's channel has been acknowledged, the connection goes away, a
 * signal arrives, or a socket error is raised.  Returns 0 or a
 * negative errno.  NOTE(review): elided excerpt -- timeo setup and
 * the final return are not visible here.
 */
1531 int __l2cap_wait_ack(struct sock *sk)
1533 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1534 DECLARE_WAITQUEUE(wait, current);
1538 add_wait_queue(sk_sleep(sk), &wait);
1539 set_current_state(TASK_INTERRUPTIBLE);
/* Loop while frames remain unacked and the conn is still alive. */
1540 while (chan->unacked_frames > 0 && chan->conn) {
1544 if (signal_pending(current)) {
1545 err = sock_intr_errno(timeo);
1550 timeo = schedule_timeout(timeo);
1552 set_current_state(TASK_INTERRUPTIBLE);
1554 err = sock_error(sk);
1558 set_current_state(TASK_RUNNING);
1559 remove_wait_queue(sk_sleep(sk), &wait);
/* Delayed-work handler for the ERTM monitor timer.  Feeds the
 * L2CAP_EV_MONITOR_TO event into the tx state machine, then drops the
 * reference taken when the timer was armed.  The early
 * unlock/put pair (visible before l2cap_tx) is the bail-out path for a
 * channel that is no longer connected -- the guard condition itself is
 * elided from this excerpt.
 */
1563 static void l2cap_monitor_timeout(struct work_struct *work)
1565 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1566 monitor_timer.work);
1568 BT_DBG("chan %p", chan);
1570 l2cap_chan_lock(chan);
/* Bail-out path (condition elided in this excerpt). */
1573 l2cap_chan_unlock(chan);
1574 l2cap_chan_put(chan);
1578 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1580 l2cap_chan_unlock(chan);
1581 l2cap_chan_put(chan);
/* Delayed-work handler for the ERTM retransmission timer.  Mirrors
 * l2cap_monitor_timeout() but injects L2CAP_EV_RETRANS_TO into the tx
 * state machine.  The first unlock/put pair is the early-exit path for
 * a dead channel (its guard condition is elided in this excerpt).
 */
1584 static void l2cap_retrans_timeout(struct work_struct *work)
1586 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1587 retrans_timer.work);
1589 BT_DBG("chan %p", chan);
1591 l2cap_chan_lock(chan);
/* Bail-out path (condition elided in this excerpt). */
1594 l2cap_chan_unlock(chan);
1595 l2cap_chan_put(chan);
1599 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1600 l2cap_chan_unlock(chan);
1601 l2cap_chan_put(chan);
/* Transmit queued Streaming-mode I-frames.  @skbs is spliced onto the
 * channel tx queue, then each frame is stamped with the next txseq,
 * gets its control field packed, an optional CRC16 FCS appended, and
 * is handed to l2cap_do_send().  Streaming mode never retransmits, so
 * frames are dequeued (not kept) and retries stays at 1.
 */
1604 static void l2cap_streaming_send(struct l2cap_chan *chan,
1605 struct sk_buff_head *skbs)
1607 struct sk_buff *skb;
1608 struct l2cap_ctrl *control;
1610 BT_DBG("chan %p, skbs %p", chan, skbs);
1612 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1614 while (!skb_queue_empty(&chan->tx_q)) {
1616 skb = skb_dequeue(&chan->tx_q);
1618 bt_cb(skb)->control.retries = 1;
1619 control = &bt_cb(skb)->control;
/* Streaming mode carries no ack info: reqseq is always 0. */
1621 control->reqseq = 0;
1622 control->txseq = chan->next_tx_seq;
1624 __pack_control(chan, control, skb);
/* Append the frame check sequence when CRC16 FCS is negotiated. */
1626 if (chan->fcs == L2CAP_FCS_CRC16) {
1627 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1628 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1631 l2cap_do_send(chan, skb);
1633 BT_DBG("Sent txseq %d", (int)control->txseq);
1635 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1636 chan->frames_sent++;
/* Transmit as many new ERTM I-frames as the remote tx window allows.
 * Frames stay on tx_q for possible retransmission; a clone is sent.
 * Stops when the queue runs dry, the window fills, or the tx state
 * machine leaves XMIT.  Returns the number of frames sent (the
 * counter's declaration/return lines are elided in this excerpt).
 */
1640 static int l2cap_ertm_send(struct l2cap_chan *chan)
1642 struct sk_buff *skb, *tx_skb;
1643 struct l2cap_ctrl *control;
1646 BT_DBG("chan %p", chan);
1648 if (chan->state != BT_CONNECTED)
/* Remote busy (RNR received): hold off sending new I-frames. */
1651 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1654 while (chan->tx_send_head &&
1655 chan->unacked_frames < chan->remote_tx_win &&
1656 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1658 skb = chan->tx_send_head;
1660 bt_cb(skb)->control.retries = 1;
1661 control = &bt_cb(skb)->control;
/* Piggy-back a pending F-bit on this I-frame if one is owed. */
1663 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Every I-frame also acks everything up to buffer_seq. */
1666 control->reqseq = chan->buffer_seq;
1667 chan->last_acked_seq = chan->buffer_seq;
1668 control->txseq = chan->next_tx_seq;
1670 __pack_control(chan, control, skb);
1672 if (chan->fcs == L2CAP_FCS_CRC16) {
1673 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1674 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1677 /* Clone after data has been modified. Data is assumed to be
1678 read-only (for locking purposes) on cloned sk_buffs.
1680 tx_skb = skb_clone(skb, GFP_KERNEL);
1685 __set_retrans_timer(chan);
1687 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1688 chan->unacked_frames++;
1689 chan->frames_sent++;
/* Advance tx_send_head; NULL marks the end of unsent frames. */
1692 if (skb_queue_is_last(&chan->tx_q, skb))
1693 chan->tx_send_head = NULL;
1695 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1697 l2cap_do_send(chan, tx_skb);
1698 BT_DBG("Sent txseq %d", (int)control->txseq);
1701 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
1702 (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.
 * Each frame's retry count is bumped (disconnecting if max_tx is
 * exceeded), its control field is refreshed with the current reqseq
 * and any pending F-bit, the FCS is recomputed, and the frame is sent.
 */
1707 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1709 struct l2cap_ctrl control;
1710 struct sk_buff *skb;
1711 struct sk_buff *tx_skb;
1714 BT_DBG("chan %p", chan);
/* No retransmissions while the remote side reports busy. */
1716 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1719 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1720 seq = l2cap_seq_list_pop(&chan->retrans_list);
1722 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1724 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1729 bt_cb(skb)->control.retries++;
1730 control = bt_cb(skb)->control;
/* Give up and disconnect once the retry limit is exhausted. */
1732 if (chan->max_tx != 0 &&
1733 bt_cb(skb)->control.retries > chan->max_tx) {
1734 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1735 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1736 l2cap_seq_list_clear(&chan->retrans_list);
1740 control.reqseq = chan->buffer_seq;
1741 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1746 if (skb_cloned(skb)) {
1747 /* Cloned sk_buffs are read-only, so we need a
1750 tx_skb = skb_copy(skb, GFP_ATOMIC);
1752 tx_skb = skb_clone(skb, GFP_ATOMIC);
/* Allocation failure: abandon the whole retransmit batch. */
1756 l2cap_seq_list_clear(&chan->retrans_list);
1760 /* Update skb contents */
1761 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1762 put_unaligned_le32(__pack_extended_control(&control),
1763 tx_skb->data + L2CAP_HDR_SIZE);
1765 put_unaligned_le16(__pack_enhanced_control(&control),
1766 tx_skb->data + L2CAP_HDR_SIZE);
/* FCS covers the rewritten control field, so recompute it. */
1769 if (chan->fcs == L2CAP_FCS_CRC16) {
1770 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1771 put_unaligned_le16(fcs, skb_put(tx_skb,
1775 l2cap_do_send(chan, tx_skb);
1777 BT_DBG("Resent txseq %d", control.txseq);
1779 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame named by control->reqseq: queue it on
 * the retrans list and run the resend engine.
 */
1783 static void l2cap_retransmit(struct l2cap_chan *chan,
1784 struct l2cap_ctrl *control)
1786 BT_DBG("chan %p, control %p", chan, control);
1788 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1789 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame starting at control->reqseq (a REJ
 * or poll response).  Rebuilds retrans_list by walking tx_q from the
 * matching txseq up to tx_send_head, then kicks the resend engine.
 */
1792 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1793 struct l2cap_ctrl *control)
1795 struct sk_buff *skb;
1797 BT_DBG("chan %p, control %p", chan, control);
1800 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Start from a clean list; stale entries would double-send. */
1802 l2cap_seq_list_clear(&chan->retrans_list);
1804 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1807 if (chan->unacked_frames) {
/* First walk: find where reqseq (or the unsent boundary) begins. */
1808 skb_queue_walk(&chan->tx_q, skb) {
1809 if (bt_cb(skb)->control.txseq == control->reqseq ||
1810 skb == chan->tx_send_head)
/* Second walk: queue everything already sent for retransmission. */
1814 skb_queue_walk_from(&chan->tx_q, skb) {
1815 if (skb == chan->tx_send_head)
1818 l2cap_seq_list_append(&chan->retrans_list,
1819 bt_cb(skb)->control.txseq);
1822 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  Sends RNR when locally busy,
 * otherwise prefers piggy-backing the ack on outgoing I-frames; if
 * none were sent, sends an explicit RR once the number of unacked
 * received frames reaches 3/4 of the tx window, else (re)arms the ack
 * timer to batch the ack.
 */
1826 static void l2cap_send_ack(struct l2cap_chan *chan)
1828 struct l2cap_ctrl control;
1829 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1830 chan->last_acked_seq);
1833 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1834 chan, chan->last_acked_seq, chan->buffer_seq);
1836 memset(&control, 0, sizeof(control));
/* Local busy: tell the peer to stop sending (RNR). */
1839 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1840 chan->rx_state == L2CAP_RX_STATE_RECV) {
1841 __clear_ack_timer(chan);
1842 control.super = L2CAP_SUPER_RNR;
1843 control.reqseq = chan->buffer_seq;
1844 l2cap_send_sframe(chan, &control);
1846 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1847 l2cap_ertm_send(chan);
1848 /* If any i-frames were sent, they included an ack */
1849 if (chan->buffer_seq == chan->last_acked_seq)
1853 /* Ack now if the tx window is 3/4ths full.
1854 * Calculate without mul or div
1856 threshold = chan->tx_win;
/* threshold = tx_win * 3 (then divided by 4 via shift, elided). */
1857 threshold += threshold << 1;
1860 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
1863 if (frames_to_ack >= threshold) {
1864 __clear_ack_timer(chan);
1865 control.super = L2CAP_SUPER_RR;
1866 control.reqseq = chan->buffer_seq;
1867 l2cap_send_sframe(chan, &control);
/* Below threshold: defer the ack via the ack timer. */
1872 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into skb's linear area, the remainder into a chain of
 * continuation fragments (each at most conn->mtu) hung off
 * skb_shinfo(skb)->frag_list.  Returns 0 on success or a negative
 * errno (fault on copy, or allocator failure propagated via PTR_ERR).
 */
1876 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1877 struct msghdr *msg, int len,
1878 int count, struct sk_buff *skb)
1880 struct l2cap_conn *conn = chan->conn;
1881 struct sk_buff **frag;
1884 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1890 /* Continuation fragments (no L2CAP header) */
1891 frag = &skb_shinfo(skb)->frag_list;
1893 struct sk_buff *tmp;
/* Each fragment is capped by the HCI connection MTU. */
1895 count = min_t(unsigned int, conn->mtu, len);
1897 tmp = chan->ops->alloc_skb(chan, count,
1898 msg->msg_flags & MSG_DONTWAIT);
1900 return PTR_ERR(tmp);
1904 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1907 (*frag)->priority = skb->priority;
/* Account fragment bytes on the head skb's totals. */
1912 skb->len += (*frag)->len;
1913 skb->data_len += (*frag)->len;
1915 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, followed by the user payload copied from @msg.  Returns
 * the skb or an ERR_PTR on allocation/copy failure.
 */
1921 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1922 struct msghdr *msg, size_t len,
1925 struct l2cap_conn *conn = chan->conn;
1926 struct sk_buff *skb;
1927 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1928 struct l2cap_hdr *lh;
1930 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
/* Linear part is bounded by the HCI MTU; rest goes to frag_list. */
1932 count = min_t(unsigned int, (conn->mtu - hlen), len);
1934 skb = chan->ops->alloc_skb(chan, count + hlen,
1935 msg->msg_flags & MSG_DONTWAIT);
1939 skb->priority = priority;
1941 /* Create L2CAP header */
1942 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1943 lh->cid = cpu_to_le16(chan->dcid);
/* Length field covers payload plus the PSM prefix. */
1944 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1945 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1947 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1948 if (unlikely(err < 0)) {
1950 return ERR_PTR(err);
/* Build a Basic-mode (B-frame) PDU: plain L2CAP header followed by
 * the user payload from @msg.  Returns the skb or an ERR_PTR.
 */
1955 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1956 struct msghdr *msg, size_t len,
1959 struct l2cap_conn *conn = chan->conn;
1960 struct sk_buff *skb;
1962 struct l2cap_hdr *lh;
1964 BT_DBG("chan %p len %d", chan, (int)len);
1966 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1968 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1969 msg->msg_flags & MSG_DONTWAIT);
1973 skb->priority = priority;
1975 /* Create L2CAP header */
1976 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1977 lh->cid = cpu_to_le16(chan->dcid);
1978 lh->len = cpu_to_le16(len);
1980 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1981 if (unlikely(err < 0)) {
1983 return ERR_PTR(err);
/* Build an ERTM/Streaming I-frame PDU.  Header size depends on
 * extended vs enhanced control field, an optional SDU-length field
 * (for SAR start frames, when sdulen is set) and FCS reservation.
 * The control field is zeroed here and filled in at send time.
 * Returns the skb or an ERR_PTR.
 */
1988 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1989 struct msghdr *msg, size_t len,
1992 struct l2cap_conn *conn = chan->conn;
1993 struct sk_buff *skb;
1994 int err, count, hlen;
1995 struct l2cap_hdr *lh;
1997 BT_DBG("chan %p len %d", chan, (int)len);
2000 return ERR_PTR(-ENOTCONN);
/* Extended control adds 2 bytes over the enhanced control field. */
2002 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2003 hlen = L2CAP_EXT_HDR_SIZE;
2005 hlen = L2CAP_ENH_HDR_SIZE;
2008 hlen += L2CAP_SDULEN_SIZE;
2010 if (chan->fcs == L2CAP_FCS_CRC16)
2011 hlen += L2CAP_FCS_SIZE;
2013 count = min_t(unsigned int, (conn->mtu - hlen), len);
2015 skb = chan->ops->alloc_skb(chan, count + hlen,
2016 msg->msg_flags & MSG_DONTWAIT);
2020 /* Create L2CAP header */
2021 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2022 lh->cid = cpu_to_le16(chan->dcid);
2023 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2025 /* Control header is populated later */
2026 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2027 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))";
2029 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* SAR "start" frames carry the total SDU length up front. */
2032 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2034 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2035 if (unlikely(err < 0)) {
2037 return ERR_PTR(err);
2040 bt_cb(skb)->control.fcs = chan->fcs;
2041 bt_cb(skb)->control.retries = 0;
/* Segment an SDU from @msg into I-frame PDUs queued on @seg_queue.
 * PDU size is derived from the HCI MTU, capped by the BR/EDR payload
 * limit and the remote's negotiated MPS, minus worst-case L2CAP
 * overhead.  Each PDU is tagged with its SAR role (UNSEGMENTED, or
 * START/CONTINUE/END).  On PDU-build failure the queue is purged and
 * the error returned.
 */
2045 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2046 struct sk_buff_head *seg_queue,
2047 struct msghdr *msg, size_t len)
2049 struct sk_buff *skb;
2055 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2057 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2058 * so fragmented skbs are not used. The HCI layer's handling
2059 * of fragmented skbs is not compatible with ERTM's queueing.
2062 /* PDU size is derived from the HCI MTU */
2063 pdu_len = chan->conn->mtu;
2065 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2067 /* Adjust for largest possible L2CAP overhead. */
2069 pdu_len -= L2CAP_FCS_SIZE;
2071 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2072 pdu_len -= L2CAP_EXT_HDR_SIZE;
2074 pdu_len -= L2CAP_ENH_HDR_SIZE;
2076 /* Remote device may have requested smaller PDUs */
2077 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
/* Whole SDU fits in one PDU: no segmentation needed. */
2079 if (len <= pdu_len) {
2080 sar = L2CAP_SAR_UNSEGMENTED;
2084 sar = L2CAP_SAR_START;
/* The START frame loses room to the 2-byte SDU length field. */
2086 pdu_len -= L2CAP_SDULEN_SIZE;
2090 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2093 __skb_queue_purge(seg_queue);
2094 return PTR_ERR(skb);
2097 bt_cb(skb)->control.sar = sar;
2098 __skb_queue_tail(seg_queue, skb);
/* After the START frame, later PDUs regain the SDU-length bytes. */
2103 pdu_len += L2CAP_SDULEN_SIZE;
2106 if (len <= pdu_len) {
2107 sar = L2CAP_SAR_END;
2110 sar = L2CAP_SAR_CONTINUE;
/* Top-level send entry point for a channel.  Dispatches on channel
 * type and mode: connectionless channels get a G-frame; Basic mode a
 * single B-frame; ERTM/Streaming segment the SDU first and then feed
 * the segment queue to the tx state machine (ERTM) or send directly
 * (Streaming).  Returns bytes sent or a negative errno (some
 * assignments/returns are elided in this excerpt).
 */
2117 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2120 struct sk_buff *skb;
2122 struct sk_buff_head seg_queue;
2124 /* Connectionless channel */
2125 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2126 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2128 return PTR_ERR(skb);
2130 l2cap_do_send(chan, skb);
2134 switch (chan->mode) {
2135 case L2CAP_MODE_BASIC:
2136 /* Check outgoing MTU */
2137 if (len > chan->omtu)
2140 /* Create a basic PDU */
2141 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2143 return PTR_ERR(skb);
2145 l2cap_do_send(chan, skb);
2149 case L2CAP_MODE_ERTM:
2150 case L2CAP_MODE_STREAMING:
2151 /* Check outgoing MTU */
2152 if (len > chan->omtu) {
2157 __skb_queue_head_init(&seg_queue);
2159 /* Do segmentation before calling in to the state machine,
2160 * since it's possible to block while waiting for memory
2163 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2165 /* The channel could have been closed while segmenting,
2166 * check that it is still connected.
2168 if (chan->state != BT_CONNECTED) {
2169 __skb_queue_purge(&seg_queue);
/* ERTM goes through the state machine; Streaming sends directly. */
2176 if (chan->mode == L2CAP_MODE_ERTM)
2177 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2179 l2cap_streaming_send(chan, &seg_queue);
2183 /* If the skbs were not queued for sending, they'll still be in
2184 * seg_queue and need to be purged.
2186 __skb_queue_purge(&seg_queue);
2190 BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every sequence number from expected_tx_seq
 * up to (but not including) @txseq that is not already buffered in
 * srej_q, recording each on srej_list; then advance expected_tx_seq
 * past @txseq.
 */
2197 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2199 struct l2cap_ctrl control;
2202 BT_DBG("chan %p, txseq %d", chan, txseq);
2204 memset(&control, 0, sizeof(control));
2206 control.super = L2CAP_SUPER_SREJ;
2208 for (seq = chan->expected_tx_seq; seq != txseq;
2209 seq = __next_seq(chan, seq)) {
/* Only SREJ frames we have not already received out of order. */
2210 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2211 control.reqseq = seq;
2212 l2cap_send_sframe(chan, &control);
2213 l2cap_seq_list_append(&chan->srej_list, seq);
2217 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) missing
 * sequence number, if the SREJ list is non-empty.
 */
2220 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2222 struct l2cap_ctrl control;
2224 BT_DBG("chan %p", chan);
2226 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2229 memset(&control, 0, sizeof(control));
2231 control.super = L2CAP_SUPER_SREJ;
2232 control.reqseq = chan->srej_list.tail;
2233 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding missing frame except @txseq
 * (the one just received).  Each popped entry other than txseq is
 * re-sent and re-appended, rotating the list; the saved initial head
 * bounds the walk to a single pass.
 */
2236 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2238 struct l2cap_ctrl control;
2242 BT_DBG("chan %p, txseq %d", chan, txseq);
2244 memset(&control, 0, sizeof(control));
2246 control.super = L2CAP_SUPER_SREJ;
2248 /* Capture initial list head to allow only one pass through the list. */
2249 initial_head = chan->srej_list.head;
2252 seq = l2cap_seq_list_pop(&chan->srej_list);
/* Stop at the just-received seq or when the list empties. */
2253 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2256 control.reqseq = seq;
2257 l2cap_send_sframe(chan, &control);
2258 l2cap_seq_list_append(&chan->srej_list, seq);
2259 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every tx-queued frame
 * with txseq in [expected_ack_seq, reqseq), update expected_ack_seq,
 * and stop the retransmission timer once nothing remains unacked.
 */
2262 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2264 struct sk_buff *acked_skb;
2267 BT_DBG("chan %p, reqseq %d", chan, reqseq);
/* Nothing outstanding, or a duplicate ack: nothing to do. */
2269 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2272 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2273 chan->expected_ack_seq, chan->unacked_frames);
2275 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2276 ackseq = __next_seq(chan, ackseq)) {
2278 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2280 skb_unlink(acked_skb, &chan->tx_q);
2281 kfree_skb(acked_skb);
2282 chan->unacked_frames--;
2286 chan->expected_ack_seq = reqseq;
2288 if (chan->unacked_frames == 0)
2289 __clear_retrans_timer(chan);
2291 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: rewind expected_tx_seq to the
 * last delivered frame, drop all pending SREJ bookkeeping and any
 * buffered out-of-order frames, and return to plain RECV.
 */
2294 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2296 BT_DBG("chan %p", chan);
2298 chan->expected_tx_seq = chan->buffer_seq;
2299 l2cap_seq_list_clear(&chan->srej_list);
2300 skb_queue_purge(&chan->srej_q);
2301 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM tx state machine: XMIT state handler.  Queues and sends new
 * data, handles entering/leaving local busy (RNR/RR with P-bit),
 * processes peer acks, and on explicit poll or retransmission timeout
 * sends a poll and transitions to WAIT_F.  (break statements between
 * cases are elided in this excerpt.)
 */
2304 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2305 struct l2cap_ctrl *control,
2306 struct sk_buff_head *skbs, u8 event)
2308 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2312 case L2CAP_EV_DATA_REQUEST:
/* Remember where unsent data begins before splicing the queue. */
2313 if (chan->tx_send_head == NULL)
2314 chan->tx_send_head = skb_peek(skbs);
2316 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2317 l2cap_ertm_send(chan);
2319 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2320 BT_DBG("Enter LOCAL_BUSY");
2321 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2323 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2324 /* The SREJ_SENT state must be aborted if we are to
2325 * enter the LOCAL_BUSY state.
2327 l2cap_abort_rx_srej_sent(chan);
2330 l2cap_send_ack(chan);
2333 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2334 BT_DBG("Exit LOCAL_BUSY");
2335 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we previously sent RNR, poll the peer with RR/P=1. */
2337 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2338 struct l2cap_ctrl local_control;
2340 memset(&local_control, 0, sizeof(local_control));
2341 local_control.sframe = 1;
2342 local_control.super = L2CAP_SUPER_RR;
2343 local_control.poll = 1;
2344 local_control.reqseq = chan->buffer_seq;
2345 l2cap_send_sframe(chan, &local_control);
2347 chan->retry_count = 1;
2348 __set_monitor_timer(chan);
2349 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2352 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2353 l2cap_process_reqseq(chan, control->reqseq);
2355 case L2CAP_EV_EXPLICIT_POLL:
2356 l2cap_send_rr_or_rnr(chan, 1);
2357 chan->retry_count = 1;
2358 __set_monitor_timer(chan);
2359 __clear_ack_timer(chan);
2360 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
/* Retrans timer fired: poll the peer and await its F-bit. */
2362 case L2CAP_EV_RETRANS_TO:
2363 l2cap_send_rr_or_rnr(chan, 1);
2364 chan->retry_count = 1;
2365 __set_monitor_timer(chan);
2366 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2368 case L2CAP_EV_RECV_FBIT:
2369 /* Nothing to process */
/* ERTM tx state machine: WAIT_F state handler (a poll is outstanding
 * and we are waiting for the peer's F-bit).  New data is queued but
 * not sent; receiving the F-bit returns to XMIT; monitor timeouts
 * repeat the poll until max_tx retries, then disconnect.
 */
2376 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2377 struct l2cap_ctrl *control,
2378 struct sk_buff_head *skbs, u8 event)
2380 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2384 case L2CAP_EV_DATA_REQUEST:
2385 if (chan->tx_send_head == NULL)
2386 chan->tx_send_head = skb_peek(skbs);
2387 /* Queue data, but don't send. */
2388 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2390 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2391 BT_DBG("Enter LOCAL_BUSY");
2392 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2394 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2395 /* The SREJ_SENT state must be aborted if we are to
2396 * enter the LOCAL_BUSY state.
2398 l2cap_abort_rx_srej_sent(chan);
2401 l2cap_send_ack(chan);
2404 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2405 BT_DBG("Exit LOCAL_BUSY");
2406 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* Re-poll with RR/P=1 if we had sent RNR while busy. */
2408 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2409 struct l2cap_ctrl local_control;
2410 memset(&local_control, 0, sizeof(local_control));
2411 local_control.sframe = 1;
2412 local_control.super = L2CAP_SUPER_RR;
2413 local_control.poll = 1;
2414 local_control.reqseq = chan->buffer_seq;
2415 l2cap_send_sframe(chan, &local_control);
2417 chan->retry_count = 1;
2418 __set_monitor_timer(chan);
2419 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2422 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2423 l2cap_process_reqseq(chan, control->reqseq);
2427 case L2CAP_EV_RECV_FBIT:
/* F-bit received: poll answered, go back to normal XMIT. */
2428 if (control && control->final) {
2429 __clear_monitor_timer(chan);
2430 if (chan->unacked_frames > 0)
2431 __set_retrans_timer(chan);
2432 chan->retry_count = 0;
2433 chan->tx_state = L2CAP_TX_STATE_XMIT;
2434 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2437 case L2CAP_EV_EXPLICIT_POLL:
2440 case L2CAP_EV_MONITOR_TO:
/* max_tx == 0 means retry forever; otherwise bound the polls. */
2441 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2442 l2cap_send_rr_or_rnr(chan, 1);
2443 __set_monitor_timer(chan);
2444 chan->retry_count++;
2446 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Dispatch an event into the ERTM tx state machine according to the
 * channel's current tx state (XMIT or WAIT_F).
 */
2454 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2455 struct sk_buff_head *skbs, u8 event)
2457 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2458 chan, control, skbs, event, chan->tx_state);
2460 switch (chan->tx_state) {
2461 case L2CAP_TX_STATE_XMIT:
2462 l2cap_tx_state_xmit(chan, control, skbs, event);
2464 case L2CAP_TX_STATE_WAIT_F:
2465 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received frame's reqseq/F-bit info to the tx state
 * machine as a RECV_REQSEQ_AND_FBIT event.
 */
2473 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2474 struct l2cap_ctrl *control)
2476 BT_DBG("chan %p, control %p", chan, control);
2477 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward only the F-bit of a received frame to the tx state machine
 * as a RECV_FBIT event.
 */
2480 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2481 struct l2cap_ctrl *control)
2483 BT_DBG("chan %p, control %p", chan, control);
2484 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2487 /* Copy frame to all raw sockets on that connection */
/* Deliver a clone of @skb to every RAW-type channel on @conn (except
 * the originator -- that check is elided in this excerpt).  Clone
 * failures and recv rejections drop that channel's copy silently.
 */
2488 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2490 struct sk_buff *nskb;
2491 struct l2cap_chan *chan;
2493 BT_DBG("conn %p", conn);
2495 mutex_lock(&conn->chan_lock);
2497 list_for_each_entry(chan, &conn->chan_l, list) {
2498 struct sock *sk = chan->sk;
2499 if (chan->chan_type != L2CAP_CHAN_RAW)
2502 /* Don't send frame to the socket it came from */
2505 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv() takes ownership on success; on failure free the clone
 * (the kfree_skb call is elided in this excerpt). */
2509 if (chan->ops->recv(chan, nskb))
2513 mutex_unlock(&conn->chan_lock);
2516 /* ---- L2CAP signalling commands ---- */
/* Build an L2CAP signalling command skb: L2CAP header on the
 * signalling CID (LE or BR/EDR), command header (code/ident/len) and
 * @dlen bytes of @data, split into frag_list continuation skbs when
 * larger than the connection MTU.  Returns the skb or NULL.
 */
2517 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2518 u8 code, u8 ident, u16 dlen, void *data)
2520 struct sk_buff *skb, **frag;
2521 struct l2cap_cmd_hdr *cmd;
2522 struct l2cap_hdr *lh;
2525 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2526 conn, code, ident, dlen);
2528 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2529 count = min_t(unsigned int, conn->mtu, len);
2531 skb = bt_skb_alloc(count, GFP_ATOMIC);
2535 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2536 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling travels on a fixed CID that differs for LE links. */
2538 if (conn->hcon->type == LE_LINK)
2539 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2541 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2543 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2546 cmd->len = cpu_to_le16(dlen);
/* Copy as much payload as fits in the first skb. */
2549 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2550 memcpy(skb_put(skb, count), data, count);
2556 /* Continuation fragments (no L2CAP header) */
2557 frag = &skb_shinfo(skb)->frag_list;
2559 count = min_t(unsigned int, conn->mtu, len);
2561 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2565 memcpy(skb_put(*frag, count), data, count);
2570 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: reports its type and length
 * and decodes the value (1/2/4-byte values by width, anything else as
 * a pointer into the buffer).  Advances *ptr (elided here) and
 * returns the total bytes consumed.
 */
2580 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2582 struct l2cap_conf_opt *opt = *ptr;
2585 len = L2CAP_CONF_OPT_SIZE + opt->len;
2593 *val = *((u8 *) opt->val);
2597 *val = get_unaligned_le16(opt->val);
2601 *val = get_unaligned_le32(opt->val);
/* Larger options are returned by reference, not by value. */
2605 *val = (unsigned long) opt->val;
2609 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr, encoding
 * 1/2/4-byte values inline and larger values via memcpy from the
 * pointer passed in @val; advances *ptr past the option.
 */
2613 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2615 struct l2cap_conf_opt *opt = *ptr;
2617 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2624 *((u8 *) opt->val) = val;
2628 put_unaligned_le16(val, opt->val);
2632 put_unaligned_le32(val, opt->val);
/* For wide options, val is a pointer to the payload to copy. */
2636 memcpy(opt->val, (void *) val, len);
2640 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local parameters; ERTM uses the locally configured
 * service type, Streaming forces best-effort.
 */
2643 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2645 struct l2cap_conf_efs efs;
2647 switch (chan->mode) {
2648 case L2CAP_MODE_ERTM:
2649 efs.id = chan->local_id;
2650 efs.stype = chan->local_stype;
2651 efs.msdu = cpu_to_le16(chan->local_msdu);
2652 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2653 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2654 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2657 case L2CAP_MODE_STREAMING:
/* Streaming mode always advertises best-effort service. */
2659 efs.stype = L2CAP_SERV_BESTEFFORT;
2660 efs.msdu = cpu_to_le16(chan->local_msdu);
2661 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2670 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2671 (unsigned long) &efs);
/* Delayed-work handler for the ack timer: if received frames are
 * still unacknowledged (frames_to_ack test elided), send an RR/RNR
 * S-frame, then release the timer's channel reference.
 */
2674 static void l2cap_ack_timeout(struct work_struct *work)
2676 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2680 BT_DBG("chan %p", chan);
2682 l2cap_chan_lock(chan);
2684 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2685 chan->last_acked_seq);
2688 l2cap_send_rr_or_rnr(chan, 0);
2690 l2cap_chan_unlock(chan);
2691 l2cap_chan_put(chan);
/* Initialise per-channel ERTM/Streaming state: zero all sequence
 * counters, reset SAR reassembly, init the tx queue, and -- for ERTM
 * only -- set initial rx/tx states, set up the three delayed-work
 * timers and allocate the SREJ and retransmission sequence lists.
 * Returns 0 or a negative errno from seq-list allocation.
 */
2694 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2698 chan->next_tx_seq = 0;
2699 chan->expected_tx_seq = 0;
2700 chan->expected_ack_seq = 0;
2701 chan->unacked_frames = 0;
2702 chan->buffer_seq = 0;
2703 chan->frames_sent = 0;
2704 chan->last_acked_seq = 0;
2706 chan->sdu_last_frag = NULL;
2709 skb_queue_head_init(&chan->tx_q);
/* Streaming mode needs none of the ERTM machinery below. */
2711 if (chan->mode != L2CAP_MODE_ERTM)
2714 chan->rx_state = L2CAP_RX_STATE_RECV;
2715 chan->tx_state = L2CAP_TX_STATE_XMIT;
2717 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2718 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2719 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2721 skb_queue_head_init(&chan->srej_q);
2723 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2727 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Unwind the first list if the second allocation fails. */
2729 l2cap_seq_list_free(&chan->srej_list);
/* Pick the effective channel mode: keep ERTM/Streaming only when the
 * remote's feature mask supports it, otherwise fall back to Basic.
 */
2734 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2737 case L2CAP_MODE_STREAMING:
2738 case L2CAP_MODE_ERTM:
2739 if (l2cap_mode_supported(mode, remote_feat_mask))
2743 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled
 * and the peer advertising the EXT_WINDOW feature.
 */
2747 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2749 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Spec is usable only with high-speed support enabled
 * and the peer advertising the EXT_FLOW feature.
 */
2752 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2754 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Configure the tx window: when the requested window exceeds the
 * default and extended window size is supported, switch to the
 * extended control field; otherwise clamp to the default window.
 */
2757 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2759 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2760 __l2cap_ews_supported(chan)) {
2761 /* use extended control field */
2762 set_bit(FLAG_EXT_CTRL, &chan->flags);
2763 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2765 chan->tx_win = min_t(u16, chan->tx_win,
2766 L2CAP_DEFAULT_TX_WINDOW);
2767 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request into @data for @chan: MTU
 * option (when non-default), then a mode-specific RFC option --
 * Basic, ERTM or Streaming -- plus optional EFS, FCS and EWS options
 * as negotiated.  On the first request, the mode may first be
 * downgraded via l2cap_select_mode() based on the peer's features.
 * Returns the encoded request length (return line elided here).
 */
2771 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2773 struct l2cap_conf_req *req = data;
2774 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2775 void *ptr = req->data;
2778 BT_DBG("chan %p", chan);
/* Mode selection only happens on the very first config exchange. */
2780 if (chan->num_conf_req || chan->num_conf_rsp)
2783 switch (chan->mode) {
2784 case L2CAP_MODE_STREAMING:
2785 case L2CAP_MODE_ERTM:
2786 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2789 if (__l2cap_efs_supported(chan))
2790 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2794 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2799 if (chan->imtu != L2CAP_DEFAULT_MTU)
2800 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2802 switch (chan->mode) {
2803 case L2CAP_MODE_BASIC:
/* Peers without ERTM/Streaming need no RFC option at all. */
2804 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2805 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2808 rfc.mode = L2CAP_MODE_BASIC;
2810 rfc.max_transmit = 0;
2811 rfc.retrans_timeout = 0;
2812 rfc.monitor_timeout = 0;
2813 rfc.max_pdu_size = 0;
2815 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2816 (unsigned long) &rfc);
2819 case L2CAP_MODE_ERTM:
2820 rfc.mode = L2CAP_MODE_ERTM;
2821 rfc.max_transmit = chan->max_tx;
2822 rfc.retrans_timeout = 0;
2823 rfc.monitor_timeout = 0;
/* MPS must fit in one HCI fragment after worst-case headers. */
2825 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2826 L2CAP_EXT_HDR_SIZE -
2829 rfc.max_pdu_size = cpu_to_le16(size);
2831 l2cap_txwin_setup(chan);
2833 rfc.txwin_size = min_t(u16, chan->tx_win,
2834 L2CAP_DEFAULT_TX_WINDOW);
2836 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2837 (unsigned long) &rfc);
2839 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2840 l2cap_add_opt_efs(&ptr, chan);
2842 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer FCS=NONE when disabled locally or accepted by the peer. */
2845 if (chan->fcs == L2CAP_FCS_NONE ||
2846 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2847 chan->fcs = L2CAP_FCS_NONE;
2848 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2851 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2852 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2856 case L2CAP_MODE_STREAMING:
2857 l2cap_txwin_setup(chan);
2858 rfc.mode = L2CAP_MODE_STREAMING;
2860 rfc.max_transmit = 0;
2861 rfc.retrans_timeout = 0;
2862 rfc.monitor_timeout = 0;
2864 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2865 L2CAP_EXT_HDR_SIZE -
2868 rfc.max_pdu_size = cpu_to_le16(size);
2870 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2871 (unsigned long) &rfc);
2873 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2874 l2cap_add_opt_efs(&ptr, chan);
2876 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2879 if (chan->fcs == L2CAP_FCS_NONE ||
2880 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2881 chan->fcs = L2CAP_FCS_NONE;
2882 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2887 req->dcid = cpu_to_le16(chan->dcid);
2888 req->flags = __constant_cpu_to_le16(0);
2893 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2895 struct l2cap_conf_rsp *rsp = data;
2896 void *ptr = rsp->data;
2897 void *req = chan->conf_req;
2898 int len = chan->conf_len;
2899 int type, hint, olen;
2901 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2902 struct l2cap_conf_efs efs;
2904 u16 mtu = L2CAP_DEFAULT_MTU;
2905 u16 result = L2CAP_CONF_SUCCESS;
2908 BT_DBG("chan %p", chan);
2910 while (len >= L2CAP_CONF_OPT_SIZE) {
2911 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2913 hint = type & L2CAP_CONF_HINT;
2914 type &= L2CAP_CONF_MASK;
2917 case L2CAP_CONF_MTU:
2921 case L2CAP_CONF_FLUSH_TO:
2922 chan->flush_to = val;
2925 case L2CAP_CONF_QOS:
2928 case L2CAP_CONF_RFC:
2929 if (olen == sizeof(rfc))
2930 memcpy(&rfc, (void *) val, olen);
2933 case L2CAP_CONF_FCS:
2934 if (val == L2CAP_FCS_NONE)
2935 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2938 case L2CAP_CONF_EFS:
2940 if (olen == sizeof(efs))
2941 memcpy(&efs, (void *) val, olen);
2944 case L2CAP_CONF_EWS:
2946 return -ECONNREFUSED;
2948 set_bit(FLAG_EXT_CTRL, &chan->flags);
2949 set_bit(CONF_EWS_RECV, &chan->conf_state);
2950 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2951 chan->remote_tx_win = val;
2958 result = L2CAP_CONF_UNKNOWN;
2959 *((u8 *) ptr++) = type;
2964 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2967 switch (chan->mode) {
2968 case L2CAP_MODE_STREAMING:
2969 case L2CAP_MODE_ERTM:
2970 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2971 chan->mode = l2cap_select_mode(rfc.mode,
2972 chan->conn->feat_mask);
2977 if (__l2cap_efs_supported(chan))
2978 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2980 return -ECONNREFUSED;
2983 if (chan->mode != rfc.mode)
2984 return -ECONNREFUSED;
2990 if (chan->mode != rfc.mode) {
2991 result = L2CAP_CONF_UNACCEPT;
2992 rfc.mode = chan->mode;
2994 if (chan->num_conf_rsp == 1)
2995 return -ECONNREFUSED;
2997 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2998 sizeof(rfc), (unsigned long) &rfc);
3001 if (result == L2CAP_CONF_SUCCESS) {
3002 /* Configure output options and let the other side know
3003 * which ones we don't like. */
3005 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3006 result = L2CAP_CONF_UNACCEPT;
3009 set_bit(CONF_MTU_DONE, &chan->conf_state);
3011 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3014 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3015 efs.stype != L2CAP_SERV_NOTRAFIC &&
3016 efs.stype != chan->local_stype) {
3018 result = L2CAP_CONF_UNACCEPT;
3020 if (chan->num_conf_req >= 1)
3021 return -ECONNREFUSED;
3023 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3025 (unsigned long) &efs);
3027 /* Send PENDING Conf Rsp */
3028 result = L2CAP_CONF_PENDING;
3029 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3034 case L2CAP_MODE_BASIC:
3035 chan->fcs = L2CAP_FCS_NONE;
3036 set_bit(CONF_MODE_DONE, &chan->conf_state);
3039 case L2CAP_MODE_ERTM:
3040 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3041 chan->remote_tx_win = rfc.txwin_size;
3043 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3045 chan->remote_max_tx = rfc.max_transmit;
3047 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3049 L2CAP_EXT_HDR_SIZE -
3052 rfc.max_pdu_size = cpu_to_le16(size);
3053 chan->remote_mps = size;
3055 rfc.retrans_timeout =
3056 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3057 rfc.monitor_timeout =
3058 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3060 set_bit(CONF_MODE_DONE, &chan->conf_state);
3062 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3063 sizeof(rfc), (unsigned long) &rfc);
3065 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3066 chan->remote_id = efs.id;
3067 chan->remote_stype = efs.stype;
3068 chan->remote_msdu = le16_to_cpu(efs.msdu);
3069 chan->remote_flush_to =
3070 le32_to_cpu(efs.flush_to);
3071 chan->remote_acc_lat =
3072 le32_to_cpu(efs.acc_lat);
3073 chan->remote_sdu_itime =
3074 le32_to_cpu(efs.sdu_itime);
3075 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3076 sizeof(efs), (unsigned long) &efs);
3080 case L2CAP_MODE_STREAMING:
3081 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3083 L2CAP_EXT_HDR_SIZE -
3086 rfc.max_pdu_size = cpu_to_le16(size);
3087 chan->remote_mps = size;
3089 set_bit(CONF_MODE_DONE, &chan->conf_state);
3091 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3092 sizeof(rfc), (unsigned long) &rfc);
3097 result = L2CAP_CONF_UNACCEPT;
3099 memset(&rfc, 0, sizeof(rfc));
3100 rfc.mode = chan->mode;
3103 if (result == L2CAP_CONF_SUCCESS)
3104 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3106 rsp->scid = cpu_to_le16(chan->dcid);
3107 rsp->result = cpu_to_le16(result);
3108 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configure Response from the peer and build a follow-up
 * Configure Request into 'data'.  '*result' carries the response result
 * in and may be updated (e.g. to UNACCEPT for a too-small MTU).
 * Returns the new request length or -ECONNREFUSED on a fatal mismatch.
 * NOTE(review): some lines (case exits, closing braces) are elided in
 * this extract.
 */
3113 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3115 struct l2cap_conf_req *req = data;
3116 void *ptr = req->data;
3119 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3120 struct l2cap_conf_efs efs;
3122 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3124 while (len >= L2CAP_CONF_OPT_SIZE) {
3125 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3128 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: mark unacceptable
 * and counter-propose the minimum. */
3129 if (val < L2CAP_DEFAULT_MIN_MTU) {
3130 *result = L2CAP_CONF_UNACCEPT;
3131 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3134 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3137 case L2CAP_CONF_FLUSH_TO:
3138 chan->flush_to = val;
3139 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3143 case L2CAP_CONF_RFC:
3144 if (olen == sizeof(rfc))
3145 memcpy(&rfc, (void *)val, olen);
/* A STATE2 device will not change mode: refuse a mismatch. */
3147 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3148 rfc.mode != chan->mode)
3149 return -ECONNREFUSED;
3153 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3154 sizeof(rfc), (unsigned long) &rfc);
3157 case L2CAP_CONF_EWS:
3158 chan->tx_win = min_t(u16, val,
3159 L2CAP_DEFAULT_EXT_WINDOW);
3160 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3164 case L2CAP_CONF_EFS:
3165 if (olen == sizeof(efs))
3166 memcpy(&efs, (void *)val, olen);
3168 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3169 efs.stype != L2CAP_SERV_NOTRAFIC &&
3170 efs.stype != chan->local_stype)
3171 return -ECONNREFUSED;
3173 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3174 sizeof(efs), (unsigned long) &efs);
/* Basic mode cannot be renegotiated away by the peer. */
3179 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3180 return -ECONNREFUSED;
3182 chan->mode = rfc.mode;
3184 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3186 case L2CAP_MODE_ERTM:
3187 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3188 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3189 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3191 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3192 chan->local_msdu = le16_to_cpu(efs.msdu);
3193 chan->local_sdu_itime =
3194 le32_to_cpu(efs.sdu_itime);
3195 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3196 chan->local_flush_to =
3197 le32_to_cpu(efs.flush_to);
3201 case L2CAP_MODE_STREAMING:
3202 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3206 req->dcid = cpu_to_le16(chan->dcid);
3207 req->flags = __constant_cpu_to_le16(0);
/* Build a bare Configure Response (no options) with the given result
 * and flags into 'data'.  Returns the response length (elided here).
 */
3212 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3214 struct l2cap_conf_rsp *rsp = data;
3215 void *ptr = rsp->data;
3217 BT_DBG("chan %p", chan);
/* scid in a response is the peer's source CID, i.e. our dcid. */
3219 rsp->scid = cpu_to_le16(chan->dcid);
3220 rsp->result = cpu_to_le16(result);
3221 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connect Response (success) for a channel that was
 * held pending (e.g. authorization), then kick off configuration by
 * sending our first Configure Request if one was not sent already.
 */
3226 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3228 struct l2cap_conn_rsp rsp;
3229 struct l2cap_conn *conn = chan->conn;
3232 rsp.scid = cpu_to_le16(chan->dcid);
3233 rsp.dcid = cpu_to_le16(chan->scid);
3234 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3235 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* chan->ident is the ident of the original Connect Request. */
3236 l2cap_send_cmd(conn, chan->ident,
3237 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only the first caller past this test sends the Configure Request. */
3239 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3242 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3243 l2cap_build_conf_req(chan, buf), buf);
3244 chan->num_conf_req++;
/* Extract the RFC option from a successful Configure Response and apply
 * its timeouts/MPS to the channel.  Only meaningful for ERTM and
 * streaming modes; falls back to sane defaults when the peer omitted
 * the RFC option.
 */
3247 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3251 struct l2cap_conf_rfc rfc;
3253 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3255 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3258 while (len >= L2CAP_CONF_OPT_SIZE) {
3259 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3262 case L2CAP_CONF_RFC:
3263 if (olen == sizeof(rfc))
3264 memcpy(&rfc, (void *)val, olen);
3269 /* Use sane default values in case a misbehaving remote device
3270 * did not send an RFC option.
3272 rfc.mode = chan->mode;
3273 rfc.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3274 rfc.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3275 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3277 BT_ERR("Expected RFC option was not found, using defaults");
3281 case L2CAP_MODE_ERTM:
3282 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3283 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3284 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3286 case L2CAP_MODE_STREAMING:
3287 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matched by ident), stop the info timer, mark
 * the feature-mask exchange done and resume pending connections.
 */
3291 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3293 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3295 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3298 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3299 cmd->ident == conn->info_ident) {
3300 cancel_delayed_work(&conn->info_timer);
3302 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3303 conn->info_ident = 0;
3305 l2cap_conn_start(conn);
/* Handle an incoming Connect Request: find the listening channel for
 * the PSM, check link security, create a child channel, and reply with
 * success/pending/refusal.  May also trigger the feature-mask
 * information exchange and the first Configure Request.
 * NOTE(review): several lines (labels, locking of 'parent', some
 * assignments) are elided in this extract.
 */
3311 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3313 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3314 struct l2cap_conn_rsp rsp;
3315 struct l2cap_chan *chan = NULL, *pchan;
3316 struct sock *parent, *sk = NULL;
3317 int result, status = L2CAP_CS_NO_INFO;
3319 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3320 __le16 psm = req->psm;
3322 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3324 /* Check if we have socket listening on psm */
3325 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3327 result = L2CAP_CR_BAD_PSM;
3333 mutex_lock(&conn->chan_lock);
3336 /* Check if the ACL is secure enough (if not SDP) */
3337 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3338 !hci_conn_check_link_mode(conn->hcon)) {
3339 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3340 result = L2CAP_CR_SEC_BLOCK;
3344 result = L2CAP_CR_NO_MEM;
3346 /* Check if we already have channel with that dcid */
3347 if (__l2cap_get_chan_by_dcid(conn, scid))
3350 chan = pchan->ops->new_connection(pchan);
/* Pin the ACL while this channel exists. */
3356 hci_conn_hold(conn->hcon);
3358 bacpy(&bt_sk(sk)->src, conn->src);
3359 bacpy(&bt_sk(sk)->dst, conn->dst);
3363 bt_accept_enqueue(parent, sk);
3365 __l2cap_chan_add(conn, chan);
3369 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident for a possibly deferred response. */
3371 chan->ident = cmd->ident;
3373 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3374 if (l2cap_chan_check_security(chan)) {
3375 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3376 __l2cap_state_change(chan, BT_CONNECT2);
3377 result = L2CAP_CR_PEND;
3378 status = L2CAP_CS_AUTHOR_PEND;
3379 parent->sk_data_ready(parent, 0);
3381 __l2cap_state_change(chan, BT_CONFIG);
3382 result = L2CAP_CR_SUCCESS;
3383 status = L2CAP_CS_NO_INFO;
3386 __l2cap_state_change(chan, BT_CONNECT2);
3387 result = L2CAP_CR_PEND;
3388 status = L2CAP_CS_AUTHEN_PEND;
3391 __l2cap_state_change(chan, BT_CONNECT2);
3392 result = L2CAP_CR_PEND;
3393 status = L2CAP_CS_NO_INFO;
3397 release_sock(parent);
3398 mutex_unlock(&conn->chan_lock);
3401 rsp.scid = cpu_to_le16(scid);
3402 rsp.dcid = cpu_to_le16(dcid);
3403 rsp.result = cpu_to_le16(result);
3404 rsp.status = cpu_to_le16(status);
3405 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask not yet known: start the info exchange now. */
3407 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3408 struct l2cap_info_req info;
3409 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3411 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3412 conn->info_ident = l2cap_get_ident(conn);
3414 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3416 l2cap_send_cmd(conn, conn->info_ident,
3417 L2CAP_INFO_REQ, sizeof(info), &info);
3420 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3421 result == L2CAP_CR_SUCCESS) {
3423 set_bit(CONF_REQ_SENT, &chan->conf_state);
3424 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3425 l2cap_build_conf_req(chan, buf), buf);
3426 chan->num_conf_req++;
/* Handle a Connect Response to our Connect Request.  Looks the channel
 * up by scid (or by ident if scid is zero/pending), then either moves
 * to BT_CONFIG and starts configuration, records a pending state, or
 * tears the channel down on refusal.
 */
3432 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3434 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3435 u16 scid, dcid, result, status;
3436 struct l2cap_chan *chan;
3440 scid = __le16_to_cpu(rsp->scid);
3441 dcid = __le16_to_cpu(rsp->dcid);
3442 result = __le16_to_cpu(rsp->result);
3443 status = __le16_to_cpu(rsp->status);
3445 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3446 dcid, scid, result, status);
3448 mutex_lock(&conn->chan_lock);
3451 chan = __l2cap_get_chan_by_scid(conn, scid);
/* Fallback lookup by command ident (elided branch above). */
3457 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3466 l2cap_chan_lock(chan);
3469 case L2CAP_CR_SUCCESS:
3470 l2cap_state_change(chan, BT_CONFIG);
3473 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3475 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3478 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3479 l2cap_build_conf_req(chan, req), req);
3480 chan->num_conf_req++;
3484 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Refused: delete the channel with ECONNREFUSED. */
3488 l2cap_chan_del(chan, ECONNREFUSED);
3492 l2cap_chan_unlock(chan);
3495 mutex_unlock(&conn->chan_lock);
3500 static inline void set_default_fcs(struct l2cap_chan *chan)
3502 /* FCS is enabled only in ERTM or streaming mode, if one or both
3505 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3506 chan->fcs = L2CAP_FCS_NONE;
/* Peer never asked to disable FCS, so use CRC16. */
3507 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3508 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request.  Accumulates (possibly
 * continued) option data into chan->conf_req, parses it once complete,
 * sends the Configure Response, and finishes channel setup when both
 * directions are configured.
 * NOTE(review): some branches/gotos are elided in this extract.
 */
3511 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3513 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3516 struct l2cap_chan *chan;
3519 dcid = __le16_to_cpu(req->dcid);
3520 flags = __le16_to_cpu(req->flags);
3522 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3524 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only valid in BT_CONFIG/BT_CONNECT2; otherwise reject CID. */
3528 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3529 struct l2cap_cmd_rej_cid rej;
3531 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3532 rej.scid = cpu_to_le16(chan->scid);
3533 rej.dcid = cpu_to_le16(chan->dcid);
3535 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3540 /* Reject if config buffer is too small. */
3541 len = cmd_len - sizeof(*req);
3542 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3543 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3544 l2cap_build_conf_rsp(chan, rsp,
3545 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment of options to the accumulation buffer. */
3550 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3551 chan->conf_len += len;
3553 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3554 /* Incomplete config. Send empty response. */
3555 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3556 l2cap_build_conf_rsp(chan, rsp,
3557 L2CAP_CONF_SUCCESS, flags), rsp);
3561 /* Complete config. */
3562 len = l2cap_parse_conf_req(chan, rsp);
3564 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3568 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3569 chan->num_conf_rsp++;
3571 /* Reset config buffer. */
3574 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finalize FCS/ERTM and go ready. */
3577 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3578 set_default_fcs(chan);
3580 if (chan->mode == L2CAP_MODE_ERTM ||
3581 chan->mode == L2CAP_MODE_STREAMING)
3582 err = l2cap_ertm_init(chan);
3585 l2cap_send_disconn_req(chan->conn, chan, -err);
3587 l2cap_chan_ready(chan);
3592 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3594 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3595 l2cap_build_conf_req(chan, buf), buf);
3596 chan->num_conf_req++;
3599 /* Got Conf Rsp PENDING from remote side and asume we sent
3600 Conf Rsp PENDING in the code above */
3601 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3602 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3604 /* check compatibility */
3606 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3607 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3609 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3610 l2cap_build_conf_rsp(chan, rsp,
3611 L2CAP_CONF_SUCCESS, flags), rsp);
3615 l2cap_chan_unlock(chan);
/* Handle a Configure Response for our Configure Request.  On SUCCESS
 * apply the agreed RFC values; on PENDING possibly answer the peer's
 * pending state; on UNACCEPT retry with a renegotiated request (up to
 * L2CAP_CONF_MAX_CONF_RSP times); otherwise disconnect.
 */
3619 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3621 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3622 u16 scid, flags, result;
3623 struct l2cap_chan *chan;
3624 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3627 scid = __le16_to_cpu(rsp->scid);
3628 flags = __le16_to_cpu(rsp->flags);
3629 result = __le16_to_cpu(rsp->result);
3631 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3634 chan = l2cap_get_chan_by_scid(conn, scid);
3639 case L2CAP_CONF_SUCCESS:
3640 l2cap_conf_rfc_get(chan, rsp->data, len);
3641 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3644 case L2CAP_CONF_PENDING:
3645 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3647 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3650 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3653 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3657 /* check compatibility */
3659 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3660 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3662 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3663 l2cap_build_conf_rsp(chan, buf,
3664 L2CAP_CONF_SUCCESS, 0x0000), buf);
3668 case L2CAP_CONF_UNACCEPT:
3669 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard: the peer's option list must fit in our request buffer. */
3672 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3673 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3677 /* throw out any old stored conf requests */
3678 result = L2CAP_CONF_SUCCESS;
3679 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3682 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3686 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3687 L2CAP_CONF_REQ, len, req);
3688 chan->num_conf_req++;
3689 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: fatal, schedule disconnect. */
3695 l2cap_chan_set_err(chan, ECONNRESET);
3697 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3698 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3702 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3705 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3707 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3708 set_default_fcs(chan);
3710 if (chan->mode == L2CAP_MODE_ERTM ||
3711 chan->mode == L2CAP_MODE_STREAMING)
3712 err = l2cap_ertm_init(chan);
3715 l2cap_send_disconn_req(chan->conn, chan, -err);
3717 l2cap_chan_ready(chan);
3721 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnect Request: acknowledge with a Disconnect
 * Response, shut the socket down, and delete the channel.  The extra
 * hold/put pair keeps the channel alive across ops->close().
 */
3725 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3727 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3728 struct l2cap_disconn_rsp rsp;
3730 struct l2cap_chan *chan;
3733 scid = __le16_to_cpu(req->scid);
3734 dcid = __le16_to_cpu(req->dcid);
3736 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3738 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid, so look up by dcid. */
3740 chan = __l2cap_get_chan_by_scid(conn, dcid);
3742 mutex_unlock(&conn->chan_lock);
3746 l2cap_chan_lock(chan);
3750 rsp.dcid = cpu_to_le16(chan->scid);
3751 rsp.scid = cpu_to_le16(chan->dcid);
3752 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3755 sk->sk_shutdown = SHUTDOWN_MASK;
3758 l2cap_chan_hold(chan);
3759 l2cap_chan_del(chan, ECONNRESET);
3761 l2cap_chan_unlock(chan);
3763 chan->ops->close(chan);
3764 l2cap_chan_put(chan);
3766 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnect Response to our Disconnect Request: tear down the
 * channel (error 0 -- this is a clean, requested disconnect).
 */
3771 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3773 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3775 struct l2cap_chan *chan;
3777 scid = __le16_to_cpu(rsp->scid);
3778 dcid = __le16_to_cpu(rsp->dcid);
3780 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3782 mutex_lock(&conn->chan_lock);
3784 chan = __l2cap_get_chan_by_scid(conn, scid);
3786 mutex_unlock(&conn->chan_lock);
3790 l2cap_chan_lock(chan);
/* Hold across close() so the last put does not free it early. */
3792 l2cap_chan_hold(chan);
3793 l2cap_chan_del(chan, 0);
3795 l2cap_chan_unlock(chan);
3797 chan->ops->close(chan);
3798 l2cap_chan_put(chan);
3800 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request: answer with our feature mask,
 * our fixed-channel map, or NOTSUPP for any other type.
 */
3805 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3807 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3810 type = __le16_to_cpu(req->type);
3812 BT_DBG("type 0x%4.4x", type);
3814 if (type == L2CAP_IT_FEAT_MASK) {
3816 u32 feat_mask = l2cap_feat_mask;
3817 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3818 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3819 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming (and ext flow/window, conditionally --
 * the guarding condition is elided in this extract). */
3821 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3824 feat_mask |= L2CAP_FEAT_EXT_FLOW
3825 | L2CAP_FEAT_EXT_WINDOW;
3827 put_unaligned_le32(feat_mask, rsp->data);
3828 l2cap_send_cmd(conn, cmd->ident,
3829 L2CAP_INFO_RSP, sizeof(buf), buf);
3830 } else if (type == L2CAP_IT_FIXED_CHAN) {
3832 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* A2MP bit toggled based on a condition elided in this extract. */
3835 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3837 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3839 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3840 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3841 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3842 l2cap_send_cmd(conn, cmd->ident,
3843 L2CAP_INFO_RSP, sizeof(buf), buf);
3845 struct l2cap_info_rsp rsp;
3846 rsp.type = cpu_to_le16(type);
3847 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3848 l2cap_send_cmd(conn, cmd->ident,
3849 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response.  On a feature-mask answer, optionally
 * chain a fixed-channel query; once the exchange is finished, mark it
 * done and start any connections that were waiting on it.
 */
3855 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3857 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3860 type = __le16_to_cpu(rsp->type);
3861 result = __le16_to_cpu(rsp->result);
3863 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3865 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3866 if (cmd->ident != conn->info_ident ||
3867 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3870 cancel_delayed_work(&conn->info_timer);
3872 if (result != L2CAP_IR_SUCCESS) {
3873 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3874 conn->info_ident = 0;
3876 l2cap_conn_start(conn);
3882 case L2CAP_IT_FEAT_MASK:
3883 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask which ones. */
3885 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3886 struct l2cap_info_req req;
3887 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3889 conn->info_ident = l2cap_get_ident(conn);
3891 l2cap_send_cmd(conn, conn->info_ident,
3892 L2CAP_INFO_REQ, sizeof(req), &req);
3894 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3895 conn->info_ident = 0;
3897 l2cap_conn_start(conn);
3901 case L2CAP_IT_FIXED_CHAN:
3902 conn->fixed_chan_mask = rsp->data[0];
3903 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3904 conn->info_ident = 0;
3906 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  AMP support is not implemented
 * here: the request is validated for length and then always refused
 * with L2CAP_CR_NO_MEM (see the placeholder comment below).
 */
3913 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3914 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3917 struct l2cap_create_chan_req *req = data;
3918 struct l2cap_create_chan_rsp rsp;
3921 if (cmd_len != sizeof(*req))
3927 psm = le16_to_cpu(req->psm);
3928 scid = le16_to_cpu(req->scid);
3930 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3932 /* Placeholder: Always reject */
3934 rsp.scid = cpu_to_le16(scid);
3935 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3936 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3938 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Create Channel Response shares the Connect Response layout, so it is
 * handled by the same code path.
 */
3944 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3945 struct l2cap_cmd_hdr *cmd, void *data)
3947 BT_DBG("conn %p", conn);
3949 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response with the given icid and result, reusing
 * the ident of the request being answered.
 */
3952 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3953 u16 icid, u16 result)
3955 struct l2cap_move_chan_rsp rsp;
3957 BT_DBG("icid %d, result %d", icid, result);
3959 rsp.icid = cpu_to_le16(icid);
3960 rsp.result = cpu_to_le16(result);
3962 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm with a fresh ident; the ident is stored
 * on the channel so the Confirm Response can be matched later.
 * NOTE(review): callers may pass chan == NULL (see the placeholder in
 * l2cap_move_channel_rsp); the NULL guard, if any, is elided here.
 */
3965 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3966 struct l2cap_chan *chan, u16 icid, u16 result)
3968 struct l2cap_move_chan_cfm cfm;
3971 BT_DBG("icid %d, result %d", icid, result);
3973 ident = l2cap_get_ident(conn);
3975 chan->ident = ident;
3977 cfm.icid = cpu_to_le16(icid);
3978 cfm.result = cpu_to_le16(result);
3980 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirm with a Confirm Response. */
3983 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3986 struct l2cap_move_chan_cfm_rsp rsp;
3988 BT_DBG("icid %d", icid);
3990 rsp.icid = cpu_to_le16(icid);
3991 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request.  Channel moves (AMP) are not
 * implemented: every request is refused with L2CAP_MR_NOT_ALLOWED.
 */
3994 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3995 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3997 struct l2cap_move_chan_req *req = data;
3999 u16 result = L2CAP_MR_NOT_ALLOWED;
4001 if (cmd_len != sizeof(*req))
4004 icid = le16_to_cpu(req->icid);
4006 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4011 /* Placeholder: Always refuse */
4012 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  Placeholder implementation: always
 * answer with an UNCONFIRMED Move Channel Confirm (chan is NULL since
 * no move is actually tracked yet).
 */
4017 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4018 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4020 struct l2cap_move_chan_rsp *rsp = data;
4023 if (cmd_len != sizeof(*rsp))
4026 icid = le16_to_cpu(rsp->icid);
4027 result = le16_to_cpu(rsp->result);
4029 BT_DBG("icid %d, result %d", icid, result);
4031 /* Placeholder: Always unconfirmed */
4032 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirm: simply acknowledge it with a Confirm
 * Response (no move state is maintained yet).
 */
4037 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4038 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4040 struct l2cap_move_chan_cfm *cfm = data;
4043 if (cmd_len != sizeof(*cfm))
4046 icid = le16_to_cpu(cfm->icid);
4047 result = le16_to_cpu(cfm->result);
4049 BT_DBG("icid %d, result %d", icid, result);
4051 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirm Response: validated and logged only. */
4056 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4057 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4059 struct l2cap_move_chan_cfm_rsp *rsp = data;
4062 if (cmd_len != sizeof(*rsp))
4065 icid = le16_to_cpu(rsp->icid);
4067 BT_DBG("icid %d", icid);
/* Validate LE connection parameters against the ranges allowed by the
 * spec: interval 6..3200 with min <= max, supervision timeout
 * 10..3200, and a latency low enough that the supervision timeout can
 * still cover max-interval events.
 */
4072 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4077 if (min > max || min < 6 || max > 3200)
4080 if (to_multiplier < 10 || to_multiplier > 3200)
4083 if (max >= to_multiplier * 8)
4086 max_latency = (to_multiplier * 8 / max) - 1;
4087 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (only valid when we
 * are master).  Validates the parameters, replies accept/reject, and
 * on accept asks the controller to update the connection.
 */
4093 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4094 struct l2cap_cmd_hdr *cmd, u8 *data)
4096 struct hci_conn *hcon = conn->hcon;
4097 struct l2cap_conn_param_update_req *req;
4098 struct l2cap_conn_param_update_rsp rsp;
4099 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may apply a parameter update. */
4102 if (!(hcon->link_mode & HCI_LM_MASTER))
4105 cmd_len = __le16_to_cpu(cmd->len);
4106 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4109 req = (struct l2cap_conn_param_update_req *) data;
4110 min = __le16_to_cpu(req->min);
4111 max = __le16_to_cpu(req->max);
4112 latency = __le16_to_cpu(req->latency);
4113 to_multiplier = __le16_to_cpu(req->to_multiplier);
4115 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4116 min, max, latency, to_multiplier);
4118 memset(&rsp, 0, sizeof(rsp));
4120 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4122 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4124 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4126 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters were acceptable: push them down to the controller. */
4130 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler by opcode.
 * Echo requests are answered inline; unknown opcodes are logged.
 */
4135 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4136 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4140 switch (cmd->code) {
4141 case L2CAP_COMMAND_REJ:
4142 l2cap_command_rej(conn, cmd, data);
4145 case L2CAP_CONN_REQ:
4146 err = l2cap_connect_req(conn, cmd, data);
4149 case L2CAP_CONN_RSP:
4150 err = l2cap_connect_rsp(conn, cmd, data);
4153 case L2CAP_CONF_REQ:
4154 err = l2cap_config_req(conn, cmd, cmd_len, data);
4157 case L2CAP_CONF_RSP:
4158 err = l2cap_config_rsp(conn, cmd, data);
4161 case L2CAP_DISCONN_REQ:
4162 err = l2cap_disconnect_req(conn, cmd, data);
4165 case L2CAP_DISCONN_RSP:
4166 err = l2cap_disconnect_rsp(conn, cmd, data);
4169 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back. */
4170 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4173 case L2CAP_ECHO_RSP:
4176 case L2CAP_INFO_REQ:
4177 err = l2cap_information_req(conn, cmd, data);
4180 case L2CAP_INFO_RSP:
4181 err = l2cap_information_rsp(conn, cmd, data);
4184 case L2CAP_CREATE_CHAN_REQ:
4185 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4188 case L2CAP_CREATE_CHAN_RSP:
4189 err = l2cap_create_channel_rsp(conn, cmd, data);
4192 case L2CAP_MOVE_CHAN_REQ:
4193 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4196 case L2CAP_MOVE_CHAN_RSP:
4197 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4200 case L2CAP_MOVE_CHAN_CFM:
4201 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4204 case L2CAP_MOVE_CHAN_CFM_RSP:
4205 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4209 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command.  Only the connection parameter
 * update request is actionable; rejects and update responses are
 * ignored, anything else is logged as unknown.
 */
4217 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4218 struct l2cap_cmd_hdr *cmd, u8 *data)
4220 switch (cmd->code) {
4221 case L2CAP_COMMAND_REJ:
4224 case L2CAP_CONN_PARAM_UPDATE_REQ:
4225 return l2cap_conn_param_update_req(conn, cmd, data);
4227 case L2CAP_CONN_PARAM_UPDATE_RSP:
4231 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process a signaling-channel skb: iterate over the packed commands,
 * validate each header, and dispatch to the LE or BR/EDR handler based
 * on link type.  A handler error produces a Command Reject.
 */
4236 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4237 struct sk_buff *skb)
4239 u8 *data = skb->data;
4241 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic first. */
4244 l2cap_raw_recv(conn, skb);
4246 while (len >= L2CAP_CMD_HDR_SIZE) {
4248 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4249 data += L2CAP_CMD_HDR_SIZE;
4250 len -= L2CAP_CMD_HDR_SIZE;
4252 cmd_len = le16_to_cpu(cmd.len);
4254 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Guard against truncated commands and the reserved ident 0. */
4256 if (cmd_len > len || !cmd.ident) {
4257 BT_DBG("corrupted command");
4261 if (conn->hcon->type == LE_LINK)
4262 err = l2cap_le_sig_cmd(conn, &cmd, data);
4264 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4267 struct l2cap_cmd_rej_unk rej;
4269 BT_ERR("Wrong link type (%d)", err);
4271 /* FIXME: Map err to a valid reason */
4272 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4273 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailer of a received ERTM/streaming frame.
 * Trims the 2-byte FCS off the skb, then compares the received value
 * (now at data+len after the trim) against a CRC computed over the
 * header plus payload.  No-op when the channel does not use FCS.
 */
4283 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4285 u16 our_fcs, rcv_fcs;
4288 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4289 hdr_size = L2CAP_EXT_HDR_SIZE;
4291 hdr_size = L2CAP_ENH_HDR_SIZE;
4293 if (chan->fcs == L2CAP_FCS_CRC16) {
4294 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4295 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4296 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4298 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) from the peer: send RNR if we are locally
 * busy, otherwise flush pending I-frames, and if the F-bit still has
 * not gone out on any frame, send an RR carrying it.
 */
4304 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4306 struct l2cap_ctrl control;
4308 BT_DBG("chan %p", chan);
4310 memset(&control, 0, sizeof(control));
4313 control.reqseq = chan->buffer_seq;
4314 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4316 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4317 control.super = L2CAP_SUPER_RNR;
4318 l2cap_send_sframe(chan, &control);
/* Remote just became un-busy with frames still unacked: restart
 * the retransmission timer. */
4321 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4322 chan->unacked_frames > 0)
4323 __set_retrans_timer(chan);
4325 /* Send pending iframes */
4326 l2cap_ertm_send(chan);
4328 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4329 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4330 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4333 control.super = L2CAP_SUPER_RR;
4334 l2cap_send_sframe(chan, &control);
/* Append 'new_frag' to skb's frag_list (starting the list if empty),
 * track the list tail via *last_frag, and keep skb's len/data_len/
 * truesize accounting consistent.
 */
4338 static void append_skb_frag(struct sk_buff *skb,
4339 struct sk_buff *new_frag, struct sk_buff **last_frag)
4341 /* skb->len reflects data in skb as well as all fragments
4342 * skb->data_len reflects only data in fragments
4344 if (!skb_has_frag_list(skb))
4345 skb_shinfo(skb)->frag_list = new_frag;
4347 new_frag->next = NULL;
4349 (*last_frag)->next = new_frag;
4350 *last_frag = new_frag;
4352 skb->len += new_frag->len;
4353 skb->data_len += new_frag->len;
4354 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to the SAR bits.
 * Unsegmented frames go straight to ops->recv(); START frames open a
 * new partial SDU (length-prefixed), CONTINUE/END frames append to it,
 * and a completed SDU is delivered and the partial state reset.
 * NOTE(review): several validation branches and the error label are
 * elided in this extract.
 */
4357 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4358 struct l2cap_ctrl *control)
4362 switch (control->sar) {
4363 case L2CAP_SAR_UNSEGMENTED:
4367 err = chan->ops->recv(chan, skb);
4370 case L2CAP_SAR_START:
/* First fragment carries the total SDU length prefix. */
4374 chan->sdu_len = get_unaligned_le16(skb->data);
4375 skb_pull(skb, L2CAP_SDULEN_SIZE);
4377 if (chan->sdu_len > chan->imtu) {
4382 if (skb->len >= chan->sdu_len)
4386 chan->sdu_last_frag = skb;
4392 case L2CAP_SAR_CONTINUE:
4396 append_skb_frag(chan->sdu, skb,
4397 &chan->sdu_last_frag);
/* A CONTINUE fragment must not complete (or overflow) the SDU. */
4400 if (chan->sdu->len >= chan->sdu_len)
4410 append_skb_frag(chan->sdu, skb,
4411 &chan->sdu_last_frag);
/* END fragment: total length must now match exactly. */
4414 if (chan->sdu->len != chan->sdu_len)
4417 err = chan->ops->recv(chan, chan->sdu);
4420 /* Reassembly complete */
4422 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and reset reassembly state. */
4430 kfree_skb(chan->sdu);
4432 chan->sdu_last_frag = NULL;
/* Report local-busy state changes into the ERTM transmit state machine
 * (no-op for non-ERTM channels).
 */
4439 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4443 if (chan->mode != L2CAP_MODE_ERTM)
4446 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4447 l2cap_tx(chan, NULL, NULL, event);
/* Drain in-sequence frames from the SREJ hold queue.
 *
 * After retransmissions fill a sequence gap, frames held in
 * chan->srej_q can be released in txseq order: each advances
 * chan->buffer_seq and is fed to l2cap_reassemble_sdu().  Draining
 * stops while local busy is set or when the next expected sequence
 * number is not present in the queue.  Once the queue is empty, the
 * channel returns to the RECV rx state and acks the peer.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Release the frame and advance the receive window */
		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);

	/* All gaps filled: leave SREJ recovery and acknowledge */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
/* Handle a received SREJ (selective reject) S-frame.
 *
 * The peer requests retransmission of the single frame with sequence
 * number control->reqseq.  A reqseq equal to next_tx_seq refers to a
 * frame that was never sent, so the connection is dropped; likewise if
 * the frame has already used up its max_tx retry budget.
 *
 * For P-bit SREJs the response must carry the F-bit and, while in
 * WAIT_F, reqseq is saved in srej_save_reqseq so that a later F-bit
 * SREJ for the same frame (tracked via CONN_SREJ_ACT) is not
 * retransmitted a second time.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* Rejecting a frame that was never transmitted is a protocol
	 * violation.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

		BT_DBG("Seq %d not available for retransmission",

	/* Enforce the retransmission limit before resending */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* P-bit set: process the ack info, retransmit with the
		 * F-bit, then push any pending I-frames.
		 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit when this F-bit SREJ repeats
			 * the P-bit SREJ already answered above.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
				l2cap_retransmit(chan, control);
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame.
 *
 * The peer rejects every frame from control->reqseq onward, requesting
 * a go-back-N retransmission.  As with SREJ, a reqseq for a never-sent
 * frame, or one whose max_tx retry budget is exhausted, forces a
 * disconnect.  CONN_REJ_ACT guards against retransmitting the same
 * window twice when both a REJ and a matching F-bit response arrive.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* Rejecting a frame that was never sent is a protocol violation */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* Enforce the retransmission limit */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this final REJ was not already
		 * acted upon (CONN_REJ_ACT).
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame sequence number relative to the receive
 * window and the current recovery state.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications that drive the rx
 * state machines: expected, duplicate, unexpected (sequence gap), the
 * SREJ-recovery variants, or invalid - with an "ignore" flavor when a
 * stale frame can safely be dropped instead of forcing a disconnect.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
			/* See notes below regarding "double poll" and

			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;

		/* Head of the SREJ list is the retransmission we want next */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;

	if (chan->expected_tx_seq == txseq) {
		/* Even the expected seq is invalid if it would overflow
		 * the tx window.
		 */
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;

		return L2CAP_TXSEQ_EXPECTED;

	/* A txseq before expected_tx_seq was already received */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq,
			 chan->last_acked_seq)){
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;

	BT_DBG("Unexpected - txseq indicates missing frames");
	return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM rx state machine handler for the RECV state (no recovery in
 * progress).
 *
 * Expected I-frames advance the receive window, are reassembled, and
 * are acked unless local busy is set.  A sequence gap stores the frame
 * in srej_q, sends SREJ frames for the missing sequence numbers, and
 * transitions to SREJ_SENT.  RR/RNR/REJ/SREJ events update the
 * transmit side.  skb_in_use marks frames whose ownership has been
 * transferred (queued or consumed); all others are freed on exit.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,

	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Under local busy the frame is discarded; the
			 * peer will retransmit once busy clears.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",

			chan->expected_tx_seq = __next_seq(chan,

			chan->buffer_seq = chan->expected_tx_seq;

			err = l2cap_reassemble_sdu(chan, skb, control);

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {

					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);

			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Frame already received; only its ack info is used */
			l2cap_pass_to_tx(chan, control);
		case L2CAP_TXSEQ_INVALID_IGNORE:
		case L2CAP_TXSEQ_INVALID:
			l2cap_send_disconn_req(chan->conn, chan,
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {

				l2cap_retransmit_all(chan, control);

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);

			/* Remote busy cleared with frames outstanding:
			 * restart the retransmission timer.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
	case L2CAP_EV_RECV_RNR:
		/* Peer is busy: hold retransmissions until it recovers */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);

		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);

	/* Frames not queued or consumed above are freed here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
/* ERTM rx state machine handler for the SREJ_SENT state (selective
 * reject recovery in progress).
 *
 * All I-frames are parked in srej_q until the missing retransmissions
 * arrive; an EXPECTED_SREJ frame pops the srej_list and lets
 * l2cap_rx_queued_iframes() drain everything that is now in sequence.
 * Further gaps extend the SREJ list.  skb_in_use marks frames whose
 * ownership was transferred to srej_q; all others are freed on exit.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,

	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);

			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we were waiting for arrived */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);

			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			/* Try to drain now-sequential frames */
			err = l2cap_rx_queued_iframes(chan);

		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			skb_queue_tail(&chan->srej_q, skb);

			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			skb_queue_tail(&chan->srej_q, skb);

			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
		case L2CAP_TXSEQ_INVALID_IGNORE:
		case L2CAP_TXSEQ_INVALID:
			l2cap_send_disconn_req(chan->conn, chan,
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {

				l2cap_retransmit_all(chan, control);

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);

			/* A poll demands an F-bit response; resend the
			 * SREJ for the tail of the missing-frame list.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);

			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);

			/* Peer went busy without polling: answer with a
			 * plain RR carrying the current buffer_seq.
			 */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);

	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);

	/* Frames not queued above are freed here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
4945 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4947 /* Make sure reqseq is for a packet that has been sent but not acked */
4950 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4951 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatcher.
 *
 * Validates control->reqseq against the unacked transmit window, then
 * routes the event to the handler for the current rx state (RECV or
 * SREJ_SENT).  An out-of-window reqseq is a protocol violation and
 * triggers a disconnect.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,

		/* Peer acked a frame we never sent - disconnect */
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Receive path for streaming mode.
 *
 * Only the expected txseq is reassembled; on any other classification
 * both the partial SDU and the frame are discarded, since streaming
 * mode never retransmits.  last_acked_seq and expected_tx_seq always
 * advance past the received txseq, even over gaps.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);

		/* Sequence gap: drop any partial SDU and this frame */
		kfree_skb(chan->sdu);

		chan->sdu_last_frag = NULL;

		BT_DBG("Freeing %p", skb);

	/* Streaming always advances, even over missing frames */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Parse and validate an ERTM/streaming frame, then feed it to the
 * appropriate receive path.
 *
 * Checks the FCS, the payload length against MPS, and the F/P bit
 * combinations before dispatching I-frames via l2cap_rx() /
 * l2cap_stream_rx() and S-frames via the rx_func_to_event mapping.
 * Frames that violate the protocol trigger a disconnect.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
	struct l2cap_ctrl *control = &bt_cb(skb)->control;

	__unpack_control(chan, skb);

	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	if (l2cap_check_fcs(chan, skb))

	/* Compute the pure payload length: strip the SDU-length prefix
	 * of START frames and the trailing FCS when present.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload larger than the negotiated MPS is a violation */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);

	if (!control->sframe) {

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
			err = l2cap_stream_rx(chan, control, skb);

			l2cap_send_disconn_req(chan->conn, chan,
		/* Map the 2-bit S-frame "super" field to an rx event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,

			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5112 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
5114 struct l2cap_chan *chan;
5116 chan = l2cap_get_chan_by_scid(conn, cid);
5118 BT_DBG("unknown cid 0x%4.4x", cid);
5119 /* Drop packet and return */
5124 BT_DBG("chan %p, len %d", chan, skb->len);
5126 if (chan->state != BT_CONNECTED)
5129 switch (chan->mode) {
5130 case L2CAP_MODE_BASIC:
5131 /* If socket recv buffers overflows we drop data here
5132 * which is *bad* because L2CAP has to be reliable.
5133 * But we don't have any other choice. L2CAP doesn't
5134 * provide flow control mechanism. */
5136 if (chan->imtu < skb->len)
5139 if (!chan->ops->recv(chan, skb))
5143 case L2CAP_MODE_ERTM:
5144 case L2CAP_MODE_STREAMING:
5145 l2cap_data_rcv(chan, skb);
5149 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5157 l2cap_chan_unlock(chan);
5162 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5164 struct l2cap_chan *chan;
5166 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5170 BT_DBG("chan %p, len %d", chan, skb->len);
5172 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5175 if (chan->imtu < skb->len)
5178 if (!chan->ops->recv(chan, skb))
5187 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5188 struct sk_buff *skb)
5190 struct l2cap_chan *chan;
5192 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5196 BT_DBG("chan %p, len %d", chan, skb->len);
5198 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5201 if (chan->imtu < skb->len)
5204 if (!chan->ops->recv(chan, skb))
5213 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5215 struct l2cap_hdr *lh = (void *) skb->data;
5219 skb_pull(skb, L2CAP_HDR_SIZE);
5220 cid = __le16_to_cpu(lh->cid);
5221 len = __le16_to_cpu(lh->len);
5223 if (len != skb->len) {
5228 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5231 case L2CAP_CID_LE_SIGNALING:
5232 case L2CAP_CID_SIGNALING:
5233 l2cap_sig_channel(conn, skb);
5236 case L2CAP_CID_CONN_LESS:
5237 psm = get_unaligned((__le16 *) skb->data);
5238 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5239 l2cap_conless_channel(conn, psm, skb);
5242 case L2CAP_CID_LE_DATA:
5243 l2cap_att_channel(conn, cid, skb);
5247 if (smp_sig_channel(conn, skb))
5248 l2cap_conn_del(conn->hcon, EACCES);
5252 l2cap_data_channel(conn, cid, skb);
5257 /* ---- L2CAP interface with lower layer (HCI) ---- */
5259 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5261 int exact = 0, lm1 = 0, lm2 = 0;
5262 struct l2cap_chan *c;
5264 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5266 /* Find listening sockets and check their link_mode */
5267 read_lock(&chan_list_lock);
5268 list_for_each_entry(c, &chan_list, global_l) {
5269 struct sock *sk = c->sk;
5271 if (c->state != BT_LISTEN)
5274 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5275 lm1 |= HCI_LM_ACCEPT;
5276 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5277 lm1 |= HCI_LM_MASTER;
5279 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5280 lm2 |= HCI_LM_ACCEPT;
5281 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5282 lm2 |= HCI_LM_MASTER;
5285 read_unlock(&chan_list_lock);
5287 return exact ? lm1 : lm2;
5290 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5292 struct l2cap_conn *conn;
5294 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5297 conn = l2cap_conn_add(hcon, status);
5299 l2cap_conn_ready(conn);
5301 l2cap_conn_del(hcon, bt_to_errno(status));
5306 int l2cap_disconn_ind(struct hci_conn *hcon)
5308 struct l2cap_conn *conn = hcon->l2cap_data;
5310 BT_DBG("hcon %p", hcon);
5313 return HCI_ERROR_REMOTE_USER_TERM;
5314 return conn->disc_reason;
5317 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5319 BT_DBG("hcon %p reason %d", hcon, reason);
5321 l2cap_conn_del(hcon, bt_to_errno(reason));
5325 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5327 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5330 if (encrypt == 0x00) {
5331 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5332 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5333 } else if (chan->sec_level == BT_SECURITY_HIGH)
5334 l2cap_chan_close(chan, ECONNREFUSED);
5336 if (chan->sec_level == BT_SECURITY_MEDIUM)
5337 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption state changed on a link.
 *
 * For LE links a successful encryption triggers SMP key distribution.
 * For each channel on the connection the new security state drives the
 * channel state machine: connected/configuring channels are resumed
 * (and their timers updated via l2cap_check_encryption), channels in
 * BT_CONNECT send their connection request, and channels in
 * BT_CONNECT2 answer the pending connection response - deferring with
 * AUTHOR_PEND when userspace setup is deferred, or refusing with
 * SEC_BLOCK when security failed.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		/* Encrypted LE link: start SMP key distribution */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);

			l2cap_chan_unlock(chan);

		/* Channels still waiting on connect completion are
		 * handled elsewhere.
		 */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			/* Security done: let blocked socket I/O resume */
			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);

		if (chan->state == BT_CONNECT) {

				l2cap_send_conn_req(chan);

				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);

		} else if (chan->state == BT_CONNECT2) {
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;

				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					struct sock *parent = bt_sk(sk)->parent;
					/* Wait for userspace authorization */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;

						parent->sk_data_ready(parent, 0);

					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;

				/* Security failure: refuse the connection */
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,

			/* Kick off configuration after a successful accept */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {

				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       l2cap_build_conf_req(chan, buf),
				chan->num_conf_req++;

		l2cap_chan_unlock(chan);

	mutex_unlock(&conn->chan_lock);
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 *
 * A start fragment (no ACL_CONT flag) must carry at least the basic
 * L2CAP header; the header's length field tells us how much payload to
 * expect.  Complete frames are handed to l2cap_recv_frame() directly,
 * otherwise conn->rx_skb accumulates fragments and conn->rx_len tracks
 * the remaining bytes.  Any framing inconsistency discards the partial
 * frame and marks the connection unreliable (ECOMM).
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
	struct l2cap_conn *conn = hcon->l2cap_data;

		conn = l2cap_conn_add(hcon, 0);

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;

			/* New start fragment while one was in progress:
			 * drop the stale partial frame.
			 */
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;

			l2cap_conn_unreliable(conn, ECOMM);

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			l2cap_conn_unreliable(conn, ECOMM);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		conn->rx_len = len - skb->len;

		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no frame in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;

			l2cap_conn_unreliable(conn, ECOMM);

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
5545 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5547 struct l2cap_chan *c;
5549 read_lock(&chan_list_lock);
5551 list_for_each_entry(c, &chan_list, global_l) {
5552 struct sock *sk = c->sk;
5554 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5555 batostr(&bt_sk(sk)->src),
5556 batostr(&bt_sk(sk)->dst),
5557 c->state, __le16_to_cpu(c->psm),
5558 c->scid, c->dcid, c->imtu, c->omtu,
5559 c->sec_level, c->mode);
5562 read_unlock(&chan_list_lock);
5567 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5569 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry, backed by seq_file
 * via l2cap_debugfs_open()/single_open().
 * NOTE(review): a .read = seq_read member is presumably elided from
 * this extract - confirm against the full file.
 */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.llseek		= seq_lseek,
	.release	= single_release,
5579 static struct dentry *l2cap_debugfs;
5581 int __init l2cap_init(void)
5585 err = l2cap_init_sockets();
5590 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5591 bt_debugfs, NULL, &l2cap_debugfs_fops);
5593 BT_ERR("Failed to create L2CAP debug file");
5599 void l2cap_exit(void)
5601 debugfs_remove(l2cap_debugfs);
5602 l2cap_cleanup_sockets();
5605 module_param(disable_ertm, bool, 0644);
5606 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");