2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
/* ERTM is enabled by default only when the extended-features Kconfig
 * option is set (the matching #else/#endif fall outside this view). */
58 #ifdef CONFIG_BT_L2CAP_EXT_FEATURES
59 static int enable_ertm = 1;
61 static int enable_ertm = 0;
/* Module-wide ERTM tuning knobs; copied into each socket by l2cap_sock_init(). */
63 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
64 static int tx_window = L2CAP_DEFAULT_TX_WINDOW;
/* Feature mask / fixed-channel map advertised in information responses. */
66 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
67 static u8 l2cap_fixed_chan[8] = { 0x02, };
69 static const struct proto_ops l2cap_sock_ops;
/* Workqueue that drains the busy queue once local-busy is cleared. */
71 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its rwlock. */
73 static struct bt_sock_list l2cap_sk_list = {
74 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
77 static void l2cap_busy_work(struct work_struct *work);
79 static void __l2cap_sock_close(struct sock *sk, int reason);
80 static void l2cap_sock_close(struct sock *sk);
81 static void l2cap_sock_kill(struct sock *sk);
83 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
84 u8 code, u8 ident, u16 dlen, void *data);
86 /* ---- L2CAP timers ---- */
/* sk_timer expiry handler: picks an errno reflecting how far the
 * connection got, then tears the socket down. */
87 static void l2cap_sock_timeout(unsigned long arg)
89 struct sock *sk = (struct sock *) arg;
92 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* Timed out after the link was up (or mid-config): refuse, not reset. */
96 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
97 reason = ECONNREFUSED;
/* Outgoing connect that was past the SDP security stage also refuses. */
98 else if (sk->sk_state == BT_CONNECT &&
99 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
100 reason = ECONNREFUSED;
104 __l2cap_sock_close(sk, reason);
/* (Re)arm sk_timer to fire `timeout` jiffies from now. */
112 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
114 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
115 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending sk_timer, if any. */
118 static void l2cap_sock_clear_timer(struct sock *sk)
120 BT_DBG("sock %p state %d", sk, sk->sk_state);
121 sk_stop_timer(sk, &sk->sk_timer);
124 /* ---- L2CAP channels ---- */
/* Walk a connection's channel list for a matching destination CID.
 * Caller must hold the list lock (double-underscore convention). */
125 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
128 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
129 if (l2cap_pi(s)->dcid == cid)
/* Same walk, keyed on our (source) CID. Caller holds the list lock. */
135 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
138 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
139 if (l2cap_pi(s)->scid == cid)
145 /* Find channel with given SCID.
146 * Returns locked socket */
147 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
151 s = __l2cap_get_chan_by_scid(l, cid)
154 read_unlock(&l->lock);
/* Locked-list walk keyed on the signalling command identifier. */
158 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
161 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
162 if (l2cap_pi(s)->ident == ident)
/* Lookup by ident under the read lock; returns the (locked) socket. */
168 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
172 s = __l2cap_get_chan_by_ident(l, ident);
175 read_unlock(&l->lock);
/* Pick the first free dynamic CID in [DYN_START, DYN_END). */
179 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
181 u16 cid = L2CAP_CID_DYN_START;
183 for (; cid < L2CAP_CID_DYN_END; cid++) {
184 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the connection's channel list.
 * Caller must hold the list write lock. */
191 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
196 l2cap_pi(l->head)->prev_c = sk;
198 l2cap_pi(sk)->next_c = l->head;
199 l2cap_pi(sk)->prev_c = NULL;
/* Unlink sk from the doubly linked channel list, taking the lock itself. */
203 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
205 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
207 write_lock_bh(&l->lock);
212 l2cap_pi(next)->prev_c = prev;
214 l2cap_pi(prev)->next_c = next;
215 write_unlock_bh(&l->lock);
/* Attach a socket to an L2CAP connection: assign CIDs appropriate for
 * the socket type, link it into the channel list, and enqueue it on the
 * listening parent if this is an incoming channel.
 * Caller must hold the channel-list write lock. */
220 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
222 struct l2cap_chan_list *l = &conn->chan_list;
224 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
225 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = "remote user terminated connection" default disconnect reason. */
227 conn->disc_reason = 0x13;
229 l2cap_pi(sk)->conn = conn;
231 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
232 /* Alloc CID for connection-oriented socket */
233 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
234 } else if (sk->sk_type == SOCK_DGRAM) {
235 /* Connectionless socket */
236 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
237 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
238 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
240 /* Raw socket can send/recv signalling messages only */
241 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
242 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
243 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
246 __l2cap_chan_link(l, sk);
249 bt_accept_enqueue(parent, sk);
/* Detach the channel from its connection and mark the socket closed.
253 * Must be called on the locked socket. */
254 static void l2cap_chan_del(struct sock *sk, int err)
256 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
257 struct sock *parent = bt_sk(sk)->parent;
259 l2cap_sock_clear_timer(sk);
261 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
264 /* Unlink from channel list */
265 l2cap_chan_unlink(&conn->chan_list, sk);
266 l2cap_pi(sk)->conn = NULL;
/* Drop the ACL reference this channel held on the HCI connection. */
267 hci_conn_put(conn->hcon);
270 sk->sk_state = BT_CLOSED;
271 sock_set_flag(sk, SOCK_ZAPPED);
/* Pending incoming channel: remove from parent's accept queue and
 * wake the listener so it can reap the dead child. */
277 bt_accept_unlink(sk);
278 parent->sk_data_ready(parent, 0);
280 sk->sk_state_change(sk);
283 /* Service level security */
/* Map the channel's PSM and requested security level to an HCI
 * authentication type, then ask the HCI layer to enforce it. */
284 static inline int l2cap_check_security(struct sock *sk)
286 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* PSM 0x0001 is SDP: never bond for it. */
289 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
290 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
291 auth_type = HCI_AT_NO_BONDING_MITM;
293 auth_type = HCI_AT_NO_BONDING;
/* LOW on SDP is downgraded to the dedicated SDP security level. */
295 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
296 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
298 switch (l2cap_pi(sk)->sec_level) {
299 case BT_SECURITY_HIGH:
300 auth_type = HCI_AT_GENERAL_BONDING_MITM;
302 case BT_SECURITY_MEDIUM:
303 auth_type = HCI_AT_GENERAL_BONDING;
306 auth_type = HCI_AT_NO_BONDING;
311 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection. */
315 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
319 /* Get next available identificator.
320 * 1 - 128 are used by kernel.
321 * 129 - 199 are reserved.
322 * 200 - 254 are used by utilities like l2ping, etc.
/* tx_ident is shared connection state; serialize under conn->lock. */
325 spin_lock_bh(&conn->lock);
/* Wrap back into the kernel-owned 1..128 range. */
327 if (++conn->tx_ident > 128)
332 spin_unlock_bh(&conn->lock);
/* Build a signalling command PDU and push it out on the ACL link. */
337 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
339 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
341 BT_DBG("code 0x%2.2x", code);
346 hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame (supervisory frame) carrying
 * `control`, folding in pending Final/Poll bits and an optional FCS. */
349 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
352 struct l2cap_hdr *lh;
353 struct l2cap_conn *conn = pi->conn;
/* Header + 2-byte control field; FCS (if on) adds 2 more below. */
354 int count, hlen = L2CAP_HDR_SIZE + 2;
356 if (pi->fcs == L2CAP_FCS_CRC16)
359 BT_DBG("pi %p, control 0x%2.2x", pi, control);
361 count = min_t(unsigned int, conn->mtu, hlen);
362 control |= L2CAP_CTRL_FRAME_TYPE;
/* Consume a pending F-bit: acknowledge the peer's last Poll. */
364 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
365 control |= L2CAP_CTRL_FINAL;
366 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Consume a pending P-bit: solicit an immediate response. */
369 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
370 control |= L2CAP_CTRL_POLL;
371 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
374 skb = bt_skb_alloc(count, GFP_ATOMIC);
378 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
379 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
380 lh->cid = cpu_to_le16(pi->dcid);
381 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the FCS field itself. */
383 if (pi->fcs == L2CAP_FCS_CRC16) {
384 u16 fcs = crc16(0, (u8 *)lh, count - 2);
385 put_unaligned_le16(fcs, skb_put(skb, 2));
388 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send Receiver-Ready, or Receiver-Not-Ready while locally busy,
 * acknowledging everything up to buffer_seq. */
391 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
393 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
394 control |= L2CAP_SUPER_RCV_NOT_READY;
/* Remember an RNR went out so we can send RR when busy clears. */
395 pi->conn_state |= L2CAP_CONN_RNR_SENT;
397 control |= L2CAP_SUPER_RCV_READY;
399 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
401 l2cap_send_sframe(pi, control);
/* Kick off channel establishment: if the remote feature mask is already
 * known, send a Connection Request (after the security check); otherwise
 * first issue an Information Request for the feature mask. */
404 static void l2cap_do_start(struct sock *sk)
406 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
408 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature-mask exchange still in flight; connect resumes from
 * l2cap_conn_start() when it completes. */
409 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
412 if (l2cap_check_security(sk)) {
413 struct l2cap_conn_req req;
414 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
415 req.psm = l2cap_pi(sk)->psm;
417 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
419 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
420 L2CAP_CONN_REQ, sizeof(req), &req);
423 struct l2cap_info_req req;
424 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
426 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
427 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the info response. */
429 mod_timer(&conn->info_timer, jiffies +
430 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
432 l2cap_send_cmd(conn, conn->info_ident,
433 L2CAP_INFO_REQ, sizeof(req), &req);
/* Issue a Disconnection Request for this channel's CID pair. */
437 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
439 struct l2cap_disconn_req req;
441 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
442 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
443 l2cap_send_cmd(conn, l2cap_get_ident(conn),
444 L2CAP_DISCONN_REQ, sizeof(req), &req);
447 /* ---- L2CAP connections ---- */
/* Resume channel setup for every socket on this connection once the
 * feature-mask exchange has finished: outgoing channels (BT_CONNECT)
 * send their Connection Request, incoming ones (BT_CONNECT2) get their
 * deferred Connection Response. */
448 static void l2cap_conn_start(struct l2cap_conn *conn)
450 struct l2cap_chan_list *l = &conn->chan_list;
453 BT_DBG("conn %p", conn);
457 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets take part in setup. */
460 if (sk->sk_type != SOCK_SEQPACKET &&
461 sk->sk_type != SOCK_STREAM) {
466 if (sk->sk_state == BT_CONNECT) {
467 if (l2cap_check_security(sk)) {
468 struct l2cap_conn_req req;
469 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
470 req.psm = l2cap_pi(sk)->psm;
472 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
474 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
475 L2CAP_CONN_REQ, sizeof(req), &req);
477 } else if (sk->sk_state == BT_CONNECT2) {
478 struct l2cap_conn_rsp rsp;
/* Response carries our CIDs swapped into the peer's view. */
479 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
480 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
482 if (l2cap_check_security(sk)) {
483 if (bt_sk(sk)->defer_setup) {
484 struct sock *parent = bt_sk(sk)->parent;
/* Defer-setup: report "pending / authorization pending"
 * and let userspace accept() decide. */
485 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
486 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
487 parent->sk_data_ready(parent, 0);
490 sk->sk_state = BT_CONFIG;
491 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
492 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: authentication pending. */
495 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
496 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
499 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
500 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
506 read_unlock(&l->lock);
/* ACL link came up: raw/dgram sockets become connected immediately,
 * connection-oriented ones start the L2CAP channel handshake. */
509 static void l2cap_conn_ready(struct l2cap_conn *conn)
511 struct l2cap_chan_list *l = &conn->chan_list;
514 BT_DBG("conn %p", conn);
518 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
521 if (sk->sk_type != SOCK_SEQPACKET &&
522 sk->sk_type != SOCK_STREAM) {
523 l2cap_sock_clear_timer(sk);
524 sk->sk_state = BT_CONNECTED;
525 sk->sk_state_change(sk);
526 } else if (sk->sk_state == BT_CONNECT)
532 read_unlock(&l->lock);
535 /* Notify sockets that we cannot guaranty reliability anymore */
536 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
538 struct l2cap_chan_list *l = &conn->chan_list;
541 BT_DBG("conn %p", conn);
/* Only force_reliable sockets are told; best-effort ones ignore it. */
545 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
546 if (l2cap_pi(sk)->force_reliable)
550 read_unlock(&l->lock);
/* Info-request timer expired: treat the exchange as done (with no
 * data) and let pending channels continue setup. */
553 static void l2cap_info_timeout(unsigned long arg)
555 struct l2cap_conn *conn = (void *) arg;
557 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
558 conn->info_ident = 0;
560 l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an ACL
 * link and initialize its locks, timer, and addressing. */
563 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
565 struct l2cap_conn *conn = hcon->l2cap_data;
570 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
574 hcon->l2cap_data = conn;
577 BT_DBG("hcon %p conn %p", hcon, conn);
/* Outgoing MTU is bounded by the controller's ACL buffer size. */
579 conn->mtu = hcon->hdev->acl_mtu;
580 conn->src = &hcon->hdev->bdaddr;
581 conn->dst = &hcon->dst;
585 spin_lock_init(&conn->lock);
586 rwlock_init(&conn->chan_list.lock);
588 setup_timer(&conn->info_timer, l2cap_info_timeout,
589 (unsigned long) conn);
/* 0x13 = "remote user terminated connection" default reason. */
591 conn->disc_reason = 0x13;
/* Tear down an L2CAP connection: free partial reassembly, delete every
 * channel with `err`, stop the info timer, and detach from the hcon. */
596 static void l2cap_conn_del(struct hci_conn *hcon, int err)
598 struct l2cap_conn *conn = hcon->l2cap_data;
604 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
606 kfree_skb(conn->rx_skb);
609 while ((sk = conn->chan_list.head)) {
611 l2cap_chan_del(sk, err);
616 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
617 del_timer_sync(&conn->info_timer);
619 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
623 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
625 struct l2cap_chan_list *l = &conn->chan_list;
626 write_lock_bh(&l->lock);
627 __l2cap_chan_add(conn, sk, parent);
628 write_unlock_bh(&l->lock);
631 /* ---- Socket interface ---- */
/* Exact-match lookup: socket bound to this PSM and source address.
 * Caller must hold l2cap_sk_list.lock. */
632 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
635 struct hlist_node *node;
636 sk_for_each(sk, node, &l2cap_sk_list.head)
637 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
644 /* Find socket with psm and source bdaddr.
645 * Returns closest match.
647 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
649 struct sock *sk = NULL, *sk1 = NULL;
650 struct hlist_node *node;
652 sk_for_each(sk, node, &l2cap_sk_list.head) {
653 if (state && sk->sk_state != state)
656 if (l2cap_pi(sk)->psm == psm) {
/* Exact source-address match wins outright. */
658 if (!bacmp(&bt_sk(sk)->src, src))
/* Remember a wildcard (BDADDR_ANY) listener as fallback. */
662 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop broke on an exact match. */
666 return node ? sk : sk1;
669 /* Find socket with given address (psm, src).
670 * Returns locked socket */
671 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
674 read_lock(&l2cap_sk_list.lock);
675 s = __l2cap_get_sock_by_psm(state, psm, src);
678 read_unlock(&l2cap_sk_list.lock);
/* Socket destructor: release any queued skbs. */
682 static void l2cap_sock_destruct(struct sock *sk)
686 skb_queue_purge(&sk->sk_receive_queue);
687 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
690 static void l2cap_sock_cleanup_listen(struct sock *parent)
694 BT_DBG("parent %p", parent);
696 /* Close not yet accepted channels */
697 while ((sk = bt_accept_dequeue(parent, NULL)))
698 l2cap_sock_close(sk);
700 parent->sk_state = BT_CLOSED;
701 sock_set_flag(parent, SOCK_ZAPPED);
704 /* Kill socket (only if zapped and orphan)
705 * Must be called on unlocked socket.
707 static void l2cap_sock_kill(struct sock *sk)
/* Still attached to a struct socket, or not zapped: not ours to free. */
709 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
712 BT_DBG("sk %p state %d", sk, sk->sk_state);
714 /* Kill poor orphan */
715 bt_sock_unlink(&l2cap_sk_list, sk);
716 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listening sockets reap children; connected
 * channels send a Disconnection Request and wait; half-open incoming
 * channels send a refusing Connection Response; everything else is
 * just deleted/zapped. Caller holds the socket lock. */
720 static void __l2cap_sock_close(struct sock *sk, int reason)
722 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
724 switch (sk->sk_state) {
726 l2cap_sock_cleanup_listen(sk);
731 if (sk->sk_type == SOCK_SEQPACKET ||
732 sk->sk_type == SOCK_STREAM) {
733 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Orderly shutdown: enter BT_DISCONN and bound the wait with
 * the socket's send timeout. */
735 sk->sk_state = BT_DISCONN;
736 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
737 l2cap_send_disconn_req(conn, sk);
739 l2cap_chan_del(sk, reason);
743 if (sk->sk_type == SOCK_SEQPACKET ||
744 sk->sk_type == SOCK_STREAM) {
745 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
746 struct l2cap_conn_rsp rsp;
/* Closing a deferred-setup channel reads as a security block
 * to the peer; otherwise report an unknown PSM. */
749 if (bt_sk(sk)->defer_setup)
750 result = L2CAP_CR_SEC_BLOCK;
752 result = L2CAP_CR_BAD_PSM;
754 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
755 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
756 rsp.result = cpu_to_le16(result);
757 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
758 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
759 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
761 l2cap_chan_del(sk, reason);
766 l2cap_chan_del(sk, reason);
770 sock_set_flag(sk, SOCK_ZAPPED);
775 /* Must be called on unlocked socket. */
776 static void l2cap_sock_close(struct sock *sk)
778 l2cap_sock_clear_timer(sk);
780 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize per-socket L2CAP state. A child of a listening socket
 * inherits its parent's options; a fresh socket gets the module
 * defaults (enable_ertm, max_transmit, tx_window). */
785 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
787 struct l2cap_pinfo *pi = l2cap_pi(sk);
792 sk->sk_type = parent->sk_type;
793 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
795 pi->imtu = l2cap_pi(parent)->imtu;
796 pi->omtu = l2cap_pi(parent)->omtu;
797 pi->mode = l2cap_pi(parent)->mode;
798 pi->fcs = l2cap_pi(parent)->fcs;
799 pi->max_tx = l2cap_pi(parent)->max_tx;
800 pi->tx_win = l2cap_pi(parent)->tx_win;
801 pi->sec_level = l2cap_pi(parent)->sec_level;
802 pi->role_switch = l2cap_pi(parent)->role_switch;
803 pi->force_reliable = l2cap_pi(parent)->force_reliable;
805 pi->imtu = L2CAP_DEFAULT_MTU;
/* SOCK_STREAM defaults to ERTM when the module allows it. */
807 if (enable_ertm && sk->sk_type == SOCK_STREAM)
808 pi->mode = L2CAP_MODE_ERTM;
810 pi->mode = L2CAP_MODE_BASIC;
811 pi->max_tx = max_transmit;
812 pi->fcs = L2CAP_FCS_CRC16;
813 pi->tx_win = tx_window;
814 pi->sec_level = BT_SECURITY_LOW;
816 pi->force_reliable = 0;
819 /* Default config options */
821 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* ERTM bookkeeping queues start empty. */
822 skb_queue_head_init(TX_QUEUE(sk));
823 skb_queue_head_init(SREJ_QUEUE(sk));
824 skb_queue_head_init(BUSY_QUEUE(sk));
825 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sizes sk_alloc()'s allocation for l2cap_pinfo. */
828 static struct proto l2cap_proto = {
830 .owner = THIS_MODULE,
831 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize an L2CAP socket, arm its timer,
 * and link it into the global socket list. */
834 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
838 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
842 sock_init_data(sock, sk);
843 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
845 sk->sk_destruct = l2cap_sock_destruct;
846 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
848 sock_reset_flag(sk, SOCK_ZAPPED);
850 sk->sk_protocol = proto;
851 sk->sk_state = BT_OPEN;
853 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
855 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type, require CAP_NET_RAW for
 * user-created raw sockets, then allocate and init the sock. */
859 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
864 BT_DBG("sock %p", sock);
866 sock->state = SS_UNCONNECTED;
868 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
869 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
870 return -ESOCKTNOSUPPORT;
/* `kern` bypasses the capability check for in-kernel users. */
872 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
875 sock->ops = &l2cap_sock_ops;
877 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
881 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, enforce the
 * privileged-PSM rule, reject duplicate (psm, bdaddr) bindings, and
 * record the source address/PSM. */
885 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
887 struct sock *sk = sock->sk;
888 struct sockaddr_l2 la;
893 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Zero-fill then copy min(sizeof, alen): tolerates short sockaddrs. */
896 memset(&la, 0, sizeof(la));
897 len = min_t(unsigned int, sizeof(la), alen);
898 memcpy(&la, addr, len);
905 if (sk->sk_state != BT_OPEN) {
/* PSMs below 0x1001 are reserved; binding them needs privilege. */
910 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
911 !capable(CAP_NET_BIND_SERVICE)) {
916 write_lock_bh(&l2cap_sk_list.lock);
918 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
921 /* Save source address */
922 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
923 l2cap_pi(sk)->psm = la.l2_psm;
924 l2cap_pi(sk)->sport = la.l2_psm;
925 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) carry no L2CAP-level security. */
927 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
928 __le16_to_cpu(la.l2_psm) == 0x0003)
929 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
932 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish (or reuse) the ACL link to the destination, derive the HCI
 * auth type from socket type / PSM / security level, attach the channel
 * to the connection, and start the L2CAP handshake. */
939 static int l2cap_do_connect(struct sock *sk)
941 bdaddr_t *src = &bt_sk(sk)->src;
942 bdaddr_t *dst = &bt_sk(sk)->dst;
943 struct l2cap_conn *conn;
944 struct hci_conn *hcon;
945 struct hci_dev *hdev;
949 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
952 hdev = hci_get_route(dst, src);
954 return -EHOSTUNREACH;
956 hci_dev_lock_bh(hdev);
/* Raw sockets use dedicated bonding (pairing tools). */
960 if (sk->sk_type == SOCK_RAW) {
961 switch (l2cap_pi(sk)->sec_level) {
962 case BT_SECURITY_HIGH:
963 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
965 case BT_SECURITY_MEDIUM:
966 auth_type = HCI_AT_DEDICATED_BONDING;
969 auth_type = HCI_AT_NO_BONDING;
/* SDP PSM never bonds; mirrors l2cap_check_security(). */
972 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
973 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
974 auth_type = HCI_AT_NO_BONDING_MITM;
976 auth_type = HCI_AT_NO_BONDING;
978 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
979 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
981 switch (l2cap_pi(sk)->sec_level) {
982 case BT_SECURITY_HIGH:
983 auth_type = HCI_AT_GENERAL_BONDING_MITM;
985 case BT_SECURITY_MEDIUM:
986 auth_type = HCI_AT_GENERAL_BONDING;
989 auth_type = HCI_AT_NO_BONDING;
994 hcon = hci_connect(hdev, ACL_LINK, dst,
995 l2cap_pi(sk)->sec_level, auth_type);
999 conn = l2cap_conn_add(hcon, 0);
1007 /* Update source addr of the socket */
1008 bacpy(src, conn->src);
1010 l2cap_chan_add(conn, sk, NULL);
1012 sk->sk_state = BT_CONNECT;
1013 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* ACL already up: raw/dgram sockets are connected immediately. */
1015 if (hcon->state == BT_CONNECTED) {
1016 if (sk->sk_type != SOCK_SEQPACKET &&
1017 sk->sk_type != SOCK_STREAM) {
1018 l2cap_sock_clear_timer(sk);
1019 sk->sk_state = BT_CONNECTED;
1025 hci_dev_unlock_bh(hdev);
/* connect(2): validate address and mode, record destination, start the
 * connection, and optionally block until BT_CONNECTED. */
1030 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1032 struct sock *sk = sock->sk;
1033 struct sockaddr_l2 la;
1036 BT_DBG("sk %p", sk);
1038 if (!addr || alen < sizeof(addr->sa_family) ||
1039 addr->sa_family != AF_BLUETOOTH)
1042 memset(&la, 0, sizeof(la));
1043 len = min_t(unsigned int, sizeof(la), alen);
1044 memcpy(&la, addr, len);
/* Connection-oriented sockets need a PSM (condition continues on a
 * line outside this sampled view). */
1051 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
/* ERTM/streaming are only usable when enable_ertm permits them. */
1057 switch (l2cap_pi(sk)->mode) {
1058 case L2CAP_MODE_BASIC:
1060 case L2CAP_MODE_ERTM:
1061 case L2CAP_MODE_STREAMING:
1070 switch (sk->sk_state) {
1074 /* Already connecting */
1078 /* Already connected */
1091 /* Set destination address and psm */
1092 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1093 l2cap_pi(sk)->psm = la.l2_psm;
1095 err = l2cap_do_connect(sk);
/* Block (honoring O_NONBLOCK) until the handshake completes. */
1100 err = bt_sock_wait_state(sk, BT_CONNECTED,
1101 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound, connection-oriented sockets in a permitted
 * mode may listen; auto-assigns a free odd dynamic PSM if unbound. */
1107 static int l2cap_sock_listen(struct socket *sock, int backlog)
1109 struct sock *sk = sock->sk;
1112 BT_DBG("sk %p backlog %d", sk, backlog);
1116 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1117 || sk->sk_state != BT_BOUND) {
1122 switch (l2cap_pi(sk)->mode) {
1123 case L2CAP_MODE_BASIC:
1125 case L2CAP_MODE_ERTM:
1126 case L2CAP_MODE_STREAMING:
1135 if (!l2cap_pi(sk)->psm) {
1136 bdaddr_t *src = &bt_sk(sk)->src;
1141 write_lock_bh(&l2cap_sk_list.lock);
/* Dynamic PSMs are odd values in 0x1001..0x10ff. */
1143 for (psm = 0x1001; psm < 0x1100; psm += 2)
1144 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1145 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1146 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1151 write_unlock_bh(&l2cap_sk_list.lock);
1157 sk->sk_max_ack_backlog = backlog;
1158 sk->sk_ack_backlog = 0;
1159 sk->sk_state = BT_LISTEN;
/* accept(2): classic wake-one sleep loop — wait on the parent's queue
 * until a fully connected child can be dequeued, a signal arrives, or
 * the (possibly non-blocking) timeout expires. */
1166 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1168 DECLARE_WAITQUEUE(wait, current);
1169 struct sock *sk = sock->sk, *nsk;
1173 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1175 if (sk->sk_state != BT_LISTEN) {
1180 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1182 BT_DBG("sk %p timeo %ld", sk, timeo);
1184 /* Wait for an incoming connection. (wake-one). */
1185 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1186 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1187 set_current_state(TASK_INTERRUPTIBLE);
/* Socket is unlocked around schedule_timeout (release outside view). */
1194 timeo = schedule_timeout(timeo);
1195 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
/* Listener may have been closed while we slept. */
1197 if (sk->sk_state != BT_LISTEN) {
1202 if (signal_pending(current)) {
1203 err = sock_intr_errno(timeo);
1207 set_current_state(TASK_RUNNING);
1208 remove_wait_queue(sk_sleep(sk), &wait);
1213 newsock->state = SS_CONNECTED;
1215 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with either the peer's
 * (dst, psm, dcid) or our own (src, sport, scid). */
1222 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1224 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1225 struct sock *sk = sock->sk;
1227 BT_DBG("sock %p, sk %p", sock, sk);
1229 addr->sa_family = AF_BLUETOOTH;
1230 *len = sizeof(struct sockaddr_l2);
1233 la->l2_psm = l2cap_pi(sk)->psm;
1234 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1235 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1237 la->l2_psm = l2cap_pi(sk)->sport;
1238 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1239 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* Sleep until every outstanding ERTM I-frame has been acknowledged
 * (or the connection drops, a signal arrives, or a socket error). */
1245 static int __l2cap_wait_ack(struct sock *sk)
1247 DECLARE_WAITQUEUE(wait, current);
1251 add_wait_queue(sk_sleep(sk), &wait);
1252 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1253 set_current_state(TASK_INTERRUPTIBLE);
1258 if (signal_pending(current)) {
1259 err = sock_intr_errno(timeo);
1264 timeo = schedule_timeout(timeo);
1267 err = sock_error(sk);
1271 set_current_state(TASK_RUNNING);
1272 remove_wait_queue(sk_sleep(sk), &wait);
/* Monitor timer: peer stopped answering our Polls. Give up after
 * remote_max_tx retries, otherwise re-poll with RR/RNR + P-bit. */
1276 static void l2cap_monitor_timeout(unsigned long arg)
1278 struct sock *sk = (void *) arg;
1281 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1282 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1287 l2cap_pi(sk)->retry_count++;
1288 __mod_monitor_timer();
1290 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Retransmission timer: an I-frame went unacknowledged; switch to the
 * monitor timer and poll the peer for its receive state. */
1294 static void l2cap_retrans_timeout(unsigned long arg)
1296 struct sock *sk = (void *) arg;
1299 l2cap_pi(sk)->retry_count = 1;
1300 __mod_monitor_timer();
/* WAIT_F: hold further transmission until the peer's F-bit arrives. */
1302 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1304 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Pop acknowledged I-frames off the TX queue; stop the retransmission
 * timer once nothing remains unacked. */
1308 static void l2cap_drop_acked_frames(struct sock *sk)
1310 struct sk_buff *skb;
1312 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1313 l2cap_pi(sk)->unacked_frames) {
/* Stop at the first frame the peer has not yet acknowledged. */
1314 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1317 skb = skb_dequeue(TX_QUEUE(sk));
1320 l2cap_pi(sk)->unacked_frames--;
1323 if (!l2cap_pi(sk)->unacked_frames)
1324 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one fully built L2CAP frame to the HCI layer. */
1327 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1329 struct l2cap_pinfo *pi = l2cap_pi(sk);
1331 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1333 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode: transmit every queued frame, stamping TxSeq and FCS
 * into a clone; no retransmission, so frames are dequeued and freed. */
1336 static int l2cap_streaming_send(struct sock *sk)
1338 struct sk_buff *skb, *tx_skb;
1339 struct l2cap_pinfo *pi = l2cap_pi(sk);
1342 while ((skb = sk->sk_send_head)) {
/* Clone so the original stays queued until dequeued below. */
1343 tx_skb = skb_clone(skb, GFP_ATOMIC);
1345 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1346 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1347 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1349 if (pi->fcs == L2CAP_FCS_CRC16) {
1350 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1351 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1354 l2cap_do_send(sk, tx_skb);
/* TxSeq is modulo-64 per the ERTM control field. */
1356 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1358 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1359 sk->sk_send_head = NULL;
1361 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1363 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the single queued I-frame with the given TxSeq (SREJ
 * recovery): locate it in the TX queue, refresh ReqSeq and FCS on a
 * clone, and resend. Disconnects when remote_max_tx is exhausted. */
1369 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1371 struct l2cap_pinfo *pi = l2cap_pi(sk);
1372 struct sk_buff *skb, *tx_skb;
1375 skb = skb_peek(TX_QUEUE(sk));
1380 if (bt_cb(skb)->tx_seq == tx_seq)
1383 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1386 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1388 if (pi->remote_max_tx &&
1389 bt_cb(skb)->retries == pi->remote_max_tx) {
1390 l2cap_send_disconn_req(pi->conn, sk);
1394 tx_skb = skb_clone(skb, GFP_ATOMIC);
1395 bt_cb(skb)->retries++;
1396 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Refresh ReqSeq to our current receive state; keep the frame's TxSeq. */
1397 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1398 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1399 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1401 if (pi->fcs == L2CAP_FCS_CRC16) {
1402 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1403 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1406 l2cap_do_send(sk, tx_skb);
/* ERTM transmit path: send queued I-frames while the TX window has
 * room and the peer is not busy. Each frame is cloned, stamped with
 * the current ReqSeq/TxSeq (and pending F-bit), FCS'd, and sent; the
 * original stays on TX_QUEUE(sk) for possible retransmission.
 * Returns once the window fills, the queue drains, or WAIT_F blocks
 * transmission. */
1409 static int l2cap_ertm_send(struct sock *sk)
1411 struct sk_buff *skb, *tx_skb;
1412 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* Waiting for the peer's F-bit after a Poll: no new I-frames allowed. */
1416 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1419 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1420 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
/* Peer's MaxTransmit exhausted for this frame: tear down the link. */
1422 if (pi->remote_max_tx &&
1423 bt_cb(skb)->retries == pi->remote_max_tx) {
1424 l2cap_send_disconn_req(pi->conn, sk);
1428 tx_skb = skb_clone(skb, GFP_ATOMIC);
1430 bt_cb(skb)->retries++;
1432 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1433 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1434 control |= L2CAP_CTRL_FINAL;
1435 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1437 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1438 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1439 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FIX: compute and store the FCS through tx_skb (the clone being
 * transmitted), not the queued original. The old code only worked
 * because skb_clone() shares the data buffer; using tx_skb is
 * correct and consistent with l2cap_streaming_send() and
 * l2cap_retransmit_one_frame(). */
1442 if (pi->fcs == L2CAP_FCS_CRC16) {
1443 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1444 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1447 l2cap_do_send(sk, tx_skb);
/* Arm the retransmission timer for the frame just sent. */
1449 __mod_retrans_timer();
1451 bt_cb(skb)->tx_seq = pi->next_tx_seq;
/* TxSeq is modulo-64 per the ERTM control field. */
1452 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1454 pi->unacked_frames++;
1457 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1458 sk->sk_send_head = NULL;
1460 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* REJ recovery: rewind sk_send_head to the front of the TX queue and
 * next_tx_seq to the last acknowledged sequence, then resend. */
1468 static int l2cap_retransmit_frames(struct sock *sk)
1470 struct l2cap_pinfo *pi = l2cap_pi(sk);
1473 spin_lock_bh(&pi->send_lock);
1475 if (!skb_queue_empty(TX_QUEUE(sk)))
1476 sk->sk_send_head = TX_QUEUE(sk)->next;
1478 pi->next_tx_seq = pi->expected_ack_seq;
1479 ret = l2cap_ertm_send(sk);
1481 spin_unlock_bh(&pi->send_lock);
/* Acknowledge received frames: send RNR while locally busy, otherwise
 * try to piggyback the ack on pending I-frames and fall back to RR. */
1486 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1488 struct sock *sk = (struct sock *)pi;
1492 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1494 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1495 control |= L2CAP_SUPER_RCV_NOT_READY;
1496 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1497 l2cap_send_sframe(pi, control);
/* If an I-frame went out it already carried the ReqSeq ack. */
1501 spin_lock_bh(&pi->send_lock);
1502 nframes = l2cap_ertm_send(sk);
1503 spin_unlock_bh(&pi->send_lock);
1508 control |= L2CAP_SUPER_RCV_READY;
1509 l2cap_send_sframe(pi, control);
/* Send an SREJ with the F-bit for the newest entry on the SREJ list. */
1512 static void l2cap_send_srejtail(struct sock *sk)
1514 struct srej_list *tail;
1517 control = L2CAP_SUPER_SELECT_REJECT;
1518 control |= L2CAP_CTRL_FINAL;
1520 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1521 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1523 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy `len` bytes of user iovec data into skb: `count` bytes into the
 * head, the remainder into MTU-sized fragments chained on frag_list. */
1526 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1528 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1529 struct sk_buff **frag;
1532 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1538 /* Continuation fragments (no L2CAP header) */
1539 frag = &skb_shinfo(skb)->frag_list;
1541 count = min_t(unsigned int, conn->mtu, len);
1543 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1546 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1552 frag = &(*frag)->next;
/* Build a connectionless (SOCK_DGRAM) L2CAP PDU: basic L2CAP header
 * followed by a 2-byte PSM, then the user payload copied from @msg.
 * Returns the skb or an ERR_PTR on allocation/copy failure. */
1558 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1560 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1561 struct sk_buff *skb;
/* hlen = L2CAP header + 2 bytes for the PSM prefix. */
1562 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1563 struct l2cap_hdr *lh;
1565 BT_DBG("sk %p len %d", sk, (int)len);
/* Linear part limited by ACL MTU; the rest goes into fragments. */
1567 count = min_t(unsigned int, (conn->mtu - hlen), len);
1568 skb = bt_skb_send_alloc(sk, count + hlen,
1569 msg->msg_flags & MSG_DONTWAIT, &err);
1571 return ERR_PTR(-ENOMEM);
1573 /* Create L2CAP header */
1574 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1575 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
/* Length field covers payload + PSM but not the L2CAP header itself. */
1576 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1577 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1579 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1580 if (unlikely(err < 0)) {
1582 return ERR_PTR(err);
/* Build a basic-mode L2CAP PDU: plain L2CAP header plus the user
 * payload from @msg. Returns the skb or an ERR_PTR on failure. */
1587 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1589 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1590 struct sk_buff *skb;
1591 int err, count, hlen = L2CAP_HDR_SIZE;
1592 struct l2cap_hdr *lh;
1594 BT_DBG("sk %p len %d", sk, (int)len);
1596 count = min_t(unsigned int, (conn->mtu - hlen), len);
1597 skb = bt_skb_send_alloc(sk, count + hlen,
1598 msg->msg_flags & MSG_DONTWAIT, &err);
1600 return ERR_PTR(-ENOMEM);
1602 /* Create L2CAP header */
1603 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1604 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
/* hlen == L2CAP_HDR_SIZE here, so the length field is just @len. */
1605 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1607 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1608 if (unlikely(err < 0)) {
1610 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * field, optional 16-bit SDU-length (for SAR start frames, when
 * @sdulen is non-zero), the payload, and an FCS placeholder when
 * CRC16 is configured. Returns the skb or an ERR_PTR. */
1615 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1617 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1618 struct sk_buff *skb;
/* hlen = L2CAP header + 2-byte control field (sdulen/FCS added below). */
1619 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1620 struct l2cap_hdr *lh;
1622 BT_DBG("sk %p len %d", sk, (int)len);
1625 return ERR_PTR(-ENOTCONN);
1630 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1633 count = min_t(unsigned int, (conn->mtu - hlen), len);
1634 skb = bt_skb_send_alloc(sk, count + hlen,
1635 msg->msg_flags & MSG_DONTWAIT, &err);
1637 return ERR_PTR(-ENOMEM);
1639 /* Create L2CAP header */
1640 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1641 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1642 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1643 put_unaligned_le16(control, skb_put(skb, 2));
/* SAR "start" frames carry the total SDU length after the control field. */
1645 put_unaligned_le16(sdulen, skb_put(skb, 2));
1647 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1648 if (unlikely(err < 0)) {
1650 return ERR_PTR(err);
/* Reserve space for the CRC16 FCS; the real value is filled in at
 * transmit time (control field is finalized then too). */
1653 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1654 put_unaligned_le16(0, skb_put(skb, 2));
1656 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a chain of I-frames
 * (SAR start / continuation / end), build them on a local queue, then
 * splice the whole chain onto the socket's TX queue atomically.
 * Returns the segmented size on success (return outside this excerpt)
 * or a negative errno from PDU creation. */
1660 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1662 struct l2cap_pinfo *pi = l2cap_pi(sk);
1663 struct sk_buff *skb;
1664 struct sk_buff_head sar_queue;
1668 skb_queue_head_init(&sar_queue);
/* First fragment: SDU-start frame carrying the total SDU length. */
1669 control = L2CAP_SDU_START;
1670 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1672 return PTR_ERR(skb);
1674 __skb_queue_tail(&sar_queue, skb);
1675 len -= pi->remote_mps;
1676 size += pi->remote_mps;
/* Middle fragments are full remote_mps; the final one takes the rest. */
1681 if (len > pi->remote_mps) {
1682 control = L2CAP_SDU_CONTINUE;
1683 buflen = pi->remote_mps;
1685 control = L2CAP_SDU_END;
1689 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On failure drop everything already segmented for this SDU. */
1691 skb_queue_purge(&sar_queue);
1692 return PTR_ERR(skb);
1695 __skb_queue_tail(&sar_queue, skb);
1699 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1700 spin_lock_bh(&pi->send_lock);
/* If nothing was pending, transmission starts from our first fragment. */
1701 if (sk->sk_send_head == NULL)
1702 sk->sk_send_head = sar_queue.next;
1703 spin_unlock_bh(&pi->send_lock);
/* sendmsg() entry point for L2CAP sockets. Dispatches on socket type
 * and channel mode: connectionless PDU for SOCK_DGRAM, single basic
 * PDU for basic mode, and (possibly segmented) I-frames queued and
 * transmitted for ERTM/streaming modes. Returns bytes sent or a
 * negative errno (several error paths fall outside this excerpt). */
1708 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1710 struct sock *sk = sock->sk;
1711 struct l2cap_pinfo *pi = l2cap_pi(sk);
1712 struct sk_buff *skb;
1716 BT_DBG("sock %p, sk %p", sock, sk);
1718 err = sock_error(sk);
/* L2CAP has no out-of-band data. */
1722 if (msg->msg_flags & MSG_OOB)
1727 if (sk->sk_state != BT_CONNECTED) {
1732 /* Connectionless channel */
1733 if (sk->sk_type == SOCK_DGRAM) {
1734 skb = l2cap_create_connless_pdu(sk, msg, len);
1738 l2cap_do_send(sk, skb);
1745 case L2CAP_MODE_BASIC:
1746 /* Check outgoing MTU */
1747 if (len > pi->omtu) {
1752 /* Create a basic PDU */
1753 skb = l2cap_create_basic_pdu(sk, msg, len);
1759 l2cap_do_send(sk, skb);
1763 case L2CAP_MODE_ERTM:
1764 case L2CAP_MODE_STREAMING:
1765 /* Entire SDU fits into one PDU */
1766 if (len <= pi->remote_mps) {
1767 control = L2CAP_SDU_UNSEGMENTED;
1768 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1773 __skb_queue_tail(TX_QUEUE(sk), skb);
/* send_lock only exists/matters for ERTM; streaming sends unlocked. */
1775 if (pi->mode == L2CAP_MODE_ERTM)
1776 spin_lock_bh(&pi->send_lock);
1778 if (sk->sk_send_head == NULL)
1779 sk->sk_send_head = skb;
1781 if (pi->mode == L2CAP_MODE_ERTM)
1782 spin_unlock_bh(&pi->send_lock);
1784 /* Segment SDU into multiples PDUs */
1785 err = l2cap_sar_segment_sdu(sk, msg, len);
/* Kick transmission of whatever was queued above. */
1790 if (pi->mode == L2CAP_MODE_STREAMING) {
1791 err = l2cap_streaming_send(sk);
1793 spin_lock_bh(&pi->send_lock);
1794 err = l2cap_ertm_send(sk);
1795 spin_unlock_bh(&pi->send_lock);
1803 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() entry point. If the channel is a deferred-setup incoming
 * connection still in BT_CONNECT2, the first read acts as the "accept":
 * it sends the pending connect response and moves to BT_CONFIG.
 * Normal data reception is delegated to bt_sock_recvmsg(). */
1812 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1814 struct sock *sk = sock->sk;
1818 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1819 struct l2cap_conn_rsp rsp;
1821 sk->sk_state = BT_CONFIG;
/* From the remote's point of view our dcid is its scid and vice versa. */
1823 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1824 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1825 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1826 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1827 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1828 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1836 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM).
 * Options are read into a struct pre-filled with current values so a
 * short copy_from_user leaves unspecified fields unchanged. */
1839 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1841 struct sock *sk = sock->sk;
1842 struct l2cap_options opts;
1846 BT_DBG("sk %p", sk);
/* Seed with current settings; a shorter user buffer keeps the rest. */
1852 opts.imtu = l2cap_pi(sk)->imtu;
1853 opts.omtu = l2cap_pi(sk)->omtu;
1854 opts.flush_to = l2cap_pi(sk)->flush_to;
1855 opts.mode = l2cap_pi(sk)->mode;
1856 opts.fcs = l2cap_pi(sk)->fcs;
1857 opts.max_tx = l2cap_pi(sk)->max_tx;
1858 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1860 len = min_t(unsigned int, sizeof(opts), optlen);
1861 if (copy_from_user((char *) &opts, optval, len)) {
/* Only known channel modes are accepted (default case not visible). */
1866 l2cap_pi(sk)->mode = opts.mode;
1867 switch (l2cap_pi(sk)->mode) {
1868 case L2CAP_MODE_BASIC:
1870 case L2CAP_MODE_ERTM:
1871 case L2CAP_MODE_STREAMING:
1880 l2cap_pi(sk)->imtu = opts.imtu;
1881 l2cap_pi(sk)->omtu = opts.omtu;
1882 l2cap_pi(sk)->fcs = opts.fcs;
1883 l2cap_pi(sk)->max_tx = opts.max_tx;
1884 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1888 if (get_user(opt, (u32 __user *) optval)) {
/* L2CAP_LM: map legacy link-mode bits onto the security-level model;
 * the highest matching bit wins since assignments are ordered. */
1893 if (opt & L2CAP_LM_AUTH)
1894 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1895 if (opt & L2CAP_LM_ENCRYPT)
1896 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1897 if (opt & L2CAP_LM_SECURE)
1898 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1900 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1901 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* SOL_BLUETOOTH setsockopt handler (BT_SECURITY, BT_DEFER_SETUP);
 * SOL_L2CAP is routed to the legacy handler for compatibility. */
1913 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1915 struct sock *sk = sock->sk;
1916 struct bt_security sec;
1920 BT_DBG("sk %p", sk);
1922 if (level == SOL_L2CAP)
1923 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1925 if (level != SOL_BLUETOOTH)
1926 return -ENOPROTOOPT;
/* BT_SECURITY only makes sense on connection-oriented or raw sockets. */
1932 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1933 && sk->sk_type != SOCK_RAW) {
1938 sec.level = BT_SECURITY_LOW;
1940 len = min_t(unsigned int, sizeof(sec), optlen);
1941 if (copy_from_user((char *) &sec, optval, len)) {
1946 if (sec.level < BT_SECURITY_LOW ||
1947 sec.level > BT_SECURITY_HIGH) {
1952 l2cap_pi(sk)->sec_level = sec.level;
1955 case BT_DEFER_SETUP:
/* Defer-setup may only be toggled before the socket is connected. */
1956 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1961 if (get_user(opt, (u32 __user *) optval)) {
1966 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler (L2CAP_OPTIONS, L2CAP_LM,
 * L2CAP_CONNINFO). Copies at most the user-supplied length back. */
1978 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1980 struct sock *sk = sock->sk;
1981 struct l2cap_options opts;
1982 struct l2cap_conninfo cinfo;
1986 BT_DBG("sk %p", sk);
1988 if (get_user(len, optlen))
1995 opts.imtu = l2cap_pi(sk)->imtu;
1996 opts.omtu = l2cap_pi(sk)->omtu;
1997 opts.flush_to = l2cap_pi(sk)->flush_to;
1998 opts.mode = l2cap_pi(sk)->mode;
1999 opts.fcs = l2cap_pi(sk)->fcs;
2000 opts.max_tx = l2cap_pi(sk)->max_tx;
2001 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2003 len = min_t(unsigned int, len, sizeof(opts));
2004 if (copy_to_user(optval, (char *) &opts, len))
/* L2CAP_LM: translate the security level back into legacy LM bits. */
2010 switch (l2cap_pi(sk)->sec_level) {
2011 case BT_SECURITY_LOW:
2012 opt = L2CAP_LM_AUTH;
2014 case BT_SECURITY_MEDIUM:
2015 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2017 case BT_SECURITY_HIGH:
2018 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2026 if (l2cap_pi(sk)->role_switch)
2027 opt |= L2CAP_LM_MASTER;
2029 if (l2cap_pi(sk)->force_reliable)
2030 opt |= L2CAP_LM_RELIABLE;
2032 if (put_user(opt, (u32 __user *) optval))
2036 case L2CAP_CONNINFO:
/* Connection info is valid once connected, or while a deferred-setup
 * incoming connection is pending in BT_CONNECT2. */
2037 if (sk->sk_state != BT_CONNECTED &&
2038 !(sk->sk_state == BT_CONNECT2 &&
2039 bt_sk(sk)->defer_setup)) {
2044 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2045 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2047 len = min_t(unsigned int, len, sizeof(cinfo));
2048 if (copy_to_user(optval, (char *) &cinfo, len))
/* SOL_BLUETOOTH getsockopt handler (BT_SECURITY, BT_DEFER_SETUP);
 * SOL_L2CAP is routed to the legacy handler for compatibility. */
2062 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2064 struct sock *sk = sock->sk;
2065 struct bt_security sec;
2068 BT_DBG("sk %p", sk);
2070 if (level == SOL_L2CAP)
2071 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2073 if (level != SOL_BLUETOOTH)
2074 return -ENOPROTOOPT;
2076 if (get_user(len, optlen))
2083 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2084 && sk->sk_type != SOCK_RAW) {
2089 sec.level = l2cap_pi(sk)->sec_level;
2091 len = min_t(unsigned int, len, sizeof(sec));
2092 if (copy_to_user(optval, (char *) &sec, len))
2097 case BT_DEFER_SETUP:
/* Defer-setup is only meaningful before the socket is connected. */
2098 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2103 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown() for an L2CAP socket: for ERTM channels wait for all
 * outstanding I-frames to be acked first, then close the channel and,
 * if SO_LINGER is set, wait for BT_CLOSED within the linger time. */
2117 static int l2cap_sock_shutdown(struct socket *sock, int how)
2119 struct sock *sk = sock->sk;
2122 BT_DBG("sock %p, sk %p", sock, sk);
2128 if (!sk->sk_shutdown) {
/* ERTM guarantees delivery; drain the unacked window before closing. */
2129 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2130 err = __l2cap_wait_ack(sk);
2132 sk->sk_shutdown = SHUTDOWN_MASK;
2133 l2cap_sock_clear_timer(sk);
2134 __l2cap_sock_close(sk, 0);
2136 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2137 err = bt_sock_wait_state(sk, BT_CLOSED,
/* release() for an L2CAP socket: full shutdown followed by killing the
 * socket (freeing it once it is zapped and unreferenced). */
2144 static int l2cap_sock_release(struct socket *sock)
2146 struct sock *sk = sock->sk;
2149 BT_DBG("sock %p, sk %p", sock, sk);
2154 err = l2cap_sock_shutdown(sock, 2);
2157 l2cap_sock_kill(sk);
/* Channel configuration finished: clear config state and timers, then
 * wake whoever is waiting — the connecting thread for an outgoing
 * channel, or the listening parent for an incoming one. */
2161 static void l2cap_chan_ready(struct sock *sk)
2163 struct sock *parent = bt_sk(sk)->parent;
2165 BT_DBG("sk %p, parent %p", sk, parent);
2167 l2cap_pi(sk)->conf_state = 0;
2168 l2cap_sock_clear_timer(sk);
2171 /* Outgoing channel.
2172 * Wake up socket sleeping on connect.
2174 sk->sk_state = BT_CONNECTED;
2175 sk->sk_state_change(sk);
2177 /* Incoming channel.
2178 * Wake up socket sleeping on accept.
2180 parent->sk_data_ready(parent, 0);
2184 /* Copy frame to all raw sockets on that connection */
/* Deliver a clone of @skb to every SOCK_RAW socket on this connection
 * (except the one it originated from), under the channel-list lock. */
2185 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2187 struct l2cap_chan_list *l = &conn->chan_list;
2188 struct sk_buff *nskb;
2191 BT_DBG("conn %p", conn);
2193 read_lock(&l->lock);
2194 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2195 if (sk->sk_type != SOCK_RAW)
2198 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: we hold a read lock and may run in softirq context. */
2201 nskb = skb_clone(skb, GFP_ATOMIC);
/* If the receive queue rejects the clone it must be freed (the kfree
 * path is outside this excerpt). */
2205 if (sock_queue_rcv_skb(sk, nskb))
2208 read_unlock(&l->lock);
2211 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel PDU: L2CAP header (CID 0x0001) + command
 * header + @dlen bytes of @data, fragmenting the payload over the ACL
 * MTU into frag_list continuation skbs when necessary.
 * Returns the skb, or NULL on allocation failure (paths not visible). */
2212 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2213 u8 code, u8 ident, u16 dlen, void *data)
2215 struct sk_buff *skb, **frag;
2216 struct l2cap_cmd_hdr *cmd;
2217 struct l2cap_hdr *lh;
2220 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2221 conn, code, ident, dlen);
2223 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2224 count = min_t(unsigned int, conn->mtu, len);
2226 skb = bt_skb_alloc(count, GFP_ATOMIC);
2230 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2231 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* All signalling commands travel on the fixed signalling CID. */
2232 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2234 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2237 cmd->len = cpu_to_le16(dlen);
/* First chunk of the command payload fits in the linear skb. */
2240 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2241 memcpy(skb_put(skb, count), data, count);
2247 /* Continuation fragments (no L2CAP header) */
2248 frag = &skb_shinfo(skb)->frag_list;
2250 count = min_t(unsigned int, conn->mtu, len);
2252 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2256 memcpy(skb_put(*frag, count), data, count);
2261 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: returns its total encoded
 * length (header + value) so the caller can advance, and stores the
 * type, length and value. 1/2/4-byte values are converted from little
 * endian; anything else is returned as a pointer to the raw bytes. */
2271 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2273 struct l2cap_conf_opt *opt = *ptr;
2276 len = L2CAP_CONF_OPT_SIZE + opt->len;
2284 *val = *((u8 *) opt->val);
2288 *val = __le16_to_cpu(*((__le16 *) opt->val));
2292 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-length option: hand back a pointer instead of a value. */
2296 *val = (unsigned long) opt->val;
2300 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it. 1/2/4-byte values are stored little endian; for other
 * lengths @val is treated as a pointer to the raw bytes to copy. */
2304 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2306 struct l2cap_conf_opt *opt = *ptr;
2308 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2315 *((u8 *) opt->val) = val;
2319 *((__le16 *) opt->val) = cpu_to_le16(val);
2323 *((__le32 *) opt->val) = cpu_to_le32(val);
2327 memcpy(opt->val, (void *) val, len);
2331 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer callback: when no outgoing I-frame carried the ack in
 * time, send an explicit acknowledgement for received frames. */
2334 static void l2cap_ack_timeout(unsigned long arg)
2336 struct sock *sk = (void *) arg;
2339 l2cap_send_ack(l2cap_pi(sk));
/* Initialize per-channel ERTM state: sequence counters, the three ERTM
 * timers (retransmission, monitor, ack), the SREJ/busy receive queues,
 * the send lock, and the local-busy deferred work. */
2343 static inline void l2cap_ertm_init(struct sock *sk)
2345 l2cap_pi(sk)->expected_ack_seq = 0;
2346 l2cap_pi(sk)->unacked_frames = 0;
2347 l2cap_pi(sk)->buffer_seq = 0;
2348 l2cap_pi(sk)->num_acked = 0;
2349 l2cap_pi(sk)->frames_sent = 0;
2351 setup_timer(&l2cap_pi(sk)->retrans_timer,
2352 l2cap_retrans_timeout, (unsigned long) sk);
2353 setup_timer(&l2cap_pi(sk)->monitor_timer,
2354 l2cap_monitor_timeout, (unsigned long) sk);
2355 setup_timer(&l2cap_pi(sk)->ack_timer,
2356 l2cap_ack_timeout, (unsigned long) sk);
/* Lockless queue init is safe: no other context sees the channel yet. */
2358 __skb_queue_head_init(SREJ_QUEUE(sk));
2359 __skb_queue_head_init(BUSY_QUEUE(sk));
2360 spin_lock_init(&l2cap_pi(sk)->send_lock);
2362 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/* Return non-zero when @mode is supported by both the local feature
 * mask (optionally widened by the enable_ertm module knob — the
 * conditional line is outside this excerpt) and the remote @feat_mask. */
2365 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2367 u32 local_feat_mask = l2cap_feat_mask;
2369 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2372 case L2CAP_MODE_ERTM:
2373 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2374 case L2CAP_MODE_STREAMING:
2375 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Pick the channel mode to propose: keep ERTM/streaming if the remote
 * supports it (returned mode line is outside this excerpt), otherwise
 * fall back to basic mode. */
2381 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2384 case L2CAP_MODE_STREAMING:
2385 case L2CAP_MODE_ERTM:
2386 if (l2cap_mode_supported(mode, remote_feat_mask))
2390 return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into @data based on the
 * channel mode: MTU for basic mode, an RFC option (plus optional FCS)
 * for ERTM and streaming. Returns the request length (the final
 * return is outside this excerpt). */
2394 static int l2cap_build_conf_req(struct sock *sk, void *data)
2396 struct l2cap_pinfo *pi = l2cap_pi(sk);
2397 struct l2cap_conf_req *req = data;
2398 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2399 void *ptr = req->data;
2401 BT_DBG("sk %p", sk);
/* Mode is only (re)negotiated on the very first config exchange. */
2403 if (pi->num_conf_req || pi->num_conf_rsp)
2407 case L2CAP_MODE_STREAMING:
2408 case L2CAP_MODE_ERTM:
/* Mode was explicitly requested by the user: refuse to downgrade. */
2409 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2410 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2411 l2cap_send_disconn_req(pi->conn, sk);
2414 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2420 case L2CAP_MODE_BASIC:
/* Only advertise the MTU when it differs from the spec default. */
2421 if (pi->imtu != L2CAP_DEFAULT_MTU)
2422 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2425 case L2CAP_MODE_ERTM:
2426 rfc.mode = L2CAP_MODE_ERTM;
2427 rfc.txwin_size = pi->tx_win;
2428 rfc.max_transmit = pi->max_tx;
/* Timeouts are dictated by the responder, so request zeros here. */
2429 rfc.retrans_timeout = 0;
2430 rfc.monitor_timeout = 0;
/* Cap the PDU size so header+payload fits the ACL MTU (10 = worst-case
 * L2CAP + control + SDU-len + FCS overhead). */
2431 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2432 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2433 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2435 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2436 sizeof(rfc), (unsigned long) &rfc);
2438 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2441 if (pi->fcs == L2CAP_FCS_NONE ||
2442 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2443 pi->fcs = L2CAP_FCS_NONE;
2444 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2448 case L2CAP_MODE_STREAMING:
2449 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming mode has no retransmission: window/limits stay zero. */
2451 rfc.max_transmit = 0;
2452 rfc.retrans_timeout = 0;
2453 rfc.monitor_timeout = 0;
2454 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2455 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2456 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2458 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2459 sizeof(rfc), (unsigned long) &rfc);
2461 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2464 if (pi->fcs == L2CAP_FCS_NONE ||
2465 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2466 pi->fcs = L2CAP_FCS_NONE;
2467 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2472 /* FIXME: Need actual value of the flush timeout */
2473 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2474 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2476 req->dcid = cpu_to_le16(pi->dcid);
2477 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated configuration request (pi->conf_req /
 * pi->conf_len) and build our response into @data: accept, adjust, or
 * reject each option, settle on a channel mode, and record the peer's
 * ERTM/streaming parameters. Returns the response length (final
 * return outside this excerpt) or -ECONNREFUSED. */
2482 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2484 struct l2cap_pinfo *pi = l2cap_pi(sk);
2485 struct l2cap_conf_rsp *rsp = data;
2486 void *ptr = rsp->data;
2487 void *req = pi->conf_req;
2488 int len = pi->conf_len;
2489 int type, hint, olen;
2491 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2492 u16 mtu = L2CAP_DEFAULT_MTU;
2493 u16 result = L2CAP_CONF_SUCCESS;
2495 BT_DBG("sk %p", sk);
/* First pass: decode every option the peer sent. */
2497 while (len >= L2CAP_CONF_OPT_SIZE) {
2498 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; non-hints must be understood. */
2500 hint = type & L2CAP_CONF_HINT;
2501 type &= L2CAP_CONF_MASK;
2504 case L2CAP_CONF_MTU:
2508 case L2CAP_CONF_FLUSH_TO:
2512 case L2CAP_CONF_QOS:
2515 case L2CAP_CONF_RFC:
2516 if (olen == sizeof(rfc))
2517 memcpy(&rfc, (void *) val, olen)
2520 case L2CAP_CONF_FCS:
2521 if (val == L2CAP_FCS_NONE)
2522 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: reply CONF_UNKNOWN listing its type. */
2530 result = L2CAP_CONF_UNKNOWN;
2531 *((u8 *) ptr++) = type;
2536 if (pi->num_conf_rsp || pi->num_conf_req)
2540 case L2CAP_MODE_STREAMING:
2541 case L2CAP_MODE_ERTM:
2542 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2543 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2544 return -ECONNREFUSED;
2547 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* Peer proposed a different mode: counter-propose ours; give up after
 * the second round. */
2552 if (pi->mode != rfc.mode) {
2553 result = L2CAP_CONF_UNACCEPT;
2554 rfc.mode = pi->mode;
2556 if (pi->num_conf_rsp == 1)
2557 return -ECONNREFUSED;
2559 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2560 sizeof(rfc), (unsigned long) &rfc);
2564 if (result == L2CAP_CONF_SUCCESS) {
2565 /* Configure output options and let the other side know
2566 * which ones we don't like. */
2568 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2569 result = L2CAP_CONF_UNACCEPT;
2572 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2574 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2577 case L2CAP_MODE_BASIC:
2578 pi->fcs = L2CAP_FCS_NONE;
2579 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2582 case L2CAP_MODE_ERTM:
2583 pi->remote_tx_win = rfc.txwin_size;
2584 pi->remote_max_tx = rfc.max_transmit;
/* NOTE(review): rfc.max_pdu_size is wire little-endian but is compared
 * against a host-endian value, and the clamp uses le16_to_cpu() where
 * cpu_to_le16() looks intended — confirm against later kernel fixes. */
2585 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2586 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2588 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): le16_to_cpu() on host-endian constants — the values
 * stored into the wire struct should be cpu_to_le16(); verify. */
2590 rfc.retrans_timeout =
2591 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2592 rfc.monitor_timeout =
2593 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2595 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2597 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2598 sizeof(rfc), (unsigned long) &rfc);
2602 case L2CAP_MODE_STREAMING:
/* Same endianness concern as the ERTM branch above. */
2603 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2604 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2606 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2608 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2610 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2611 sizeof(rfc), (unsigned long) &rfc);
/* Any other mode is refused; echo back basic RFC with our mode. */
2616 result = L2CAP_CONF_UNACCEPT;
2618 memset(&rfc, 0, sizeof(rfc));
2619 rfc.mode = pi->mode;
2622 if (result == L2CAP_CONF_SUCCESS)
2623 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2625 rsp->scid = cpu_to_le16(pi->dcid);
2626 rsp->result = cpu_to_le16(result);
2627 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration response @rsp and build a follow-up
 * request into @data, adopting the values the peer suggested. On
 * success, records the negotiated ERTM/streaming parameters.
 * Returns the new request length (final return outside this excerpt)
 * or -ECONNREFUSED on an unacceptable mode change. */
2632 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2634 struct l2cap_pinfo *pi = l2cap_pi(sk);
2635 struct l2cap_conf_req *req = data;
2636 void *ptr = req->data;
2639 struct l2cap_conf_rfc rfc;
2641 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2643 while (len >= L2CAP_CONF_OPT_SIZE) {
2644 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2647 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: clamp and reject. */
2648 if (val < L2CAP_DEFAULT_MIN_MTU) {
2649 *result = L2CAP_CONF_UNACCEPT;
2650 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2653 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2656 case L2CAP_CONF_FLUSH_TO:
2658 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2662 case L2CAP_CONF_RFC:
2663 if (olen == sizeof(rfc))
2664 memcpy(&rfc, (void *)val, olen);
/* A user-mandated mode (STATE2_DEVICE) may not be renegotiated. */
2666 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2667 rfc.mode != pi->mode)
2668 return -ECONNREFUSED;
2670 pi->mode = rfc.mode;
2673 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2674 sizeof(rfc), (unsigned long) &rfc);
2679 if (*result == L2CAP_CONF_SUCCESS) {
2681 case L2CAP_MODE_ERTM:
2682 pi->remote_tx_win = rfc.txwin_size;
2683 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2684 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2685 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2687 case L2CAP_MODE_STREAMING:
2688 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2692 req->dcid = cpu_to_le16(pi->dcid);
2693 req->flags = cpu_to_le16(0x0000);
/* Build a minimal configuration response (no options) with the given
 * @result and continuation @flags; returns its length (outside view). */
2698 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2700 struct l2cap_conf_rsp *rsp = data;
2701 void *ptr = rsp->data;
2703 BT_DBG("sk %p", sk);
2705 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2706 rsp->result = cpu_to_le16(result);
2707 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from a successful configuration response and
 * store the final negotiated ERTM/streaming parameters on the channel.
 * No-op for basic-mode channels. */
2712 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2714 struct l2cap_pinfo *pi = l2cap_pi(sk);
2717 struct l2cap_conf_rfc rfc;
2719 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2721 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2724 while (len >= L2CAP_CONF_OPT_SIZE) {
2725 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2728 case L2CAP_CONF_RFC:
2729 if (olen == sizeof(rfc))
2730 memcpy(&rfc, (void *)val, olen);
2737 case L2CAP_MODE_ERTM:
2738 pi->remote_tx_win = rfc.txwin_size;
2739 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2740 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2741 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2743 case L2CAP_MODE_STREAMING:
2744 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it answers our outstanding
 * feature-mask information request, treat the exchange as done (the
 * peer doesn't support it) and kick off pending channel setups. */
2748 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2750 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; other reasons are ignored here. */
2752 if (rej->reason != 0x0000)
2755 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2756 cmd->ident == conn->info_ident) {
2757 del_timer(&conn->info_timer);
2759 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2760 conn->info_ident = 0;
2762 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listener on the PSM,
 * enforce link security, allocate and register a child socket, and
 * reply with success / pending / rejection. If the remote feature
 * mask is still unknown, also fire an information request. */
2768 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2770 struct l2cap_chan_list *list = &conn->chan_list;
2771 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2772 struct l2cap_conn_rsp rsp;
2773 struct sock *sk, *parent;
2774 int result, status = L2CAP_CS_NO_INFO;
2776 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2777 __le16 psm = req->psm;
2779 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2781 /* Check if we have socket listening on psm */
2782 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2784 result = L2CAP_CR_BAD_PSM;
2788 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from link-mode requirements. */
2789 if (psm != cpu_to_le16(0x0001) &&
2790 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05 = authentication failure, reported at disconnect time. */
2791 conn->disc_reason = 0x05;
2792 result = L2CAP_CR_SEC_BLOCK;
2796 result = L2CAP_CR_NO_MEM;
2798 /* Check for backlog size */
2799 if (sk_acceptq_is_full(parent)) {
2800 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2804 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2808 write_lock_bh(&list->lock);
2810 /* Check if we already have channel with that dcid */
2811 if (__l2cap_get_chan_by_dcid(list, scid)) {
2812 write_unlock_bh(&list->lock);
2813 sock_set_flag(sk, SOCK_ZAPPED);
2814 l2cap_sock_kill(sk);
2818 hci_conn_hold(conn->hcon);
2820 l2cap_sock_init(sk, parent);
2821 bacpy(&bt_sk(sk)->src, conn->src);
2822 bacpy(&bt_sk(sk)->dst, conn->dst);
2823 l2cap_pi(sk)->psm = psm;
/* The peer's source CID becomes our destination CID. */
2824 l2cap_pi(sk)->dcid = scid;
2826 __l2cap_chan_add(conn, sk, parent);
2827 dcid = l2cap_pi(sk)->scid;
2829 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2831 l2cap_pi(sk)->ident = cmd->ident;
2833 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2834 if (l2cap_check_security(sk)) {
/* Deferred setup: stay pending until userspace reads/accepts. */
2835 if (bt_sk(sk)->defer_setup) {
2836 sk->sk_state = BT_CONNECT2;
2837 result = L2CAP_CR_PEND;
2838 status = L2CAP_CS_AUTHOR_PEND;
2839 parent->sk_data_ready(parent, 0);
2841 sk->sk_state = BT_CONFIG;
2842 result = L2CAP_CR_SUCCESS;
2843 status = L2CAP_CS_NO_INFO;
2846 sk->sk_state = BT_CONNECT2;
2847 result = L2CAP_CR_PEND;
2848 status = L2CAP_CS_AUTHEN_PEND;
/* Features not known yet: answer pending, finish after info rsp. */
2851 sk->sk_state = BT_CONNECT2;
2852 result = L2CAP_CR_PEND;
2853 status = L2CAP_CS_NO_INFO;
2856 write_unlock_bh(&list->lock);
2859 bh_unlock_sock(parent);
2862 rsp.scid = cpu_to_le16(scid);
2863 rsp.dcid = cpu_to_le16(dcid);
2864 rsp.result = cpu_to_le16(result);
2865 rsp.status = cpu_to_le16(status);
2866 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2868 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2869 struct l2cap_info_req info;
2870 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2872 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2873 conn->info_ident = l2cap_get_ident(conn);
2875 mod_timer(&conn->info_timer, jiffies +
2876 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2878 l2cap_send_cmd(conn, conn->info_ident,
2879 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle a Connection Response for one of our outgoing requests:
 * on success move to BT_CONFIG and start configuration; on pending
 * just mark the channel; otherwise tear the channel down. */
2885 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2887 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2888 u16 scid, dcid, result, status;
2892 scid = __le16_to_cpu(rsp->scid);
2893 dcid = __le16_to_cpu(rsp->dcid);
2894 result = __le16_to_cpu(rsp->result);
2895 status = __le16_to_cpu(rsp->status);
2897 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid may be 0 in a rejection; fall back to matching by ident. */
2900 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2904 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2910 case L2CAP_CR_SUCCESS:
2911 sk->sk_state = BT_CONFIG;
2912 l2cap_pi(sk)->ident = 0;
2913 l2cap_pi(sk)->dcid = dcid;
2914 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2916 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2918 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2919 l2cap_build_conf_req(sk, req), req);
2920 l2cap_pi(sk)->num_conf_req++;
/* Pending (e.g. authentication in progress): wait for the final rsp. */
2924 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2928 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle a Configuration Request: accumulate (possibly multi-fragment)
 * option data in pi->conf_req, and once complete parse it, send our
 * response, and — if both directions are configured — bring the
 * channel up. */
2936 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2938 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2944 dcid = __le16_to_cpu(req->dcid);
2945 flags = __le16_to_cpu(req->flags);
2947 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2949 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2953 if (sk->sk_state == BT_DISCONN)
2956 /* Reject if config buffer is too small. */
2957 len = cmd_len - sizeof(*req);
2958 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2959 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2960 l2cap_build_conf_rsp(sk, rsp,
2961 L2CAP_CONF_REJECT, flags), rsp);
2966 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2967 l2cap_pi(sk)->conf_len += len;
/* Continuation flag set: more fragments follow, ack and wait. */
2969 if (flags & 0x0001) {
2970 /* Incomplete config. Send empty response. */
2971 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2972 l2cap_build_conf_rsp(sk, rsp,
2973 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2977 /* Complete config. */
2978 len = l2cap_parse_conf_req(sk, rsp);
2980 l2cap_send_disconn_req(conn, sk);
2984 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2985 l2cap_pi(sk)->num_conf_rsp++;
2987 /* Reset config buffer. */
2988 l2cap_pi(sk)->conf_len = 0;
2990 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: finalize FCS choice and go connected. */
2993 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2994 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2995 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2996 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2998 sk->sk_state = BT_CONNECTED;
3000 l2cap_pi(sk)->next_tx_seq = 0;
3001 l2cap_pi(sk)->expected_tx_seq = 0;
3002 __skb_queue_head_init(TX_QUEUE(sk));
3003 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3004 l2cap_ertm_init(sk);
3006 l2cap_chan_ready(sk);
/* We have not sent our own config request yet: do it now. */
3010 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3012 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3013 l2cap_build_conf_req(sk, buf), buf);
3014 l2cap_pi(sk)->num_conf_req++;
/* Handle a Configuration Response: on success adopt the negotiated
 * RFC values; on "unacceptable parameters" re-negotiate (bounded by
 * L2CAP_CONF_MAX_CONF_RSP rounds); otherwise disconnect. When both
 * directions are done, bring the channel up. */
3022 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3024 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3025 u16 scid, flags, result;
3027 int len = cmd->len - sizeof(*rsp);
3029 scid = __le16_to_cpu(rsp->scid);
3030 flags = __le16_to_cpu(rsp->flags);
3031 result = __le16_to_cpu(rsp->result);
3033 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3034 scid, flags, result);
3036 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3041 case L2CAP_CONF_SUCCESS:
3042 l2cap_conf_rfc_get(sk, rsp->data, len);
3045 case L2CAP_CONF_UNACCEPT:
3046 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Peer's counter-options would overflow our request buffer. */
3049 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3050 l2cap_send_disconn_req(conn, sk);
3054 /* throw out any old stored conf requests */
3055 result = L2CAP_CONF_SUCCESS;
3056 len = l2cap_parse_conf_rsp(sk, rsp->data,
3059 l2cap_send_disconn_req(conn, sk);
3063 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3064 L2CAP_CONF_REQ, len, req);
3065 l2cap_pi(sk)->num_conf_req++;
3066 if (result != L2CAP_CONF_SUCCESS)
/* Rejected or too many rounds: give up and tear down the channel. */
3072 sk->sk_state = BT_DISCONN;
3073 sk->sk_err = ECONNRESET;
3074 l2cap_sock_set_timer(sk, HZ * 5);
3075 l2cap_send_disconn_req(conn, sk);
3082 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3084 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3085 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3086 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3087 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3089 sk->sk_state = BT_CONNECTED;
3090 l2cap_pi(sk)->next_tx_seq = 0;
3091 l2cap_pi(sk)->expected_tx_seq = 0;
3092 __skb_queue_head_init(TX_QUEUE(sk));
3093 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3094 l2cap_ertm_init(sk);
3096 l2cap_chan_ready(sk);
/* Handle a Disconnection Request from the peer: acknowledge it, purge
 * all transmit state (including ERTM queues and timers), remove the
 * channel and kill the socket. */
3104 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3106 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3107 struct l2cap_disconn_rsp rsp;
3111 scid = __le16_to_cpu(req->scid);
3112 dcid = __le16_to_cpu(req->dcid);
3114 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our local scid — look the channel up by it. */
3116 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3120 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3121 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3122 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3124 sk->sk_shutdown = SHUTDOWN_MASK;
3126 skb_queue_purge(TX_QUEUE(sk));
/* ERTM channels also carry retransmission queues and three timers. */
3128 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3129 skb_queue_purge(SREJ_QUEUE(sk));
3130 skb_queue_purge(BUSY_QUEUE(sk));
3131 del_timer(&l2cap_pi(sk)->retrans_timer);
3132 del_timer(&l2cap_pi(sk)->monitor_timer);
3133 del_timer(&l2cap_pi(sk)->ack_timer);
3136 l2cap_chan_del(sk, ECONNRESET);
3139 l2cap_sock_kill(sk);
/* Handle an incoming L2CAP Disconnect Response (completion of a
 * disconnect *we* initiated): purge queues, stop ERTM timers and
 * remove the channel with no error (err 0 — clean local close). */
3143 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3145 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3149 scid = __le16_to_cpu(rsp->scid);
3150 dcid = __le16_to_cpu(rsp->dcid);
3152 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
/* In the response the peer echoes our CID in scid, so look up by scid. */
3154 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3158 skb_queue_purge(TX_QUEUE(sk));
3160 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3161 skb_queue_purge(SREJ_QUEUE(sk));
3162 skb_queue_purge(BUSY_QUEUE(sk));
3163 del_timer(&l2cap_pi(sk)->retrans_timer);
3164 del_timer(&l2cap_pi(sk)->monitor_timer);
3165 del_timer(&l2cap_pi(sk)->ack_timer);
3168 l2cap_chan_del(sk, 0);
3171 l2cap_sock_kill(sk);
/* Answer an L2CAP Information Request. Supported queries:
 *  - L2CAP_IT_FEAT_MASK: reply with our feature mask (ERTM/streaming
 *    bits OR-ed in when extended features are enabled);
 *  - L2CAP_IT_FIXED_CHAN: reply with the fixed-channel bitmap;
 *  - anything else: reply with result L2CAP_IR_NOTSUPP. */
3175 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3177 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3180 type = __le16_to_cpu(req->type);
3182 BT_DBG("type 0x%4.4x", type);
3184 if (type == L2CAP_IT_FEAT_MASK) {
3186 u32 feat_mask = l2cap_feat_mask;
3187 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3188 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3189 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3191 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Feature mask is a 32-bit LE value in the variable-length data area. */
3193 put_unaligned_le32(feat_mask, rsp->data);
3194 l2cap_send_cmd(conn, cmd->ident,
3195 L2CAP_INFO_RSP, sizeof(buf), buf);
3196 } else if (type == L2CAP_IT_FIXED_CHAN) {
3198 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3199 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3200 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed channel map copied after the 4-byte rsp header. */
3201 memcpy(buf + 4, l2cap_fixed_chan, 8);
3202 l2cap_send_cmd(conn, cmd->ident,
3203 L2CAP_INFO_RSP, sizeof(buf), buf);
3205 struct l2cap_info_rsp rsp;
3206 rsp.type = cpu_to_le16(type);
3207 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3208 l2cap_send_cmd(conn, cmd->ident,
3209 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an L2CAP Information Response: stop the info timer, record the
 * peer's feature mask, and either chain a FIXED_CHAN query (if the peer
 * advertises fixed channels) or mark info exchange done and start any
 * channels that were waiting on it. */
3215 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3217 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3220 type = __le16_to_cpu(rsp->type);
3221 result = __le16_to_cpu(rsp->result);
3223 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3225 del_timer(&conn->info_timer);
3227 if (type == L2CAP_IT_FEAT_MASK) {
3228 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask for its fixed channel map next. */
3230 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3231 struct l2cap_info_req req;
3232 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3234 conn->info_ident = l2cap_get_ident(conn);
3236 l2cap_send_cmd(conn, conn->info_ident,
3237 L2CAP_INFO_REQ, sizeof(req), &req);
3239 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3240 conn->info_ident = 0;
/* Info exchange finished: kick off pending channel setups. */
3242 l2cap_conn_start(conn);
3244 } else if (type == L2CAP_IT_FIXED_CHAN) {
3245 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3246 conn->info_ident = 0;
3248 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signalling channel (CID 0x0001): iterate over
 * the concatenated commands in the skb, dispatch each to its handler,
 * and send a Command Reject for any command a handler fails on.
 * The raw frame is also delivered to raw (sniffer) sockets first. */
3254 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3256 u8 *data = skb->data;
3258 struct l2cap_cmd_hdr cmd;
3261 l2cap_raw_recv(conn, skb);
/* Walk every complete command header remaining in the buffer. */
3263 while (len >= L2CAP_CMD_HDR_SIZE) {
3265 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3266 data += L2CAP_CMD_HDR_SIZE;
3267 len -= L2CAP_CMD_HDR_SIZE;
3269 cmd_len = le16_to_cpu(cmd.len);
3271 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Reject truncated payloads and the reserved ident 0. */
3273 if (cmd_len > len || !cmd.ident) {
3274 BT_DBG("corrupted command");
3279 case L2CAP_COMMAND_REJ:
3280 l2cap_command_rej(conn, &cmd, data);
3283 case L2CAP_CONN_REQ:
3284 err = l2cap_connect_req(conn, &cmd, data);
3287 case L2CAP_CONN_RSP:
3288 err = l2cap_connect_rsp(conn, &cmd, data);
3291 case L2CAP_CONF_REQ:
3292 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3295 case L2CAP_CONF_RSP:
3296 err = l2cap_config_rsp(conn, &cmd, data);
3299 case L2CAP_DISCONN_REQ:
3300 err = l2cap_disconnect_req(conn, &cmd, data);
3303 case L2CAP_DISCONN_RSP:
3304 err = l2cap_disconnect_rsp(conn, &cmd, data);
/* Echo Request is answered inline by mirroring the payload back. */
3307 case L2CAP_ECHO_REQ:
3308 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3311 case L2CAP_ECHO_RSP:
3314 case L2CAP_INFO_REQ:
3315 err = l2cap_information_req(conn, &cmd, data);
3318 case L2CAP_INFO_RSP:
3319 err = l2cap_information_rsp(conn, &cmd, data);
3323 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3329 struct l2cap_cmd_rej rej;
3330 BT_DBG("error %d", err);
3332 /* FIXME: Map err to a valid reason */
3333 rej.reason = cpu_to_le16(0);
3334 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailer of a received ERTM/streaming frame.
 * Trims the 2 FCS bytes off the skb, then compares the received value
 * (still readable just past the trimmed length) against a CRC computed
 * over the L2CAP header + control field + payload. Returns 0 on match.
 * No-op (success) when the channel does not use FCS. */
3344 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3346 u16 our_fcs, rcv_fcs;
/* Basic L2CAP header plus the 2-byte control field precede skb->data. */
3347 int hdr_size = L2CAP_HDR_SIZE + 2;
3349 if (pi->fcs == L2CAP_FCS_CRC16) {
3350 skb_trim(skb, skb->len - 2);
/* skb_trim only shrinks len; the FCS bytes are still in the buffer. */
3351 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3352 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3354 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) from the peer with the F-bit set: send RNR if
 * we are locally busy, otherwise flush pending I-frames, and fall back
 * to a plain RR if nothing was sent so the peer still gets its F-bit. */
3360 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3362 struct l2cap_pinfo *pi = l2cap_pi(sk);
3365 pi->frames_sent = 0;
3366 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3368 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
/* Locally busy: tell the peer "receiver not ready" with F=1. */
3370 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3371 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3372 l2cap_send_sframe(pi, control);
3373 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3374 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Restart retransmission timer if unacked frames remain outstanding. */
3377 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3378 __mod_retrans_timer();
3380 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3382 spin_lock_bh(&pi->send_lock);
3383 l2cap_ertm_send(sk);
3384 spin_unlock_bh(&pi->send_lock);
/* No I-frame carried the F-bit: send an explicit RR instead. */
3386 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3387 pi->frames_sent == 0) {
3388 control |= L2CAP_SUPER_RCV_READY;
3389 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue sorted by tx_seq and dropping duplicates (same tx_seq already
 * queued). Used while a SREJ recovery is in progress. */
3393 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3395 struct sk_buff *next_skb;
/* Stash sequencing metadata in the skb control block for reassembly. */
3397 bt_cb(skb)->tx_seq = tx_seq;
3398 bt_cb(skb)->sar = sar;
3400 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: just append. */
3402 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Duplicate frame already stored: caller treats non-zero as "drop". */
3407 if (bt_cb(next_skb)->tx_seq == tx_seq)
/* Found the first queued frame with a larger seq: insert before it. */
3410 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3411 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3415 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3418 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Largest seq seen so far: append at the tail. */
3420 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble an ERTM SDU from its SAR-segmented I-frames and deliver
 * the complete SDU to the socket receive queue. Tracks segmentation
 * state in pi->conn_state (SAR_SDU in progress, SAR_RETRY after a
 * failed delivery so the END segment can be retried without
 * re-appending its payload). Disconnects the channel on protocol
 * violations (unexpected segment order, oversized SDU). */
3425 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3427 struct l2cap_pinfo *pi = l2cap_pi(sk);
3428 struct sk_buff *_skb;
3431 switch (control & L2CAP_CTRL_SAR) {
/* Whole SDU in one frame: must not arrive mid-reassembly. */
3432 case L2CAP_SDU_UNSEGMENTED:
3433 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3436 err = sock_queue_rcv_skb(sk, skb);
/* First segment: carries the total SDU length in its first 2 bytes. */
3442 case L2CAP_SDU_START:
3443 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3446 pi->sdu_len = get_unaligned_le16(skb->data);
/* SDU larger than our MTU is a peer protocol violation. */
3448 if (pi->sdu_len > pi->imtu)
3451 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3455 /* pull sdu_len bytes only after alloc, because of Local Busy
3456 * condition we have to be sure that this will be executed
3457 * only once, i.e., when alloc does not fail */
3460 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3462 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3463 pi->partial_sdu_len = skb->len;
/* Middle segment: append payload, watch for overflow of sdu_len. */
3466 case L2CAP_SDU_CONTINUE:
3467 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3473 pi->partial_sdu_len += skb->len;
3474 if (pi->partial_sdu_len > pi->sdu_len)
3477 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* Final segment (L2CAP_SDU_END branch): validate total length,
 * then clone and deliver; on queue failure set SAR_RETRY so a
 * retried END does not append its payload a second time. */
3482 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3488 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3489 pi->partial_sdu_len += skb->len;
3491 if (pi->partial_sdu_len > pi->imtu)
3494 if (pi->partial_sdu_len != pi->sdu_len)
3497 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3500 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3502 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3506 err = sock_queue_rcv_skb(sk, _skb);
3509 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3513 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3514 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
/* Protocol violation path: tear the channel down. */
3528 l2cap_send_disconn_req(pi->conn, sk);
/* Workqueue handler for the local-busy condition: wait (interruptibly,
 * polling every HZ/5) until the frames parked on BUSY_QUEUE can be
 * pushed into the socket receive queue, then exit local-busy by
 * sending RR with P=1 and arming the monitor timer. Gives up and
 * disconnects after L2CAP_LOCAL_BUSY_TRIES attempts. */
3533 static void l2cap_busy_work(struct work_struct *work)
3535 DECLARE_WAITQUEUE(wait, current);
3536 struct l2cap_pinfo *pi =
3537 container_of(work, struct l2cap_pinfo, busy_work);
/* struct sock is the first member of l2cap_pinfo, so this cast holds. */
3538 struct sock *sk = (struct sock *)pi;
3539 int n_tries = 0, timeo = HZ/5, err;
3540 struct sk_buff *skb;
3545 add_wait_queue(sk_sleep(sk), &wait);
3546 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3547 set_current_state(TASK_INTERRUPTIBLE);
/* Too many retries: the receiver never drained; kill the channel. */
3549 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3551 l2cap_send_disconn_req(pi->conn, sk);
3558 if (signal_pending(current)) {
3559 err = sock_intr_errno(timeo);
3564 timeo = schedule_timeout(timeo);
3567 err = sock_error(sk);
/* Drain the busy queue through the normal reassembly path;
 * requeue at the head if delivery fails again. */
3571 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3572 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3573 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3575 skb_queue_head(BUSY_QUEUE(sk), skb);
/* Sequence numbers are modulo-64 in ERTM. */
3579 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3586 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* Exit busy: poll the peer with RR(P=1) and wait for its F-bit. */
3589 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3590 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3591 l2cap_send_sframe(pi, control);
3592 l2cap_pi(sk)->retry_count = 1;
3594 del_timer(&pi->retrans_timer);
3595 __mod_monitor_timer();
3597 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3600 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3601 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3603 set_current_state(TASK_RUNNING);
3604 remove_wait_queue(sk_sleep(sk), &wait);
/* Push a received in-sequence I-frame toward the socket. If we are
 * already in local-busy, just park it on BUSY_QUEUE. If delivery fails
 * (receive buffer full), enter the busy condition: queue the frame,
 * send RNR to the peer, and schedule l2cap_busy_work to drain later. */
3609 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3611 struct l2cap_pinfo *pi = l2cap_pi(sk);
3614 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3615 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3616 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3620 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3622 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3626 /* Busy Condition */
3627 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3628 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3629 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Tell the peer to stop sending until we recover. */
3631 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3632 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3633 l2cap_send_sframe(pi, sctrl);
3635 pi->conn_state |= L2CAP_CONN_RNR_SENT;
/* Deferred drain runs on the module-global _busy_wq workqueue. */
3637 queue_work(_busy_wq, &pi->busy_work);
/* Reassemble an SDU received in streaming mode. Unlike ERTM, streaming
 * mode tolerates loss: on a mis-ordered or oversized SDU the partial
 * data is discarded rather than the channel being torn down. */
3642 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3644 struct l2cap_pinfo *pi = l2cap_pi(sk);
3645 struct sk_buff *_skb;
3649 * TODO: We have to notify the userland if some data is lost with the
3653 switch (control & L2CAP_CTRL_SAR) {
3654 case L2CAP_SDU_UNSEGMENTED:
/* A lone frame in mid-reassembly means the tail was lost: drop state. */
3655 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3660 err = sock_queue_rcv_skb(sk, skb);
3666 case L2CAP_SDU_START:
3667 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* First 2 bytes of a START segment carry the total SDU length. */
3672 pi->sdu_len = get_unaligned_le16(skb->data);
3675 if (pi->sdu_len > pi->imtu) {
3680 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3686 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3688 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3689 pi->partial_sdu_len = skb->len;
3693 case L2CAP_SDU_CONTINUE:
3694 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3697 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3699 pi->partial_sdu_len += skb->len;
3700 if (pi->partial_sdu_len > pi->sdu_len)
/* Final segment (SDU_END branch): deliver only if the reassembled
 * length matches exactly what the START segment announced. */
3708 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3711 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3713 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3714 pi->partial_sdu_len += skb->len;
3716 if (pi->partial_sdu_len > pi->imtu)
3719 if (pi->partial_sdu_len == pi->sdu_len) {
3720 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3721 err = sock_queue_rcv_skb(sk, _skb);
/* After a missing frame was finally received, flush the run of
 * consecutive frames now available at the head of the SREJ queue into
 * the reassembly path, advancing buffer_seq_srej for each. Stops at
 * the first remaining gap. */
3736 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3738 struct sk_buff *skb;
3741 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Queue is sorted by tx_seq; a mismatch at the head means a gap. */
3742 if (bt_cb(skb)->tx_seq != tx_seq)
3745 skb = skb_dequeue(SREJ_QUEUE(sk));
3746 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3747 l2cap_ertm_reassembly_sdu(sk, skb, control);
3748 l2cap_pi(sk)->buffer_seq_srej =
3749 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
/* Re-send SREJ S-frames for every sequence number still pending in the
 * SREJ list up to (and including the removal of) tx_seq. The matching
 * entry is dropped from the list; the others are re-requested and moved
 * to the list tail. */
3754 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3756 struct l2cap_pinfo *pi = l2cap_pi(sk);
3757 struct srej_list *l, *tmp;
3760 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* The frame we just received no longer needs to be re-requested. */
3761 if (l->tx_seq == tx_seq) {
3766 control = L2CAP_SUPER_SELECT_REJECT;
3767 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3768 l2cap_send_sframe(pi, control);
/* Re-queue at the tail so ordering of outstanding SREJs is kept. */
3770 list_add_tail(&l->list, SREJ_LIST(sk));
/* A gap was detected: send one SREJ S-frame per missing sequence number
 * between expected_tx_seq and the received tx_seq, recording each in
 * the SREJ list so the responses can be matched later.
 * NOTE(review): the kzalloc result is used without a NULL check in this
 * listing — GFP_ATOMIC can fail; verify against the full source. */
3774 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3776 struct l2cap_pinfo *pi = l2cap_pi(sk);
3777 struct srej_list *new;
3780 while (tx_seq != pi->expected_tx_seq) {
3781 control = L2CAP_SUPER_SELECT_REJECT;
3782 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3783 l2cap_send_sframe(pi, control);
3785 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3786 new->tx_seq = pi->expected_tx_seq++;
3787 list_add_tail(&new->list, SREJ_LIST(sk));
/* Skip past the frame that actually arrived. */
3789 pi->expected_tx_seq++;
/* Core ERTM receive path for I-frames. Handles, in order: F-bit
 * resolution of an outstanding poll, acking via ReqSeq, out-of-window
 * rejection (disconnect), SREJ-based recovery of missing frames, and
 * finally in-sequence delivery plus periodic acknowledgement (an ack
 * every tx_win/6 + 1 frames). All sequence arithmetic is modulo 64. */
3792 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3794 struct l2cap_pinfo *pi = l2cap_pi(sk);
3795 u8 tx_seq = __get_txseq(rx_control);
3796 u8 req_seq = __get_reqseq(rx_control);
3797 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3798 u8 tx_seq_offset, expected_tx_seq_offset;
/* Ack threshold: acknowledge after roughly a sixth of the window. */
3799 int num_to_ack = (pi->tx_win/6) + 1;
3802 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F-bit answers our poll: stop the monitor timer, resume retransmit. */
3804 if (L2CAP_CTRL_FINAL & rx_control &&
3805 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F {
3806 del_timer(&pi->monitor_timer);
3807 if (pi->unacked_frames > 0)
3808 __mod_retrans_timer();
3809 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* ReqSeq piggy-backs acknowledgement of our outgoing frames. */
3812 pi->expected_ack_seq = req_seq;
3813 l2cap_drop_acked_frames(sk);
3815 if (tx_seq == pi->expected_tx_seq)
3818 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3819 if (tx_seq_offset < 0)
3820 tx_seq_offset += 64;
3822 /* invalid tx_seq */
3823 if (tx_seq_offset >= pi->tx_win) {
3824 l2cap_send_disconn_req(pi->conn, sk);
3828 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
/* SREJ recovery already in progress for earlier gaps. */
3831 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3832 struct srej_list *first;
3834 first = list_first_entry(SREJ_LIST(sk),
3835 struct srej_list, list);
/* This is the oldest missing frame: store it and flush the run. */
3836 if (tx_seq == first->tx_seq) {
3837 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3838 l2cap_check_srej_gap(sk, tx_seq);
3840 list_del(&first->list);
/* All gaps filled: leave SREJ recovery. */
3843 if (list_empty(SREJ_LIST(sk))) {
3844 pi->buffer_seq = pi->buffer_seq_srej;
3845 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3849 struct srej_list *l;
3851 /* duplicated tx_seq */
3852 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3855 list_for_each_entry(l, SREJ_LIST(sk), list) {
3856 if (l->tx_seq == tx_seq) {
3857 l2cap_resend_srejframe(sk, tx_seq);
3861 l2cap_send_srejframe(sk, tx_seq);
3864 expected_tx_seq_offset =
3865 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3866 if (expected_tx_seq_offset < 0)
3867 expected_tx_seq_offset += 64;
3869 /* duplicated tx_seq */
3870 if (tx_seq_offset < expected_tx_seq_offset)
/* First gap detected: enter SREJ recovery and request the holes. */
3873 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3875 INIT_LIST_HEAD(SREJ_LIST(sk));
3876 pi->buffer_seq_srej = pi->buffer_seq;
3878 __skb_queue_head_init(SREJ_QUEUE(sk));
3879 __skb_queue_head_init(BUSY_QUEUE(sk));
3880 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3882 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3884 l2cap_send_srejframe(sk, tx_seq);
/* In-sequence frame (expected path). */
3889 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3891 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3892 bt_cb(skb)->tx_seq = tx_seq;
3893 bt_cb(skb)->sar = sar;
3894 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3898 if (rx_control & L2CAP_CTRL_FINAL) {
3899 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3900 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3902 l2cap_retransmit_frames(sk);
3905 err = l2cap_push_rx_skb(sk, skb, rx_control);
/* Send an explicit ack once enough frames have accumulated. */
3911 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3912 if (pi->num_acked == num_to_ack - 1)
/* Handle a Receiver Ready (RR) S-frame: process the piggy-backed ack,
 * then either answer a poll (P-bit), complete a retransmission cycle
 * on the F-bit, or simply resume sending now that the peer is ready. */
3922 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3924 struct l2cap_pinfo *pi = l2cap_pi(sk);
3926 pi->expected_ack_seq = __get_reqseq(rx_control);
3927 l2cap_drop_acked_frames(sk);
/* Peer polled us: answer with F-bit (SREJ tail if recovery pending). */
3929 if (rx_control & L2CAP_CTRL_POLL) {
3930 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3931 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3932 (pi->unacked_frames > 0))
3933 __mod_retrans_timer();
3935 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3936 l2cap_send_srejtail(sk);
3938 l2cap_send_i_or_rr_or_rnr(sk);
/* F-bit: peer answered our poll; retransmit if a REJ is pending. */
3941 } else if (rx_control & L2CAP_CTRL_FINAL) {
3942 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3944 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3945 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3947 l2cap_retransmit_frames(sk);
/* Plain RR: remote no longer busy; restart sending. */
3950 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3951 (pi->unacked_frames > 0))
3952 __mod_retrans_timer();
3954 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3955 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3958 spin_lock_bh(&pi->send_lock);
3959 l2cap_ertm_send(sk);
3960 spin_unlock_bh(&pi->send_lock);
/* Handle a Reject (REJ) S-frame: the peer requests retransmission of
 * all frames from ReqSeq onward. Frames up to ReqSeq are acked first;
 * REJ_ACT guards against retransmitting twice for the same REJ while
 * a poll is outstanding (WAIT_F). */
3965 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3967 struct l2cap_pinfo *pi = l2cap_pi(sk);
3968 u8 tx_seq = __get_reqseq(rx_control);
3970 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3972 pi->expected_ack_seq = tx_seq;
3973 l2cap_drop_acked_frames(sk);
3975 if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit present: only retransmit if no REJ action already pending. */
3976 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3977 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3979 l2cap_retransmit_frames(sk);
3981 l2cap_retransmit_frames(sk);
3983 if (pi->conn_state & L2CAP_CONN_WAIT_F)
3984 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the one
 * frame the peer asks for. P-bit variant also acks up to ReqSeq and
 * resumes sending; SREJ_ACT/srej_save_reqseq pair an outstanding poll
 * with its eventual F-bit answer to avoid double retransmission. */
3987 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3989 struct l2cap_pinfo *pi = l2cap_pi(sk);
3990 u8 tx_seq = __get_reqseq(rx_control);
3992 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3994 if (rx_control & L2CAP_CTRL_POLL) {
3995 pi->expected_ack_seq = tx_seq;
3996 l2cap_drop_acked_frames(sk);
3997 l2cap_retransmit_one_frame(sk, tx_seq);
3999 spin_lock_bh(&pi->send_lock);
4000 l2cap_ertm_send(sk);
4001 spin_unlock_bh(&pi->send_lock);
/* Remember which SREJ the pending F-bit will correspond to. */
4003 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4004 pi->srej_save_reqseq = tx_seq;
4005 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4007 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit matching the saved SREJ: already handled, just clear state. */
4008 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4009 pi->srej_save_reqseq == tx_seq)
4010 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4012 l2cap_retransmit_one_frame(sk, tx_seq);
4014 l2cap_retransmit_one_frame(sk, tx_seq);
4015 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4016 pi->srej_save_reqseq = tx_seq;
4017 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, ack
 * up to ReqSeq, stop retransmissions, and answer a poll if present. */
4022 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4024 struct l2cap_pinfo *pi = l2cap_pi(sk);
4025 u8 tx_seq = __get_reqseq(rx_control);
4027 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4028 pi->expected_ack_seq = tx_seq;
4029 l2cap_drop_acked_frames(sk);
/* No SREJ recovery in progress: just stop retransmitting and,
 * if polled, answer with RR/RNR carrying the F-bit. */
4031 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4032 del_timer(&pi->retrans_timer);
4033 if (rx_control & L2CAP_CTRL_POLL)
4034 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
/* During SREJ recovery: answer a poll with the pending SREJ tail. */
4038 if (rx_control & L2CAP_CTRL_POLL)
4039 l2cap_send_srejtail(sk);
4041 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received ERTM S-frame (supervisory frame) to the handler
 * for its type (RR / REJ / SREJ / RNR). An F-bit that answers our
 * outstanding poll is resolved first: stop the monitor timer and
 * re-arm retransmission if unacked frames remain. */
4044 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4046 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4048 if (L2CAP_CTRL_FINAL & rx_control &&
4049 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4050 del_timer(&l2cap_pi(sk)->monitor_timer);
4051 if (l2cap_pi(sk)->unacked_frames > 0)
4052 __mod_retrans_timer();
4053 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4056 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4057 case L2CAP_SUPER_RCV_READY:
4058 l2cap_data_channel_rrframe(sk, rx_control);
4061 case L2CAP_SUPER_REJECT:
4062 l2cap_data_channel_rejframe(sk, rx_control);
4065 case L2CAP_SUPER_SELECT_REJECT:
4066 l2cap_data_channel_srejframe(sk, rx_control);
4069 case L2CAP_SUPER_RCV_NOT_READY:
4070 l2cap_data_channel_rnrframe(sk, rx_control);
/* Deliver a data frame received on a connection-oriented channel.
 * Looks the channel up by CID and branches on its mode:
 *  - BASIC: MTU check then straight into the socket receive queue
 *    (no flow control available — drops on overflow);
 *  - ERTM: strip control field and FCS, validate length and ReqSeq,
 *    then route I-frames / S-frames to their handlers;
 *  - STREAMING: like ERTM minus recovery — out-of-sequence frames
 *    simply advance expected_tx_seq past the loss. */
4078 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4081 struct l2cap_pinfo *pi;
4083 u8 tx_seq, req_seq, next_tx_seq_offset, req_seq_offset;
4085 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4087 BT_DBG("unknown cid 0x%4.4x", cid);
4093 BT_DBG("sk %p, len %d", sk, skb->len);
4095 if (sk->sk_state != BT_CONNECTED)
4099 case L2CAP_MODE_BASIC:
4100 /* If socket recv buffers overflows we drop data here
4101 * which is *bad* because L2CAP has to be reliable.
4102 * But we don't have any other choice. L2CAP doesn't
4103 * provide flow control mechanism. */
4105 if (pi->imtu < skb->len)
4108 if (!sock_queue_rcv_skb(sk, skb))
4112 case L2CAP_MODE_ERTM:
/* First 2 bytes of the payload are the ERTM control field. */
4113 control = get_unaligned_le16(skb->data);
4117 if (__is_sar_start(control))
4120 if (pi->fcs == L2CAP_FCS_CRC16)
4124 * We can just drop the corrupted I-frame here.
4125 * Receiver will miss it and start proper recovery
4126 * procedures and ask retransmission.
/* Payload larger than negotiated MPS is a protocol violation. */
4128 if (len > pi->mps) {
4129 l2cap_send_disconn_req(pi->conn, sk);
4133 if (l2cap_check_fcs(pi, skb))
/* Validate ReqSeq: it must fall between expected_ack_seq and
 * next_tx_seq (modulo 64) or the peer is acking frames we
 * never sent. */
4136 req_seq = __get_reqseq(control);
4137 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4138 if (req_seq_offset < 0)
4139 req_seq_offset += 64;
4141 next_tx_seq_offset =
4142 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4143 if (next_tx_seq_offset < 0)
4144 next_tx_seq_offset += 64;
4146 /* check for invalid req-seq */
4147 if (req_seq_offset > next_tx_seq_offset) {
4148 l2cap_send_disconn_req(pi->conn, sk);
4152 if (__is_iframe(control)) {
4154 l2cap_send_disconn_req(pi->conn, sk);
4158 l2cap_data_channel_iframe(sk, control, skb);
4161 l2cap_send_disconn_req(pi->conn, sk);
4165 l2cap_data_channel_sframe(sk, control, skb);
4170 case L2CAP_MODE_STREAMING:
4171 control = get_unaligned_le16(skb->data);
4175 if (__is_sar_start(control))
4178 if (pi->fcs == L2CAP_FCS_CRC16)
/* Streaming never carries S-frames; len < 4 is a short frame. */
4181 if (len > pi->mps || len < 4 || __is_sframe(control))
4184 if (l2cap_check_fcs(pi, skb))
4187 tx_seq = __get_txseq(control);
/* On loss, just resynchronize past the gap (no retransmission). */
4189 if (pi->expected_tx_seq == tx_seq)
4190 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4192 pi->expected_tx_seq = (tx_seq + 1) % 64;
4194 l2cap_streaming_reassembly_sdu(sk, skb, control);
4199 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (CID 0x0002) frame to a socket bound to the
 * given PSM, subject to state and MTU checks. */
4213 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4217 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4221 BT_DBG("sk %p, len %d", sk, skb->len);
4223 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4226 if (l2cap_pi(sk)->imtu < skb->len)
4229 if (!sock_queue_rcv_skb(sk, skb))
/* Top-level demux for a complete reassembled L2CAP frame: validate the
 * length field against the actual payload, then route by CID to the
 * signalling handler, the connectionless handler, or a data channel. */
4241 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4243 struct l2cap_hdr *lh = (void *) skb->data;
4247 skb_pull(skb, L2CAP_HDR_SIZE);
4248 cid = __le16_to_cpu(lh->cid);
4249 len = __le16_to_cpu(lh->len);
/* Header length must exactly describe the remaining payload. */
4251 if (len != skb->len) {
4256 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4259 case L2CAP_CID_SIGNALING:
4260 l2cap_sig_channel(conn, skb);
4263 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM in their first two bytes. */
4264 psm = get_unaligned_le16(skb->data);
4266 l2cap_conless_channel(conn, psm, skb);
4270 l2cap_data_channel(conn, cid, skb);
4275 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being set up. Scan the
 * listening L2CAP sockets and return the link-mode policy (accept and
 * optional master role). A socket bound to the exact local address
 * (lm1) wins over wildcard BDADDR_ANY listeners (lm2). */
4277 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4279 int exact = 0, lm1 = 0, lm2 = 0;
4280 register struct sock *sk;
4281 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
4283 if (type != ACL_LINK)
4286 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4288 /* Find listening sockets and check their link_mode */
4289 read_lock(&l2cap_sk_list.lock);
4290 sk_for_each(sk, node, &l2cap_sk_list.head) {
4291 if (sk->sk_state != BT_LISTEN)
4294 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4295 lm1 |= HCI_LM_ACCEPT;
4296 if (l2cap_pi(sk)->role_switch)
4297 lm1 |= HCI_LM_MASTER;
4299 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4300 lm2 |= HCI_LM_ACCEPT;
4301 if (l2cap_pi(sk)->role_switch)
4302 lm2 |= HCI_LM_MASTER;
4305 read_unlock(&l2cap_sk_list.lock);
4307 return exact ? lm1 : lm2;
/* HCI callback: ACL connection setup finished. On success create/ready
 * the L2CAP connection; on failure tear it down with the mapped errno. */
4310 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4312 struct l2cap_conn *conn;
4314 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4316 if (hcon->type != ACL_LINK)
4320 conn = l2cap_conn_add(hcon, status);
4322 l2cap_conn_ready(conn);
4324 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: the controller asks which disconnect reason to use for
 * this link; report the reason stored on the L2CAP connection. */
4329 static int l2cap_disconn_ind(struct hci_conn *hcon)
4331 struct l2cap_conn *conn = hcon->l2cap_data;
4333 BT_DBG("hcon %p", hcon);
4335 if (hcon->type != ACL_LINK || !conn)
4338 return conn->disc_reason;
/* HCI callback: the ACL link went down — destroy the L2CAP connection
 * and all its channels with the HCI reason mapped to an errno. */
4341 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4343 BT_DBG("hcon %p reason %d", hcon, reason);
4345 if (hcon->type != ACL_LINK)
4348 l2cap_conn_del(hcon, bt_err(reason));
/* React to a change of link encryption for one channel (SEQPACKET or
 * STREAM sockets only). Losing encryption gives a MEDIUM-security
 * channel a 5 s grace timer and closes a HIGH-security channel
 * outright; gaining encryption cancels the MEDIUM grace timer. */
4353 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4355 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4358 if (encrypt == 0x00) {
4359 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4360 l2cap_sock_clear_timer(sk);
4361 l2cap_sock_set_timer(sk, HZ * 5);
4362 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4363 __l2cap_sock_close(sk, ECONNREFUSED);
4365 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4366 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption completed (status 0 = ok).
 * Walk every channel on the connection and advance the ones that were
 * waiting on security: outgoing channels (BT_CONNECT) now send their
 * Connection Request; incoming ones (BT_CONNECT2) answer with success
 * or a security-block rejection. Connected channels get their
 * encryption state re-checked. */
4370 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4372 struct l2cap_chan_list *l;
4373 struct l2cap_conn *conn = hcon->l2cap_data;
4379 l = &conn->chan_list;
4381 BT_DBG("conn %p", conn);
4383 read_lock(&l->lock);
4385 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Channels still in the connect-pending phase are skipped here. */
4388 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4393 if (!status && (sk->sk_state == BT_CONNECTED ||
4394 sk->sk_state == BT_CONFIG)) {
4395 l2cap_check_encryption(sk, encrypt);
/* Security done for an outgoing channel: send Connect Request. */
4400 if (sk->sk_state == BT_CONNECT) {
4402 struct l2cap_conn_req req;
4403 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4404 req.psm = l2cap_pi(sk)->psm;
4406 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4408 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4409 L2CAP_CONN_REQ, sizeof(req), &req);
4411 l2cap_sock_clear_timer(sk);
4412 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming channel waiting on security: answer the pending
 * Connect Request with success or a security block. */
4414 } else if (sk->sk_state == BT_CONNECT2) {
4415 struct l2cap_conn_rsp rsp;
4419 sk->sk_state = BT_CONFIG;
4420 result = L2CAP_CR_SUCCESS;
4422 sk->sk_state = BT_DISCONN;
4423 l2cap_sock_set_timer(sk, HZ / 10);
4424 result = L2CAP_CR_SEC_BLOCK;
4427 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4428 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4429 rsp.result = cpu_to_le16(result);
4430 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4431 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4432 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4438 read_unlock(&l->lock);
/* HCI callback: raw ACL data arrived. Reassemble L2CAP frames that are
 * fragmented across ACL packets: an ACL_START fragment may be a whole
 * frame (fast path) or start a reassembly buffer (conn->rx_skb /
 * conn->rx_len); continuation fragments are appended until rx_len hits
 * zero, then the complete frame is handed to l2cap_recv_frame().
 * Malformed sequences mark the connection unreliable (ECOMM). */
4443 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4445 struct l2cap_conn *conn = hcon->l2cap_data;
4447 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4450 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4452 if (flags & ACL_START) {
4453 struct l2cap_hdr *hdr;
/* A new start while reassembly is pending: drop the stale buffer. */
4457 BT_ERR("Unexpected start frame (len %d)", skb->len);
4458 kfree_skb(conn->rx_skb);
4459 conn->rx_skb = NULL;
4461 l2cap_conn_unreliable(conn, ECOMM);
4465 BT_ERR("Frame is too short (len %d)", skb->len);
4466 l2cap_conn_unreliable(conn, ECOMM);
4470 hdr = (struct l2cap_hdr *) skb->data;
4471 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4473 if (len == skb->len) {
4474 /* Complete frame received */
4475 l2cap_recv_frame(conn, skb);
4479 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4481 if (skb->len > len) {
4482 BT_ERR("Frame is too long (len %d, expected len %d)",
4484 l2cap_conn_unreliable(conn, ECOMM);
4488 /* Allocate skb for the complete frame (with header) */
4489 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4493 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len counts the bytes still expected from continuations. */
4495 conn->rx_len = len - skb->len;
4497 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4499 if (!conn->rx_len) {
4500 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4501 l2cap_conn_unreliable(conn, ECOMM);
4505 if (skb->len > conn->rx_len) {
4506 BT_ERR("Fragment is too long (len %d, expected %d)",
4507 skb->len, conn->rx_len);
4508 kfree_skb(conn->rx_skb);
4509 conn->rx_skb = NULL;
4511 l2cap_conn_unreliable(conn, ECOMM);
4515 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4517 conn->rx_len -= skb->len;
4519 if (!conn->rx_len) {
4520 /* Complete frame received */
4521 l2cap_recv_frame(conn, conn->rx_skb);
4522 conn->rx_skb = NULL;
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: dump
 * one line per L2CAP socket (addresses, state, PSM, CIDs, MTUs,
 * security level) under the socket-list read lock. */
4531 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4534 struct hlist_node *node;
4536 read_lock_bh(&l2cap_sk_list.lock);
4538 sk_for_each(sk, node, &l2cap_sk_list.head) {
4539 struct l2cap_pinfo *pi = l2cap_pi(sk);
4541 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4542 batostr(&bt_sk(sk)->src),
4543 batostr(&bt_sk(sk)->dst),
4544 sk->sk_state, __le16_to_cpu(pi->psm),
4546 pi->imtu, pi->omtu, pi->sec_level);
4549 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open callback: bind the seq_file single-show handler. */
4554 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4556 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file based). */
4559 static const struct file_operations l2cap_debugfs_fops = {
4560 .open = l2cap_debugfs_open,
4562 .llseek = seq_lseek,
4563 .release = single_release,
4566 static struct dentry *l2cap_debugfs;
/* Socket-level operations exposed to userspace for PF_BLUETOOTH/L2CAP
 * sockets; generic bt_sock_* helpers cover poll and ioctl. */
4568 static const struct proto_ops l2cap_sock_ops = {
4569 .family = PF_BLUETOOTH,
4570 .owner = THIS_MODULE,
4571 .release = l2cap_sock_release,
4572 .bind = l2cap_sock_bind,
4573 .connect = l2cap_sock_connect,
4574 .listen = l2cap_sock_listen,
4575 .accept = l2cap_sock_accept,
4576 .getname = l2cap_sock_getname,
4577 .sendmsg = l2cap_sock_sendmsg,
4578 .recvmsg = l2cap_sock_recvmsg,
4579 .poll = bt_sock_poll,
4580 .ioctl = bt_sock_ioctl,
4581 .mmap = sock_no_mmap,
4582 .socketpair = sock_no_socketpair,
4583 .shutdown = l2cap_sock_shutdown,
4584 .setsockopt = l2cap_sock_setsockopt,
4585 .getsockopt = l2cap_sock_getsockopt
/* Registration record for creating L2CAP sockets via socket(2). */
4588 static const struct net_proto_family l2cap_sock_family_ops = {
4589 .family = PF_BLUETOOTH,
4590 .owner = THIS_MODULE,
4591 .create = l2cap_sock_create,
/* HCI protocol hooks: how the HCI core calls into L2CAP for connection
 * lifecycle events, security confirmations and inbound ACL data. */
4594 static struct hci_proto l2cap_hci_proto = {
4596 .id = HCI_PROTO_L2CAP,
4597 .connect_ind = l2cap_connect_ind,
4598 .connect_cfm = l2cap_connect_cfm,
4599 .disconn_ind = l2cap_disconn_ind,
4600 .disconn_cfm = l2cap_disconn_cfm,
4601 .security_cfm = l2cap_security_cfm,
4602 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, create the busy-condition workqueue,
 * register the BT socket family and the HCI protocol hooks, and create
 * the debugfs entry. Unwinds on registration failures. */
4605 static int __init l2cap_init(void)
4609 err = proto_register(&l2cap_proto, 0);
/* Dedicated single-threaded workqueue used by l2cap_busy_work. */
4613 _busy_wq = create_singlethread_workqueue("l2cap");
4617 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4619 BT_ERR("L2CAP socket registration failed");
4623 err = hci_register_proto(&l2cap_hci_proto);
4625 BT_ERR("L2CAP protocol registration failed");
4626 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs entry is best-effort: failure is logged, not fatal. */
4631 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4632 bt_debugfs, NULL, &l2cap_debugfs_fops);
4634 BT_ERR("Failed to create L2CAP debug file");
4637 BT_INFO("L2CAP ver %s", VERSION);
4638 BT_INFO("L2CAP socket layer initialized");
4643 proto_unregister(&l2cap_proto);
/* Module exit: tear down in reverse order of init — debugfs, busy
 * workqueue (flushed first so no work runs after destroy), socket
 * family, HCI protocol hooks, and finally the proto itself. */
4647 static void __exit l2cap_exit(void)
4649 debugfs_remove(l2cap_debugfs);
4651 flush_workqueue(_busy_wq);
4652 destroy_workqueue(_busy_wq);
4654 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4655 BT_ERR("L2CAP socket unregistration failed");
4657 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4658 BT_ERR("L2CAP protocol unregistration failed");
4660 proto_unregister(&l2cap_proto);
/* Exported no-op: other modules reference this symbol so that using an
 * L2CAP socket pulls this module in via the symbol dependency. */
4663 void l2cap_load(void)
4665 /* Dummy function to trigger automatic L2CAP module loading by
4666 * other modules that use L2CAP sockets but don't use any other
4667 * symbols from it. */
4669 EXPORT_SYMBOL(l2cap_load);
4671 module_init(l2cap_init);
4672 module_exit(l2cap_exit);
4674 module_param(enable_ertm, bool, 0644);
4675 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4677 module_param(max_transmit, uint, 0644);
4678 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4680 module_param(tx_window, uint, 0644);
4681 MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");
4683 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4684 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4685 MODULE_VERSION(VERSION);
4686 MODULE_LICENSE("GPL");
4687 MODULE_ALIAS("bt-proto-0");