return err;
}
+static void l2cap_pass_to_tx(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
+{
+ BT_DBG("chan %p, control %p", chan, control);
+ l2cap_tx(chan, control, 0, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
+}
+
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
skb->truesize += new_frag->truesize;
}
-static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
+static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
+ struct l2cap_ctrl *control)
{
int err = -EINVAL;
- switch (__get_ctrl_sar(chan, control)) {
+ switch (control->sar) {
case L2CAP_SAR_UNSEGMENTED:
if (chan->sdu)
break;
skb = skb_dequeue(&chan->srej_q);
- control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
- err = l2cap_reassemble_sdu(chan, skb, control);
+ err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
if (err < 0) {
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
return 0;
}
- err = l2cap_reassemble_sdu(chan, skb, rx_control);
+ err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
if (err < 0) {
return 0;
}
+static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
+{
+ BT_DBG("chan %p, txseq %d", chan, txseq);
+
+ BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
+ chan->expected_tx_seq);
+
+ if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
+ chan->tx_win) {
+ /* See notes below regarding "double poll" and
+ * invalid packets.
+ */
+ if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
+ BT_DBG("Invalid/Ignore - after SREJ");
+ return L2CAP_TXSEQ_INVALID_IGNORE;
+ } else {
+ BT_DBG("Invalid - in window after SREJ sent");
+ return L2CAP_TXSEQ_INVALID;
+ }
+ }
+
+ if (chan->srej_list.head == txseq) {
+ BT_DBG("Expected SREJ");
+ return L2CAP_TXSEQ_EXPECTED_SREJ;
+ }
+
+ if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
+ BT_DBG("Duplicate SREJ - txseq already stored");
+ return L2CAP_TXSEQ_DUPLICATE_SREJ;
+ }
+
+ if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
+ BT_DBG("Unexpected SREJ - not requested");
+ return L2CAP_TXSEQ_UNEXPECTED_SREJ;
+ }
+ }
+
+ if (chan->expected_tx_seq == txseq) {
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
+ chan->tx_win) {
+ BT_DBG("Invalid - txseq outside tx window");
+ return L2CAP_TXSEQ_INVALID;
+ } else {
+ BT_DBG("Expected");
+ return L2CAP_TXSEQ_EXPECTED;
+ }
+ }
+
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) <
+ __seq_offset(chan, chan->expected_tx_seq,
+ chan->last_acked_seq)) {
+ BT_DBG("Duplicate - expected_tx_seq later than txseq");
+ return L2CAP_TXSEQ_DUPLICATE;
+ }
+
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
+ /* A source of invalid packets is a "double poll" condition,
+ * where delays cause us to send multiple poll packets. If
+ * the remote stack receives and processes both polls,
+ * sequence numbers can wrap around in such a way that a
+ * resent frame has a sequence number that looks like new data
+ * with a sequence gap. This would trigger an erroneous SREJ
+ * request.
+ *
+ * Fortunately, this is impossible with a tx window that's
+ * less than half of the maximum sequence number, which allows
+ * invalid frames to be safely ignored.
+ *
+ * With tx window sizes greater than half of the tx window
+ * maximum, the frame is invalid and cannot be ignored. This
+ * causes a disconnect.
+ */
+
+ if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
+ BT_DBG("Invalid/Ignore - txseq outside tx window");
+ return L2CAP_TXSEQ_INVALID_IGNORE;
+ } else {
+ BT_DBG("Invalid - txseq outside tx window");
+ return L2CAP_TXSEQ_INVALID;
+ }
+ } else {
+ BT_DBG("Unexpected - txseq indicates missing frames");
+ return L2CAP_TXSEQ_UNEXPECTED;
+ }
+}
+
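To make the window check above concrete, here is a small user-space sketch. It is not part of the patch: seq_offset(), TX_WIN and TX_WIN_MAX are illustrative stand-ins for __seq_offset(), chan->tx_win and chan->tx_win_max, using the standard 6-bit control field (64 sequence numbers). It shows how a resent frame whose sequence number has wrapped lands outside the tx window, and how keeping tx_win at or below half of the sequence space lets such frames be ignored instead of forcing a disconnect.

#include <stdio.h>

#define TX_WIN_MAX 63	/* standard control field: sequence numbers 0..63 */
#define TX_WIN     32	/* <= (TX_WIN_MAX + 1) / 2, so out-of-window frames
			 * can be safely ignored */

/* Modular distance from seq2 forward to seq1; the mask works because the
 * sequence space (TX_WIN_MAX + 1) is a power of two.
 */
static unsigned int seq_offset(unsigned int seq1, unsigned int seq2)
{
	return (seq1 - seq2) & TX_WIN_MAX;
}

int main(void)
{
	unsigned int last_acked_seq = 60;
	unsigned int txseq = 30;	/* resent frame, sequence number wrapped */
	unsigned int off = seq_offset(txseq, last_acked_seq);	/* (30 - 60) mod 64 = 34 */

	if (off >= TX_WIN) {
		if (TX_WIN <= ((TX_WIN_MAX + 1) >> 1))
			printf("txseq %u: offset %u is outside the window - ignore\n",
			       txseq, off);
		else
			printf("txseq %u: offset %u is outside the window - disconnect\n",
			       txseq, off);
	} else {
		printf("txseq %u: offset %u is inside the window\n", txseq, off);
	}

	return 0;
}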
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
struct sk_buff *skb, u8 event)
{
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
struct sk_buff *skb)
{
- /* Placeholder */
- return -ENOTSUPP;
+ int err = 0;
+
+ BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
+ chan->rx_state);
+
+ if (l2cap_classify_txseq(chan, control->txseq) ==
+ L2CAP_TXSEQ_EXPECTED) {
+ l2cap_pass_to_tx(chan, control);
+
+ BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
+ __next_seq(chan, chan->buffer_seq));
+
+ chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
+
+ l2cap_reassemble_sdu(chan, skb, control);
+ } else {
+ if (chan->sdu) {
+ kfree_skb(chan->sdu);
+ chan->sdu = NULL;
+ }
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+
+ if (skb) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
+ }
+ }
+
+ chan->last_acked_seq = control->txseq;
+ chan->expected_tx_seq = __next_seq(chan, control->txseq);
+
+ return err;
}
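For context on the streaming-mode rule implemented above, the following illustrative user-space model (not part of the patch; struct rx_state, stream_rx() and SEQ_SPACE are hypothetical stand-ins for the channel state) shows the behaviour: only the expected txseq is reassembled, anything else is discarded along with any partial SDU, and the receive state always resynchronizes to the frame that actually arrived, since streaming mode never requests retransmission.

#include <stdbool.h>
#include <stdio.h>

#define SEQ_SPACE 64	/* tx_win_max + 1 for the standard control field */

struct rx_state {
	unsigned int expected_tx_seq;
	unsigned int buffer_seq;
};

/* Returns true if the frame would be reassembled, false if discarded. */
static bool stream_rx(struct rx_state *rx, unsigned int txseq)
{
	bool expected = (txseq == rx->expected_tx_seq);

	if (expected)
		rx->buffer_seq = (rx->buffer_seq + 1) % SEQ_SPACE;
	/* else: any partial SDU and the frame itself are dropped */

	/* Streaming mode never sends SREJ; always resync to what arrived */
	rx->expected_tx_seq = (txseq + 1) % SEQ_SPACE;

	return expected;
}

int main(void)
{
	struct rx_state rx = { .expected_tx_seq = 0, .buffer_seq = 0 };
	unsigned int seqs[] = { 0, 1, 4, 5 };	/* frames 2 and 3 were lost */
	unsigned int i;

	for (i = 0; i < sizeof(seqs) / sizeof(seqs[0]); i++)
		printf("txseq %u: %s\n", seqs[i],
		       stream_rx(&rx, seqs[i]) ? "delivered" : "discarded");

	return 0;
}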
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)