1 // SPDX-License-Identifier: ISC
3 * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
/* Return the reorder-release timeout (in jiffies) for a TID number:
 * HZ/25 for tidno >= 4, HZ/10 for tids 0-3.
 */
7 static unsigned long mt76_aggr_tid_to_timeo(u8 tidno)
9 /* Currently voice traffic (AC_VO) always runs without aggregation,
10 * no special handling is needed. AC_BE/AC_BK use tids 0-3. Just check
11 * for non AC_BK/AC_BE and set smaller timeout for it. */
12 return HZ / (tidno >= 4 ? 25 : 10);
/* Release the frame buffered at reorder slot @idx (if any) onto @frames
 * and advance the reorder window head by one sequence number.
 * NOTE(review): presumably called with tid->lock held (callers visible
 * below take it); the NULL-skb early return and nframes accounting lines
 * are not visible in this view — confirm against the full source.
 */
16 mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
/* Head advances unconditionally, even for an empty slot. */
20 tid->head = ieee80211_sn_inc(tid->head);
22 skb = tid->reorder_buf[idx];
/* Clear the slot so it can hold a future out-of-order frame. */
26 tid->reorder_buf[idx] = NULL;
28 __skb_queue_tail(frames, skb);
/* Flush every buffered slot from the current window head up to (but not
 * including) the new head sequence number, moving released frames onto
 * @frames. The third parameter (the target head seqno) is on a line not
 * visible in this view.
 */
32 mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
33 struct sk_buff_head *frames,
/* tid->head is advanced by mt76_aggr_release() each iteration. */
38 while (ieee80211_sn_less(tid->head, head)) {
/* Slot index is the sequence number modulo the window size. */
39 idx = tid->head % tid->size;
40 mt76_aggr_release(tid, frames, idx);
/* Release the contiguous run of buffered frames starting at the window
 * head: keep handing slots to mt76_aggr_release() until the head slot is
 * empty. mt76_aggr_release() advances tid->head, so recomputing idx each
 * iteration walks the window forward.
 */
45 mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
47 int idx = tid->head % tid->size;
49 while (tid->reorder_buf[idx]) {
50 mt76_aggr_release(tid, frames, idx);
51 idx = tid->head % tid->size;
/* Timeout-driven release: scan the reorder window for the oldest buffered
 * frame whose reorder_time has exceeded the per-TID timeout and flush the
 * window up to it. NOTE(review): the early-exit when the window is empty
 * and the nframes decrement inside the loop are on lines not visible in
 * this view — confirm against the full source.
 */
56 mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
58 struct mt76_rx_status *status;
60 int start, idx, nframes;
/* First drain any frames that are already in order at the head. */
65 mt76_rx_aggr_release_head(tid, frames);
67 start = tid->head % tid->size;
68 nframes = tid->nframes;
/* Walk the rest of the window, stopping once all buffered frames
 * have been visited or we wrap back to the start slot.
 */
70 for (idx = (tid->head + 1) % tid->size;
71 idx != start && nframes;
72 idx = (idx + 1) % tid->size) {
73 skb = tid->reorder_buf[idx];
/* reorder_time was stamped when the frame was buffered. */
78 status = (struct mt76_rx_status *)skb->cb;
79 if (!time_after32(jiffies,
80 status->reorder_time +
81 mt76_aggr_tid_to_timeo(tid->num)))
/* Frame timed out: advance the window head past it ... */
84 mt76_rx_aggr_release_frames(tid, frames, status->seqno);
/* ... then release whatever is now contiguous at the head. */
87 mt76_rx_aggr_release_head(tid, frames);
/* Delayed-work callback: periodically release frames stuck in the
 * reorder buffer past their timeout, re-arm the work while frames remain
 * buffered, and deliver the released frames to mac80211.
 * NOTE(review): the container_of() member argument, the nframes
 * declaration, the rearm condition on nframes, and any surrounding
 * local_bh_disable/rcu_read_lock section are on lines not visible in
 * this view — confirm against the full source.
 */
91 mt76_rx_aggr_reorder_work(struct work_struct *work)
93 struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
95 struct mt76_dev *dev = tid->dev;
96 struct sk_buff_head frames;
99 __skb_queue_head_init(&frames);
104 spin_lock(&tid->lock);
105 mt76_rx_aggr_check_release(tid, &frames);
/* Snapshot under the lock; used (presumably) to decide re-arming. */
106 nframes = tid->nframes;
107 spin_unlock(&tid->lock);
/* Re-arm the timeout work using the per-TID timeout. */
110 ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
111 mt76_aggr_tid_to_timeo(tid->num));
/* Hand the released frames up the RX path. */
112 mt76_rx_complete(dev, &frames, NULL);
/* Handle control frames relevant to RX aggregation: on a BlockAck
 * Request (BAR), flush the reorder window forward to the BAR's starting
 * sequence number. Non-control and non-BAR frames are ignored (the early
 * returns' bodies are on lines not visible in this view).
 * NOTE(review): the NULL check on the rcu_dereference() result and the
 * seqno declaration are also not visible — confirm against full source.
 */
119 mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
121 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
122 struct ieee80211_bar *bar = mt76_skb_get_hdr(skb);
123 struct mt76_wcid *wcid = status->wcid;
124 struct mt76_rx_tid *tid;
125 u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
128 if (!ieee80211_is_ctl(bar->frame_control))
131 if (!ieee80211_is_back_req(bar->frame_control))
/* BAR carries the TID in the top 4 bits of its control field. */
134 status->qos_ctl = tidno = le16_to_cpu(bar->control) >> 12;
135 seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
136 tid = rcu_dereference(wcid->aggr[tidno]);
140 spin_lock_bh(&tid->lock);
/* Advance the window to the BAR's SSN, then drain the head run. */
142 mt76_rx_aggr_release_frames(tid, frames, seqno);
143 mt76_rx_aggr_release_head(tid, frames);
145 spin_unlock_bh(&tid->lock);
/* Main RX reorder entry point. The frame is queued to @frames first and
 * then, if it belongs to an active BA session, either left in place
 * (in-order delivery), unlinked and buffered in the reorder window, or
 * unlinked and dropped. Several branches (duplicate/out-of-window drop
 * paths, head/size loads, idx computation, the empty-buffer fast path)
 * are on lines not visible in this view — hedged notes below.
 */
148 void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
150 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
151 struct mt76_wcid *wcid = status->wcid;
152 struct ieee80211_sta *sta;
153 struct mt76_rx_tid *tid;
155 u16 seqno, head, size, idx;
156 u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
/* Default: deliver in order; may be unlinked again below. */
159 __skb_queue_tail(frames, skb);
161 sta = wcid_to_sta(wcid);
/* 802.3-decapped frames carry no 802.11 header; only check for
 * BAR control frames on native 802.11 frames.
 */
166 if (!(status->flag & RX_FLAG_8023))
167 mt76_rx_aggr_check_ctl(skb, frames);
171 /* not part of a BA session */
172 ackp = status->qos_ctl & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
173 if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
174 ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
177 tid = rcu_dereference(wcid->aggr[tidno]);
/* Frame was matched against an active BA session, so mac80211's
 * own duplicate detection can be skipped.
 */
181 status->flag |= RX_FLAG_DUP_VALIDATED;
182 spin_lock_bh(&tid->lock);
188 seqno = status->seqno;
/* NOTE(review): 'head'/'size' loads from tid are presumably between
 * the visible lines here — confirm against the full source.
 */
190 sn_less = ieee80211_sn_less(seqno, head);
/* Frame older than the window head: drop it (unlink from @frames;
 * the free path is not visible in this view).
 */
200 __skb_unlink(skb, frames);
/* In-order frame at the head: advance head and leave the skb queued
 * on @frames for immediate delivery.
 */
206 tid->head = ieee80211_sn_inc(head);
208 mt76_rx_aggr_release_head(tid, frames);
/* Out-of-order frame: take it off @frames so it can be buffered. */
212 __skb_unlink(skb, frames);
215 * Frame sequence number exceeds buffering window, free up some space
216 * by releasing previous frames
218 if (!ieee80211_sn_less(seqno, head + size)) {
219 head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
220 mt76_rx_aggr_release_frames(tid, frames, head);
225 /* Discard if the current slot is already in use */
226 if (tid->reorder_buf[idx]) {
/* Buffer the frame and stamp it for the timeout-based release. */
231 status->reorder_time = jiffies;
232 tid->reorder_buf[idx] = skb;
/* Release anything now contiguous and (re)arm the reorder timer. */
234 mt76_rx_aggr_release_head(tid, frames);
236 ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
237 mt76_aggr_tid_to_timeo(tid->num));
240 spin_unlock_bh(&tid->lock);
/* Set up RX aggregation reordering for (wcid, tidno): tear down any
 * previous session, allocate a zeroed mt76_rx_tid with a trailing
 * reorder_buf of @size slots, and publish it via RCU.
 * NOTE(review): the trailing parameters (ssn/size), the kzalloc failure
 * return, and the field initialization (head/size/dev/num) are on lines
 * not visible in this view — confirm against the full source.
 */
243 int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
246 struct mt76_rx_tid *tid;
/* Replace any existing session for this TID first. */
248 mt76_rx_aggr_stop(dev, wcid, tidno);
250 tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
258 INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
259 spin_lock_init(&tid->lock);
/* Publish only after full initialization so RCU readers see a
 * consistent structure.
 */
261 rcu_assign_pointer(wcid->aggr[tidno], tid);
265 EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
/* Tear down a reorder session: free every buffered frame under the lock,
 * then cancel the pending timeout work (which must happen after unlock,
 * since the work itself takes tid->lock).
 * NOTE(review): the loop-body NULL-skb continue, the nframes decrement,
 * and the skb free call are on lines not visible in this view — confirm.
 */
267 static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
269 u16 size = tid->size;
272 spin_lock_bh(&tid->lock);
/* Stop early once all buffered frames have been reclaimed. */
275 for (i = 0; tid->nframes && i < size; i++) {
276 struct sk_buff *skb = tid->reorder_buf[i];
281 tid->reorder_buf[i] = NULL;
286 spin_unlock_bh(&tid->lock);
/* Safe only outside the lock: the work callback takes tid->lock. */
288 cancel_delayed_work_sync(&tid->reorder_work);
/* Stop RX aggregation for (wcid, tidno): atomically swap the published
 * pointer to NULL (writers serialized by dev->mutex per the lockdep
 * annotation), shut the session down, and free it after an RCU grace
 * period so concurrent RCU readers stay safe.
 * NOTE(review): the NULL check guarding the shutdown/free calls is on a
 * line not visible in this view — confirm against the full source.
 */
291 void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
293 struct mt76_rx_tid *tid = NULL;
/* Swap in NULL and take ownership of the old session pointer. */
295 tid = rcu_replace_pointer(wcid->aggr[tidno], tid,
296 lockdep_is_held(&dev->mutex));
298 mt76_rx_aggr_shutdown(dev, tid);
/* Deferred free: readers may still hold references until the grace
 * period ends.
 */
299 kfree_rcu(tid, rcu_head);
302 EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);