 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <linux/log2.h>
#include <linux/bitfield.h>

/* When under memory pressure the rx ring refill may fail and a retry is needed. */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
	struct ath10k_skb_rxcb *rxcb;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
	for (i = 0; i < htt->rx_ring.size; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		rxcb = ATH10K_SKB_RXCB(skb);
		dma_unmap_single(htt->ar->dev, rxcb->paddr,
				 skb->len + skb_tailroom(skb),
		dev_kfree_skb_any(skb);

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);

static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
	htt->rx_ring.paddrs_ring_32 = vaddr;

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
	htt->rx_ring.paddrs_ring_64 = vaddr;

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
	htt->rx_ring.paddrs_ring_32[idx] = 0;

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
	htt->rx_ring.paddrs_ring_64[idx] = 0;

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
	return (void *)htt->rx_ring.paddrs_ring_32;

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
	return (void *)htt->rx_ring.paddrs_ring_64;
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure the ring is always half empty. This
	 * guarantees that no replenishment overruns are possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);

	if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
		PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -

	/* Clear the rx_desc attention word before posting to the Rx ring */
	rx_desc = (struct htt_rx_desc *)skb->data;
	rx_desc->attention.flags = __cpu_to_le32(0);

	paddr = dma_map_single(htt->ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),

	if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
		dev_kfree_skb_any(skb);

	rxcb = ATH10K_SKB_RXCB(skb);

	htt->rx_ring.netbufs_ring[idx] = skb;
	ath10k_htt_set_paddrs_ring(htt, paddr, idx);
	htt->rx_ring.fill_cnt++;

	if (htt->rx_ring.in_ord_rx) {
		hash_add(htt->rx_ring.skb_table,
			 &ATH10K_SKB_RXCB(skb)->hlist,

	idx &= htt->rx_ring.size_mask;

	/* Make sure the rx buffer is updated before the available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
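
/* Editor's illustrative sketch (hypothetical helper, not part of the driver):
 * HTT_RX_RING_SIZE is a power of two, so wrapping the producer index is a
 * single AND with size_mask = size - 1 rather than a modulo. This is the
 * index arithmetic used by __ath10k_htt_rx_ring_fill_n() and
 * ath10k_htt_rx_netbuf_pop() above and below.
 */
static inline int example_rx_ring_next_idx(int idx, int size_mask)
{
	return (idx + 1) & size_mask;	/* e.g. size 2048 -> mask 0x7ff */
}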
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is that RX may take up a significant amount of CPU cycles
	 * and starve other tasks, e.g. TX on an ethernet device while acting
	 * as a bridge with the ath10k wlan interface. This ended up with very
	 * poor performance once the host system's CPU was overwhelmed with RX
	 * on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring the FW
	 * will not report RX until the ring is refilled with enough buffers.
	 * This automatically balances load with respect to CPU power.
	 *
	 * This probably comes at the cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/* Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));

	spin_unlock_bh(&htt->rx_ring.lock);
static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
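
/* Editor's note (illustrative, not driver code): from_timer() is a
 * container_of() wrapper for timer callbacks; it recovers the ath10k_htt
 * instance embedding the timer so the callback needs no global state. An
 * equivalent hand-written spelling would be:
 *
 *	struct ath10k_htt *htt =
 *		container_of(t, struct ath10k_htt, rx_ring.refill_retry_timer);
 */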
int ath10k_htt_rx_ring_refill(struct ath10k *ar)
	struct ath10k_htt *htt = &ar->htt;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));
		ath10k_htt_rx_ring_free(htt);

	spin_unlock_bh(&htt->rx_ring.lock);
void ath10k_htt_rx_free(struct ath10k_htt *htt)
	if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	spin_lock_bh(&htt->rx_ring.lock);
	ath10k_htt_rx_ring_free(htt);
	spin_unlock_bh(&htt->rx_ring.lock);

	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  ath10k_htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
	struct ath10k *ar = htt->ar;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	ath10k_htt_reset_paddrs_ring(htt, idx);

	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));
/* Returns: < 0 on fatal error, 0 for a non-chained MSDU, 1 for a chained MSDU */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	int last_msdu, msdu_len_invalid, msdu_chained;

	msdu = ath10k_htt_rx_netbuf_pop(htt);
		__skb_queue_purge(amsdu);

	__skb_queue_tail(amsdu, msdu);

	rx_desc = (struct htt_rx_desc *)msdu->data;

	/* FIXME: we must report the msdu payload since this is what the
	 * caller expects now.
	 */
	skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
	skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

	/* Sanity check - confirm the HW is finished filling in the rx data.
	 * If the HW and SW are working correctly, then it's guaranteed
	 * that the HW's MAC DMA is done before this point in the SW.
	 * To prevent handling a stale Rx descriptor, just assert for now
	 * until we have a way to recover.
	 */
	if (!(__le32_to_cpu(rx_desc->attention.flags)
	      & RX_ATTENTION_FLAGS_MSDU_DONE)) {
		__skb_queue_purge(amsdu);

	msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
			      & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
				 RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
	msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
		      RX_MSDU_START_INFO0_MSDU_LENGTH);
	msdu_chained = rx_desc->frag_info.ring2_more_count;

	if (msdu_len_invalid)

	skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
	msdu_len -= msdu->len;

	/* Note: Chained buffers do not contain an rx descriptor */
	while (msdu_chained--) {
		msdu = ath10k_htt_rx_netbuf_pop(htt);
			__skb_queue_purge(amsdu);

		__skb_queue_tail(amsdu, msdu);
		skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
		msdu_len -= msdu->len;

	last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
		    RX_MSDU_END_INFO0_LAST_MSDU;

	trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
				 sizeof(*rx_desc) - sizeof(u32));

	if (skb_queue_empty(amsdu))

	/* Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
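
/* Editor's illustrative sketch (hypothetical caller, not driver code): how
 * the return convention of ath10k_htt_rx_amsdu_pop() is consumed. Negative
 * means the ring is corrupted, 0 means plain MSDUs, 1 means the MSDUs were
 * chained across several rx buffers and must be unchained before decap.
 */
static int example_pop_and_classify(struct ath10k_htt *htt,
				    struct sk_buff_head *amsdu)
{
	int ret = ath10k_htt_rx_amsdu_pop(htt, amsdu);

	if (ret < 0)
		return ret;	/* fatal: ring is unusable, stop processing */
	if (ret == 1)
		pr_debug("chained msdus, unchain before decap\n");
	return 0;
}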
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));
static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
					       struct sk_buff *frag_list,
					       unsigned int frag_len)
	skb_shinfo(skb_head)->frag_list = frag_list;
	skb_head->data_len = frag_len;
	skb_head->len += skb_head->data_len;
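
/* Editor's illustrative sketch (hypothetical use, not driver code): after
 * ath10k_htt_append_frag_list(), skb->data_len counts only the chained
 * fragment bytes while skb->len counts linear plus fragment bytes.
 */
static void example_frag_list_accounting(struct sk_buff *head,
					 struct sk_buff *frag)
{
	unsigned int linear = head->len;	/* head's own (linear) bytes */

	ath10k_htt_append_frag_list(head, frag, frag->len);
	/* now head->data_len == frag->len and
	 * head->len == linear + frag->len
	 */
}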
static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
					     struct sk_buff *msdu,
					     struct htt_rx_in_ord_msdu_desc **msdu_desc)
	struct ath10k *ar = htt->ar;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = (void *)msdu->data;
	trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

	skb_put(msdu, sizeof(struct htt_rx_desc));
	skb_pull(msdu, sizeof(struct htt_rx_desc));
	skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len),

	paddr = __le32_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
		paddr = __le32_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
			prev_frag_buf->next = NULL;

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;

		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
static int
ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
				  struct sk_buff *msdu,
				  struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
	struct ath10k *ar = htt->ar;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = (void *)msdu->data;
	trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

	skb_put(msdu, sizeof(struct htt_rx_desc));
	skb_pull(msdu, sizeof(struct htt_rx_desc));
	skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len),

	paddr = __le64_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
		paddr = __le64_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
			prev_frag_buf->next = NULL;

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;

		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
			__skb_queue_purge(list);

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
				__skb_queue_purge(list);

			__skb_queue_tail(list, msdu);

		__skb_queue_tail(list, msdu);

		rxd = (void *)msdu->data;

		trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

		skb_put(msdu, sizeof(*rxd));
		skb_pull(msdu, sizeof(*rxd));
		skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

		if (!(__le32_to_cpu(rxd->attention.flags) &
		      RX_ATTENTION_FLAGS_MSDU_DONE)) {
			ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
			__skb_queue_purge(list);

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
				__skb_queue_purge(list);

			__skb_queue_tail(list, msdu);

		__skb_queue_tail(list, msdu);

		rxd = (void *)msdu->data;

		trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

		skb_put(msdu, sizeof(*rxd));
		skb_pull(msdu, sizeof(*rxd));
		skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

		if (!(__le32_to_cpu(rxd->attention.flags) &
		      RX_ATTENTION_FLAGS_MSDU_DONE)) {
			ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
	struct ath10k *ar = htt->ar;
	void *vaddr, *vaddr_ring;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not a power of 2\n");

	htt->rx_ring.netbufs_ring =
		kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
	if (!htt->rx_ring.netbufs_ring)

	size = ath10k_htt_get_rx_ring_size(htt);

	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);

	ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);

	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  htt->rx_ring.base_paddr);

	kfree(htt->rx_ring.netbufs_ring);
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:

	ath10k_warn(ar, "unsupported encryption type %d\n", type);

#define MICHAEL_MIC_LEN 8
static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
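
/* Editor's worked example (from the three helpers above): for
 * HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 the per-frame crypto overhead is an
 * 8 byte CCMP header (IEEE80211_CCMP_HDR_LEN), an 8 byte MIC
 * (IEEE80211_CCMP_MIC_LEN) and no separate ICV, so fully stripping a
 * decrypted CCMP frame removes 16 bytes in total.
 */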
struct amsdu_subframe_hdr {

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
		ret = RATE_INFO_BW_20;
		ret = RATE_INFO_BW_40;
		ret = RATE_INFO_BW_80;
		ret = RATE_INFO_BW_160;
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	/* To get the legacy rate index the band is required. Since the band
	 * can't be undefined, check if freq is non-zero.
	 */
	cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
	rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
	rate &= ~RX_PPDU_START_RATE_FLAG;

	sband = &ar->mac.sbands[status->band];
	status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);

	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
			status->bw = RATE_INFO_BW_40;

	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3 */
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nss = ((info2 >> 10) & 0x07) + 1;
			/* Hardware doesn't decode VHT-SIG-B into the Rx
			 * descriptor so it's impossible to decode the MCS.
			 * Also, since firmware consumes Group Id Management
			 * frames the host has no knowledge regarding the
			 * group/user position mapping so it's impossible to
			 * pick the correct Nsts.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on a best-effort basis.
			 */

			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    __le32_to_cpu(rxd->ppdu_start.info0),
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);

		status->rate_idx = mcs;

			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		status->bw = ath10k_bw_to_mac80211_bw(bw);
		status->encoding = RX_ENC_VHT;
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))

	if (ath10k_mac_vif_chan(arvif->vif, &def))
static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
	struct cfg80211_chan_def *def = data;

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
		ch = ar->rx_channel;
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
		ch = ath10k_htt_rx_h_any_channel(ar);
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	status->band = ch->band;
	status->freq = ch->center_freq;
static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

		if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd->ppdu_start.rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until the end of the PPDU is known?
	 *
	 * FIXME: Can we get/compute a 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
	struct sk_buff *first;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(amsdu))

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->rate_idx = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);

		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
static const char * const tid_to_ac[] = {

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
	if (!ieee80211_is_data_qos(hdr->frame_control))

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
		snprintf(out, size, "tid %d", tid);
static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	status = IEEE80211_SKB_RXCB(skb);

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);
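
/* Editor's worked example: a 4-address (WDS) data header is 30 bytes, so
 * firmware without ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING pads it
 * to round_up(30, 4) = 32 bytes; a regular 3-address header is already
 * 24 bytes and stays unchanged.
 */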
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					const u8 first_hdr[64])
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	bool msdu_limit_err;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* Some hardware (QCA99x0 variants) limits the number of MSDUs it
	 * deaggregates from an A-MSDU, so that unwanted MSDU-deaggregation is
	 * avoided for error packets. If the limit is exceeded, the HW sends
	 * all remaining MSDUs as a single last MSDU with this msdu limit
	 * error set.
	 */
	msdu_limit_err = ath10k_rx_desc_msdu_limit_error(&ar->hw_params, rxd);

	/* If an MSDU limit error occurs, don't warn: a partial raw MSDU
	 * without the first MSDU is expected in that case and is handled
	 * later here.
	 */
	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!is_first && !msdu_limit_err))

	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* Push the original 802.11 header */
	if (unlikely(msdu_limit_err)) {
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;

		memcpy(skb_push(msdu, crypto_len),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),

		memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */

	/* The payload is decrypted so strip crypto params. Start from the
	 * tail since the hdr is used to compute some fields.
	 */
	hdr = (void *)msdu->data;

	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));

		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));

	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull the decapped header and copy the SA & DA */
	rxd = (void *)msdu->data - sizeof(*rxd);

	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push the original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* The original 802.11 header has a different DA and in
	 * case of 4addr it may also have a different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					   struct sk_buff *msdu,
					   enum htt_rx_mpdu_encrypt_type enctype)
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

	rfc1042 += round_up(hdr_len, bytes_aligned) +
		   round_up(crypto_len, bytes_aligned);

	rfc1042 += sizeof(struct amsdu_subframe_hdr);
static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 */
	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull the decapped header and copy the SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push the original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* The original 802.11 header has a different DA and in
	 * case of 4addr it may also have a different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 */
	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    enum htt_rx_mpdu_encrypt_type enctype,
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 */
	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted, first_hdr);
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
	struct htt_rx_desc *rxd;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
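
/* Editor's worked example (from the checks above): an IPv4/UDP frame with
 * both attention checksum-fail bits clear yields CHECKSUM_UNNECESSARY, so
 * the stack skips software verification; any non-IP or non-TCP/UDP frame,
 * or one with a fail bit set, falls back to CHECKSUM_NONE and is verified
 * in software.
 */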
static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header,
				 enum ath10k_pkt_rx_err *err)
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	bool has_crypto_err;
	bool has_peer_idx_invalid;

	if (skb_queue_empty(amsdu))

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* The first MSDU's Rx descriptor in an A-MSDU contains the full
	 * 802.11 decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data, it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_peer_idx_invalid);
	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

		status->flag |= RX_FLAG_FAILED_FCS_CRC;

		status->flag |= RX_FLAG_MMIC_ERROR;

		*err = ATH10K_PKT_RX_ERR_FCS;
	else if (has_tkip_err)
		*err = ATH10K_PKT_RX_ERR_TKIP;
	else if (has_crypto_err)
		*err = ATH10K_PKT_RX_ERR_CRYPT;
	else if (has_peer_idx_invalid)
		*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters quite a bit.
	 */
		status->flag |= RX_FLAG_ONLY_MONITOR;

		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
			status->flag |= RX_FLAG_IV_STRIPPED;

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,

		/* Undecapping involves copying the original 802.11 header back
		 * to the sk_buff. If the frame is protected and hardware has
		 * decrypted it then remove the protected bit.
		 */
		if (fill_crypt_header)

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
			status->flag |= RX_FLAG_ALLOW_SAME_PN;

		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
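
/* Editor's worked example (hypothetical three-subframe A-MSDU, following the
 * loop above): subframes 0 and 1 are queued with RX_FLAG_AMSDU_MORE set and
 * the final subframe without it; only subframe 0 keeps PN validation enabled
 * (RX_FLAG_ALLOW_SAME_PN cleared), the remaining subframes may carry the
 * same PN.
 */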
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
			       unsigned long *unchain_cnt)
	struct sk_buff *skb, *first;
	int amsdu_len = skb_queue_len(amsdu);

	/* TODO: Might be able to optimize this by using
	 * skb_try_coalesce or a similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 */
	first = __skb_dequeue(amsdu);

	/* Allocate the total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);

	/* Walk the list again, copying contents into
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
		dev_kfree_skb_any(skb);

	__skb_queue_head(amsdu, first);

	*unchain_cnt += amsdu_len - 1;
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    unsigned long *drop_cnt,
				    unsigned long *unchain_cnt)
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	/* FIXME: The current unchaining logic can only handle the simple case
	 * of raw msdu chaining. If the decap mode is other than raw the
	 * chaining may be more complex and isn't handled by the current code.
	 * Don't even try reconstructing such frames - it'll be pretty much
	 * garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		*drop_cnt += skb_queue_len(amsdu);
		__skb_queue_purge(amsdu);

	ath10k_unchain_msdu(amsdu, unchain_cnt);
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
	 */
	if (!rx_status->freq) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status,
				   unsigned long *drop_cnt)
	if (skb_queue_empty(amsdu))

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))

	*drop_cnt += skb_queue_len(amsdu);

	__skb_queue_purge(amsdu);
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	unsigned long drop_cnt = 0;
	unsigned long unchain_cnt = 0;
	unsigned long drop_cnt_filter = 0;
	unsigned long msdus_to_queue, num_msdus;
	enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
	u8 first_hdr[RX_HTT_HDR_STATUS_LEN];

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);

	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;

	num_msdus = skb_queue_len(&amsdu);

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);

	/* A return value of 1 indicates chained MSDUs */
		ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);

	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
	msdus_to_queue = skb_queue_len(&amsdu);
	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);

	ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
				       unchain_cnt, drop_cnt, drop_cnt_filter,
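
/* Editor's illustrative summary (shorthand, not driver code) of the A-MSDU
 * RX pipeline implemented above:
 *
 *	amsdu_pop -> h_ppdu (PPDU-level status)
 *		  -> h_unchain (only when amsdu_pop returned 1)
 *		  -> h_filter (drop frames without a channel / during CAC)
 *		  -> h_mpdu (decap, crypto flags, error classification)
 *		  -> h_enqueue (hand each MSDU to rx_msdus_q)
 */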
static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
					 struct htt_rx_indication_hl *rx,
					 struct sk_buff *skb)
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct fw_rx_desc_hl *fw_desc;
	struct ieee80211_hdr *hdr;
	struct ieee80211_rx_status *rx_status;
	int num_mpdu_ranges;
	struct ieee80211_channel *ch;

	peer_id = __le16_to_cpu(rx->hdr.peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	spin_unlock_bh(&ar->data_lock);
		ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
	fw_desc = &rx->fw_desc;
	rx_desc_len = fw_desc->len;

	/* I have not yet seen any case where num_mpdu_ranges > 1.
	 * qcacld does not seem to handle that case either, so we introduce
	 * the same limitation here as well.
	 */
	if (num_mpdu_ranges > 1)
		ath10k_warn(ar,
			    "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",

	if (mpdu_ranges->mpdu_range_status !=
	    HTT_RX_IND_MPDU_STATUS_OK) {
		ath10k_warn(ar, "MPDU range status: %d\n",
			    mpdu_ranges->mpdu_range_status);

	/* Strip off all headers before the MAC header before delivery to
	 * mac80211.
	 */
	tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
		      sizeof(rx->ppdu) + sizeof(rx->prefix) +
		      sizeof(rx->fw_desc) +
		      sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;
	skb_pull(skb, tot_hdr_len);
	hdr = (struct ieee80211_hdr *)skb->data;
	rx_status = IEEE80211_SKB_RXCB(skb);
	rx_status->chains |= BIT(0);
	rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			    rx->ppdu.combined_rssi;
	rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
		ch = ar->rx_channel;
		ch = ath10k_htt_rx_h_any_channel(ar);
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

		rx_status->band = ch->band;
		rx_status->freq = ch->center_freq;

	if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
		rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
		rx_status->flag |= RX_FLAG_AMSDU_MORE;

	/* Not entirely sure about this, but all frames from the chipset have
	 * the protected flag set even though they have already been decrypted.
	 * Unmasking this flag is necessary in order for mac80211 not to drop
	 *
	 * TODO: Verify this is always the case or find out a way to check
	 * if there has been hw decryption.
	 */
	if (ieee80211_has_protected(hdr->frame_control)) {
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
		rx_status->flag |= RX_FLAG_DECRYPTED |
				   RX_FLAG_IV_STRIPPED |
				   RX_FLAG_MMIC_STRIPPED;

	ieee80211_rx_ni(ar->hw, skb);

	/* We have delivered the skb to the upper layers (mac80211) so we
	 * must not free it.
	 */

	/* Tell the caller that it must free the skb since we have not
	 * consumed it.
	 */
static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
					 struct htt_rx_indication *rx)
	struct ath10k *ar = htt->ar;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	peer_id = __le16_to_cpu(rx->hdr.peer_id);
	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);

	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			(sizeof(struct htt_rx_indication_mpdu_range) *

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	atomic_add(mpdu_count, &htt->num_mpdus_ready);

	ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
2214 static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
2215 struct sk_buff *skb)
2217 struct ath10k_htt *htt = &ar->htt;
2218 struct htt_resp *resp = (struct htt_resp *)skb->data;
2219 struct htt_tx_done tx_done = {};
2220 int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
2221 __le16 msdu_id, *msdus;
2222 bool rssi_enabled = false;
2227 case HTT_DATA_TX_STATUS_NO_ACK:
2228 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
2230 case HTT_DATA_TX_STATUS_OK:
2231 tx_done.status = HTT_TX_COMPL_STATE_ACK;
2233 case HTT_DATA_TX_STATUS_DISCARD:
2234 case HTT_DATA_TX_STATUS_POSTPONE:
2235 case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
2236 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2239 ath10k_warn(ar, "unhandled tx completion status %d\n", status);
2240 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2244 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
2245 resp->data_tx_completion.num_msdus);
2247 msdu_count = resp->data_tx_completion.num_msdus;
2249 if (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI)
2250 rssi_enabled = true;
2252 for (i = 0; i < msdu_count; i++) {
2253 msdus = resp->data_tx_completion.msdus;
2254 msdu_id = msdus[i];
2255 tx_done.msdu_id = __le16_to_cpu(msdu_id);
2257 if (rssi_enabled) {
2258 /* The total number of MSDUs should be even;
2259 * if an odd number is sent, the firmware pads
2260 * the last msdu id slot with 0xffff.
2261 */
2262 if (msdu_count & 0x01) {
2263 msdu_id = msdus[msdu_count + i + 1];
2264 tx_done.ack_rssi = __le16_to_cpu(msdu_id);
2265 } else {
2266 msdu_id = msdus[msdu_count + i];
2267 tx_done.ack_rssi = __le16_to_cpu(msdu_id);
2268 }
2269 }
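/* A sketch of the completion payload layout the indexing above assumes:
 * the first msdu_count entries of msdus[] hold MSDU ids, an optional
 * 0xffff pad makes that region even-sized, and the per-MSDU ack RSSI
 * values follow in the same order, hence msdus[msdu_count + i (+ 1)]:
 *
 *   msdus[]: | id0 | id1 | ... | idN-1 | (0xffff pad) | rssi0 | rssi1 | ...
 */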
2271 /* kfifo_put: In practice firmware shouldn't fire off per-CE
2272 * interrupt and main interrupt (MSI/-X range case) for the same
2273 * HTC service so it should be safe to use kfifo_put w/o lock.
2274 *
2275 * From kfifo_put() documentation:
2276 * Note that with only one concurrent reader and one concurrent
2277 * writer, you don't need extra locking to use these macro.
2278 */
2279 if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
2280 ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
2281 tx_done.msdu_id, tx_done.status);
2282 ath10k_txrx_tx_unref(htt, &tx_done);
2283 }
2284 }
2285 }
2287 static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
2288 {
2289 struct htt_rx_addba *ev = &resp->rx_addba;
2290 struct ath10k_peer *peer;
2291 struct ath10k_vif *arvif;
2292 u16 info0, tid, peer_id;
2294 info0 = __le16_to_cpu(ev->info0);
2295 tid = MS(info0, HTT_RX_BA_INFO0_TID);
2296 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
2298 ath10k_dbg(ar, ATH10K_DBG_HTT,
2299 "htt rx addba tid %hu peer_id %hu size %hhu\n",
2300 tid, peer_id, ev->window_size);
2302 spin_lock_bh(&ar->data_lock);
2303 peer = ath10k_peer_find_by_id(ar, peer_id);
2304 if (!peer) {
2305 ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
2306 peer_id);
2307 spin_unlock_bh(&ar->data_lock);
2308 return;
2309 }
2311 arvif = ath10k_get_arvif(ar, peer->vdev_id);
2312 if (!arvif) {
2313 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
2314 peer->vdev_id);
2315 spin_unlock_bh(&ar->data_lock);
2316 return;
2317 }
2319 ath10k_dbg(ar, ATH10K_DBG_HTT,
2320 "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
2321 peer->addr, tid, ev->window_size);
2323 ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
2324 spin_unlock_bh(&ar->data_lock);
2325 }
2327 static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
2328 {
2329 struct htt_rx_delba *ev = &resp->rx_delba;
2330 struct ath10k_peer *peer;
2331 struct ath10k_vif *arvif;
2332 u16 info0, tid, peer_id;
2334 info0 = __le16_to_cpu(ev->info0);
2335 tid = MS(info0, HTT_RX_BA_INFO0_TID);
2336 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
2338 ath10k_dbg(ar, ATH10K_DBG_HTT,
2339 "htt rx delba tid %hu peer_id %hu\n",
2342 spin_lock_bh(&ar->data_lock);
2343 peer = ath10k_peer_find_by_id(ar, peer_id);
2344 if (!peer) {
2345 ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
2346 peer_id);
2347 spin_unlock_bh(&ar->data_lock);
2348 return;
2349 }
2351 arvif = ath10k_get_arvif(ar, peer->vdev_id);
2352 if (!arvif) {
2353 ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
2354 peer->vdev_id);
2355 spin_unlock_bh(&ar->data_lock);
2356 return;
2357 }
2359 ath10k_dbg(ar, ATH10K_DBG_HTT,
2360 "htt rx stop rx ba session sta %pM tid %hu\n",
2363 ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
2364 spin_unlock_bh(&ar->data_lock);
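/* A rough contract for the helper below, as the code reads: it dequeues
 * MSDUs from @list into @amsdu until one carries
 * RX_MSDU_END_INFO0_LAST_MSDU, i.e. until one complete A-MSDU has been
 * collected. It returns 0 on success, -ENOBUFS/-EINVAL on bad queue
 * state, and -EAGAIN (after splicing the partial chain back onto @list)
 * when the final MSDU has not arrived yet.
 */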
2367 static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
2368 struct sk_buff_head *amsdu)
2369 {
2370 struct sk_buff *msdu;
2371 struct htt_rx_desc *rxd;
2373 if (skb_queue_empty(list))
2374 return -ENOBUFS;
2376 if (WARN_ON(!skb_queue_empty(amsdu)))
2377 return -EINVAL;
2379 while ((msdu = __skb_dequeue(list))) {
2380 __skb_queue_tail(amsdu, msdu);
2382 rxd = (void *)msdu->data - sizeof(*rxd);
2383 if (rxd->msdu_end.common.info0 &
2384 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
2385 break;
2386 }
2388 msdu = skb_peek_tail(amsdu);
2389 rxd = (void *)msdu->data - sizeof(*rxd);
2390 if (!(rxd->msdu_end.common.info0 &
2391 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
2392 skb_queue_splice_init(amsdu, list);
2393 return -EAGAIN;
2394 }
2396 return 0;
2397 }
2399 static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
2400 struct sk_buff *skb)
2401 {
2402 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2404 if (!ieee80211_has_protected(hdr->frame_control))
2405 return;
2407 /* Offloaded frames are already decrypted but firmware insists they are
2408 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
2409 * will drop the frame.
2410 */
2412 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2413 status->flag |= RX_FLAG_DECRYPTED |
2414 RX_FLAG_IV_STRIPPED |
2415 RX_FLAG_MMIC_STRIPPED;
2416 }
2418 static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
2419 struct sk_buff_head *list)
2420 {
2421 struct ath10k_htt *htt = &ar->htt;
2422 struct ieee80211_rx_status *status = &htt->rx_status;
2423 struct htt_rx_offload_msdu *rx;
2424 struct sk_buff *msdu;
2425 size_t offset;
2427 while ((msdu = __skb_dequeue(list))) {
2428 /* Offloaded frames don't have Rx descriptor. Instead they have
2429 * a short meta information header.
2430 */
2432 rx = (void *)msdu->data;
2434 skb_put(msdu, sizeof(*rx));
2435 skb_pull(msdu, sizeof(*rx));
2437 if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
2438 ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
2439 dev_kfree_skb_any(msdu);
2440 continue;
2441 }
2443 skb_put(msdu, __le16_to_cpu(rx->msdu_len));
2445 /* Offloaded rx header length isn't multiple of 2 nor 4 so the
2446 * actual payload is unaligned. Align the frame. Otherwise
2447 * mac80211 complains. This shouldn't reduce performance much
2448 * because these offloaded frames are rare.
2449 */
2450 offset = 4 - ((unsigned long)msdu->data & 3);
2451 skb_put(msdu, offset);
2452 memmove(msdu->data + offset, msdu->data, msdu->len);
2453 skb_pull(msdu, offset);
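/* Note: when msdu->data already sits on a 4-byte boundary the
 * expression above yields offset == 4, so the payload is shifted by a
 * full word; that is harmless since skb_put() just extended the tail by
 * the same amount, but it does mean the copy is never skipped.
 */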
2455 /* FIXME: The frame is NWifi. Re-construct QoS Control
2456 * if possible later.
2457 */
2459 memset(status, 0, sizeof(*status));
2460 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2462 ath10k_htt_rx_h_rx_offload_prot(status, msdu);
2463 ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
2464 ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
2465 }
2466 }
2468 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
2469 {
2470 struct ath10k_htt *htt = &ar->htt;
2471 struct htt_resp *resp = (void *)skb->data;
2472 struct ieee80211_rx_status *status = &htt->rx_status;
2473 struct sk_buff_head list;
2474 struct sk_buff_head amsdu;
2475 u16 peer_id;
2476 u16 msdu_count;
2477 u8 vdev_id;
2478 u8 tid;
2479 bool offload;
2480 bool frag;
2481 int ret;
2483 lockdep_assert_held(&htt->rx_ring.lock);
2485 if (htt->rx_confused)
2486 return -EIO;
2488 skb_pull(skb, sizeof(resp->hdr));
2489 skb_pull(skb, sizeof(resp->rx_in_ord_ind));
2491 peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
2492 msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
2493 vdev_id = resp->rx_in_ord_ind.vdev_id;
2494 tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
2495 offload = !!(resp->rx_in_ord_ind.info &
2496 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
2497 frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
2499 ath10k_dbg(ar, ATH10K_DBG_HTT,
2500 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
2501 vdev_id, peer_id, tid, offload, frag, msdu_count);
2503 if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
2504 ath10k_warn(ar, "dropping invalid in order rx indication\n");
2508 /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
2509 * extracted and processed.
2510 */
2511 __skb_queue_head_init(&list);
2512 if (ar->hw_params.target_64bit)
2513 ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
2514 &list);
2515 else
2516 ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
2517 &list);
2519 if (ret < 0) {
2520 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
2521 htt->rx_confused = true;
2522 return -EIO;
2523 }
2525 /* Offloaded frames are very different and need to be handled
2526 * separately.
2527 */
2528 if (offload)
2529 ath10k_htt_rx_h_rx_offload(ar, &list);
2531 while (!skb_queue_empty(&list)) {
2532 __skb_queue_head_init(&amsdu);
2533 ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
2534 switch (ret) {
2535 case 0:
2536 /* Note: The in-order indication may report interleaved
2537 * frames from different PPDUs meaning reported rx rate
2538 * to mac80211 isn't accurate/reliable. It's still
2539 * better to report something than nothing though. This
2540 * should still give an idea about rx rate to the user.
2541 */
2542 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
2543 ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
2544 ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
2545 NULL);
2546 ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
2547 break;
2548 case -EAGAIN:
2549 /* fall through */
2550 default:
2551 /* Should not happen. */
2552 ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
2553 htt->rx_confused = true;
2554 __skb_queue_purge(&list);
2555 return -EIO;
2556 }
2557 }
2559 return ret;
2560 }
2561 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
2562 const __le32 *resp_ids,
2563 int num_resp_ids)
2564 {
2565 int i;
2566 u32 resp_id;
2568 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
2569 num_resp_ids);
2571 for (i = 0; i < num_resp_ids; i++) {
2572 resp_id = le32_to_cpu(resp_ids[i]);
2574 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
2575 resp_id);
2577 /* TODO: free resp_id */
2578 }
2579 }
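/* The handler below implements the "pull" side of the push/pull tx
 * mode: firmware asks for up to num_msdus/num_bytes per (peer, tid)
 * record, the host pushes what it can from the matching txq, writes the
 * actual counts back into each record, and returns the records to the
 * firmware via ath10k_htt_tx_fetch_resp().
 */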
2581 static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
2582 {
2583 struct ieee80211_hw *hw = ar->hw;
2584 struct ieee80211_txq *txq;
2585 struct htt_resp *resp = (struct htt_resp *)skb->data;
2586 struct htt_tx_fetch_record *record;
2587 size_t len;
2588 size_t max_num_bytes;
2589 size_t max_num_msdus;
2590 size_t num_bytes;
2591 size_t num_msdus;
2592 const __le32 *resp_ids;
2593 u16 num_records;
2594 u16 num_resp_ids;
2595 u16 peer_id;
2596 u8 tid;
2597 int ret;
2598 int i;
2600 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
2602 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
2603 if (unlikely(skb->len < len)) {
2604 ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
2605 return;
2606 }
2608 num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
2609 num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
2611 len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
2612 len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
2614 if (unlikely(skb->len < len)) {
2615 ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
2616 return;
2617 }
2619 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
2620 num_records, num_resp_ids,
2621 le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
2623 if (!ar->htt.tx_q_state.enabled) {
2624 ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
2625 return;
2626 }
2628 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
2629 ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
2630 return;
2631 }
2633 rcu_read_lock();
2635 for (i = 0; i < num_records; i++) {
2636 record = &resp->tx_fetch_ind.records[i];
2637 peer_id = MS(le16_to_cpu(record->info),
2638 HTT_TX_FETCH_RECORD_INFO_PEER_ID);
2639 tid = MS(le16_to_cpu(record->info),
2640 HTT_TX_FETCH_RECORD_INFO_TID);
2641 max_num_msdus = le16_to_cpu(record->num_msdus);
2642 max_num_bytes = le32_to_cpu(record->num_bytes);
2644 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
2645 i, peer_id, tid, max_num_msdus, max_num_bytes);
2647 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
2648 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
2649 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2650 peer_id, tid);
2651 continue;
2652 }
2654 spin_lock_bh(&ar->data_lock);
2655 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2656 spin_unlock_bh(&ar->data_lock);
2658 /* It is okay to release the lock and use txq because RCU read
2659 * lock is held.
2660 */
2662 if (unlikely(!txq)) {
2663 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2664 peer_id, tid);
2665 continue;
2666 }
2668 num_msdus = 0;
2669 num_bytes = 0;
2671 while (num_msdus < max_num_msdus &&
2672 num_bytes < max_num_bytes) {
2673 ret = ath10k_mac_tx_push_txq(hw, txq);
2674 if (ret < 0)
2675 break;
2676 num_msdus++;
2677 num_bytes += ret;
2678 }
2681 record->num_msdus = cpu_to_le16(num_msdus);
2682 record->num_bytes = cpu_to_le32(num_bytes);
2684 ath10k_htt_tx_txq_recalc(hw, txq);
2685 }
2687 rcu_read_unlock();
2689 resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
2690 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
2692 ret = ath10k_htt_tx_fetch_resp(ar,
2693 resp->tx_fetch_ind.token,
2694 resp->tx_fetch_ind.fetch_seq_num,
2695 resp->tx_fetch_ind.records,
2696 num_records);
2697 if (unlikely(ret)) {
2698 ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
2699 le32_to_cpu(resp->tx_fetch_ind.token), ret);
2700 /* FIXME: request fw restart */
2701 }
2703 ath10k_htt_tx_txq_sync(ar);
2704 }
2706 static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
2707 struct sk_buff *skb)
2708 {
2709 const struct htt_resp *resp = (void *)skb->data;
2710 size_t len;
2711 int num_resp_ids;
2713 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
2715 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
2716 if (unlikely(skb->len < len)) {
2717 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
2718 return;
2719 }
2721 num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
2722 len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
2724 if (unlikely(skb->len < len)) {
2725 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
2726 return;
2727 }
2729 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
2730 resp->tx_fetch_confirm.resp_ids,
2731 num_resp_ids);
2732 }
2734 static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
2735 struct sk_buff *skb)
2736 {
2737 const struct htt_resp *resp = (void *)skb->data;
2738 const struct htt_tx_mode_switch_record *record;
2739 struct ieee80211_txq *txq;
2740 struct ath10k_txq *artxq;
2741 size_t len;
2742 size_t num_records;
2743 enum htt_tx_mode_switch_mode mode;
2744 bool enable;
2745 u16 info0;
2746 u16 info1;
2747 u16 threshold;
2748 u16 peer_id;
2749 u8 tid;
2750 int i;
2752 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
2754 len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
2755 if (unlikely(skb->len < len)) {
2756 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
2757 return;
2758 }
2760 info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
2761 info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
2763 enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
2764 num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
2765 mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
2766 threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
2768 ath10k_dbg(ar, ATH10K_DBG_HTT,
2769 "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
2770 info0, info1, enable, num_records, mode, threshold);
2772 len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
2774 if (unlikely(skb->len < len)) {
2775 ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
2776 return;
2777 }
2779 switch (mode) {
2780 case HTT_TX_MODE_SWITCH_PUSH:
2781 case HTT_TX_MODE_SWITCH_PUSH_PULL:
2782 break;
2783 default:
2784 ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
2785 mode);
2786 return;
2787 }
2789 if (!enable)
2790 return;
2792 ar->htt.tx_q_state.enabled = enable;
2793 ar->htt.tx_q_state.mode = mode;
2794 ar->htt.tx_q_state.num_push_allowed = threshold;
2796 rcu_read_lock();
2798 for (i = 0; i < num_records; i++) {
2799 record = &resp->tx_mode_switch_ind.records[i];
2800 info0 = le16_to_cpu(record->info0);
2801 peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
2802 tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
2804 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
2805 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
2806 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2807 peer_id, tid);
2808 continue;
2809 }
2811 spin_lock_bh(&ar->data_lock);
2812 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2813 spin_unlock_bh(&ar->data_lock);
2815 /* It is okay to release the lock and use txq because RCU read
2816 * lock is held.
2817 */
2819 if (unlikely(!txq)) {
2820 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2821 peer_id, tid);
2822 continue;
2823 }
2825 spin_lock_bh(&ar->htt.tx_lock);
2826 artxq = (void *)txq->drv_priv;
2827 artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
2828 spin_unlock_bh(&ar->htt.tx_lock);
2829 }
2831 rcu_read_unlock();
2833 ath10k_mac_tx_push_pending(ar);
2834 }
2836 void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2837 {
2838 bool release;
2840 release = ath10k_htt_t2h_msg_handler(ar, skb);
2842 /* Free the indication buffer */
2843 if (release)
2844 dev_kfree_skb_any(skb);
2845 }
2847 static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
2848 {
2849 static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
2850 18, 24, 36, 48, 54};
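/* The table above lists the four CCK rates (1, 2, 5.5 and 11 Mbps, with
 * 5.5 encoded as 5) followed by the eight OFDM rates; the index
 * returned below doubles as the legacy rate_idx reported to mac80211.
 */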
2851 int i;
2853 for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
2854 if (rate == legacy_rates[i])
2855 return i;
2856 }
2858 ath10k_warn(ar, "Invalid legacy rate %hhd peer stats", rate);
2859 return -EINVAL;
2860 }
2862 static void
2863 ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
2864 struct ath10k_sta *arsta,
2865 struct ath10k_per_peer_tx_stats *pstats,
2866 s8 legacy_rate_idx)
2867 {
2868 struct rate_info *txrate = &arsta->txrate;
2869 struct ath10k_htt_tx_stats *tx_stats;
2870 int idx, ht_idx, gi, mcs, bw, nss;
2872 if (!arsta->tx_stats)
2873 return;
2875 tx_stats = arsta->tx_stats;
2876 gi = (arsta->txrate.flags & RATE_INFO_FLAGS_SHORT_GI);
2877 ht_idx = txrate->mcs + txrate->nss * 8;
2878 mcs = txrate->mcs;
2879 bw = txrate->bw;
2880 nss = txrate->nss;
2881 idx = mcs * 8 + 8 * 10 * nss;
2882 idx += bw * 2 + gi;
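/* A sketch of the rate_table layout this index appears to assume: one
 * 80-entry block per spatial stream (up to 10 MCS values times 8
 * bandwidth/guard-interval slots), with bw * 2 + gi picking the slot
 * inside each 8-entry MCS group.
 */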
2884 #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
2886 if (txrate->flags == RATE_INFO_FLAGS_VHT_MCS) {
2887 STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
2888 STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
2889 STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
2890 STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
2891 STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
2892 STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
2893 } else if (txrate->flags == RATE_INFO_FLAGS_MCS) {
2894 STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
2895 STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
2896 STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
2897 STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
2898 STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
2899 STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
2900 } else {
2901 mcs = legacy_rate_idx;
2903 STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
2904 STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
2905 STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
2906 STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
2907 STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
2908 STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
2909 }
2911 if (ATH10K_HW_AMPDU(pstats->flags)) {
2912 tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);
2914 if (txrate->flags == RATE_INFO_FLAGS_MCS) {
2915 STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
2916 pstats->succ_bytes + pstats->retry_bytes;
2917 STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
2918 pstats->succ_pkts + pstats->retry_pkts;
2919 } else {
2920 STATS_OP_FMT(AMPDU).vht[0][mcs] +=
2921 pstats->succ_bytes + pstats->retry_bytes;
2922 STATS_OP_FMT(AMPDU).vht[1][mcs] +=
2923 pstats->succ_pkts + pstats->retry_pkts;
2924 }
2925 STATS_OP_FMT(AMPDU).bw[0][bw] +=
2926 pstats->succ_bytes + pstats->retry_bytes;
2927 STATS_OP_FMT(AMPDU).nss[0][nss] +=
2928 pstats->succ_bytes + pstats->retry_bytes;
2929 STATS_OP_FMT(AMPDU).gi[0][gi] +=
2930 pstats->succ_bytes + pstats->retry_bytes;
2931 STATS_OP_FMT(AMPDU).rate_table[0][idx] +=
2932 pstats->succ_bytes + pstats->retry_bytes;
2933 STATS_OP_FMT(AMPDU).bw[1][bw] +=
2934 pstats->succ_pkts + pstats->retry_pkts;
2935 STATS_OP_FMT(AMPDU).nss[1][nss] +=
2936 pstats->succ_pkts + pstats->retry_pkts;
2937 STATS_OP_FMT(AMPDU).gi[1][gi] +=
2938 pstats->succ_pkts + pstats->retry_pkts;
2939 STATS_OP_FMT(AMPDU).rate_table[1][idx] +=
2940 pstats->succ_pkts + pstats->retry_pkts;
2941 } else {
2942 tx_stats->ack_fails +=
2943 ATH10K_HW_BA_FAIL(pstats->flags);
2944 }
2946 STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
2947 STATS_OP_FMT(SUCC).nss[0][nss] += pstats->succ_bytes;
2948 STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;
2950 STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
2951 STATS_OP_FMT(SUCC).nss[1][nss] += pstats->succ_pkts;
2952 STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;
2954 STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
2955 STATS_OP_FMT(FAIL).nss[0][nss] += pstats->failed_bytes;
2956 STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;
2958 STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
2959 STATS_OP_FMT(FAIL).nss[1][nss] += pstats->failed_pkts;
2960 STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;
2962 STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
2963 STATS_OP_FMT(RETRY).nss[0][nss] += pstats->retry_bytes;
2964 STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;
2966 STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
2967 STATS_OP_FMT(RETRY).nss[1][nss] += pstats->retry_pkts;
2968 STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
2970 if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
2971 STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;
2972 STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;
2973 STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;
2974 STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;
2975 STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
2976 STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
2977 }
2979 tx_stats->tx_duration += pstats->duration;
2980 }
2982 static void
2983 ath10k_update_per_peer_tx_stats(struct ath10k *ar,
2984 struct ieee80211_sta *sta,
2985 struct ath10k_per_peer_tx_stats *peer_stats)
2986 {
2987 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
2988 struct ieee80211_chanctx_conf *conf = NULL;
2989 u8 rate = 0, sgi;
2990 s8 rate_idx = 0;
2991 bool skip_auto_rate;
2992 struct rate_info txrate;
2994 lockdep_assert_held(&ar->data_lock);
2996 txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
2997 txrate.bw = ATH10K_HW_BW(peer_stats->flags);
2998 txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
2999 txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
3000 sgi = ATH10K_HW_GI(peer_stats->flags);
3001 skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags);
3003 /* Firmware's rate control skips broadcast/management frames, frames
3004 * sent when the host has configured fixed rates, and some other special cases.
3005 */
3006 if (skip_auto_rate)
3007 return;
3009 if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
3010 ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
3011 return;
3012 }
3014 if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
3015 (txrate.mcs > 7 || txrate.nss < 1)) {
3016 ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
3017 txrate.mcs, txrate.nss);
3018 return;
3019 }
3021 memset(&arsta->txrate, 0, sizeof(arsta->txrate));
3022 memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
3023 if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
3024 txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
3025 rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
3026 /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
3027 if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
3028 rate = 5;
3029 rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
3030 if (rate_idx < 0)
3031 return;
3032 arsta->txrate.legacy = rate;
3033 } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
3034 arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
3035 arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
3036 } else {
3037 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
3038 arsta->txrate.mcs = txrate.mcs;
3039 }
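/* For HT the mac80211 rate_info MCS index already encodes the stream
 * count (MCS 0-7 for one stream, 8-15 for two, and so on), hence the
 * mcs + 8 * (nss - 1) mapping above; VHT keeps mcs and nss separate.
 */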
3041 switch (txrate.flags) {
3042 case WMI_RATE_PREAMBLE_OFDM:
3043 if (arsta->arvif && arsta->arvif->vif)
3044 conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
3045 if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
3046 arsta->tx_info.status.rates[0].idx = rate_idx - 4;
3047 break;
3048 case WMI_RATE_PREAMBLE_CCK:
3049 arsta->tx_info.status.rates[0].idx = rate_idx;
3050 if (sgi)
3051 arsta->tx_info.status.rates[0].flags |=
3052 (IEEE80211_TX_RC_USE_SHORT_PREAMBLE |
3053 IEEE80211_TX_RC_SHORT_GI);
3054 break;
3055 case WMI_RATE_PREAMBLE_HT:
3056 arsta->tx_info.status.rates[0].idx =
3057 txrate.mcs + ((txrate.nss - 1) * 8);
3058 if (sgi)
3059 arsta->tx_info.status.rates[0].flags |=
3060 IEEE80211_TX_RC_SHORT_GI;
3061 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
3062 break;
3063 case WMI_RATE_PREAMBLE_VHT:
3064 ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0],
3065 txrate.mcs, txrate.nss);
3066 if (sgi)
3067 arsta->tx_info.status.rates[0].flags |=
3068 IEEE80211_TX_RC_SHORT_GI;
3069 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
3070 break;
3071 }
3073 arsta->txrate.nss = txrate.nss;
3074 arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
3075 if (sgi)
3076 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
3078 switch (arsta->txrate.bw) {
3079 case RATE_INFO_BW_40:
3080 arsta->tx_info.status.rates[0].flags |=
3081 IEEE80211_TX_RC_40_MHZ_WIDTH;
3082 break;
3083 case RATE_INFO_BW_80:
3084 arsta->tx_info.status.rates[0].flags |=
3085 IEEE80211_TX_RC_80_MHZ_WIDTH;
3086 break;
3087 }
3089 if (peer_stats->succ_pkts) {
3090 arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
3091 arsta->tx_info.status.rates[0].count = 1;
3092 ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
3093 }
3095 if (ath10k_debug_is_extd_tx_stats_enabled(ar))
3096 ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
3097 rate_idx);
3098 }
3100 static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
3101 struct sk_buff *skb)
3102 {
3103 struct htt_resp *resp = (struct htt_resp *)skb->data;
3104 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
3105 struct htt_per_peer_tx_stats_ind *tx_stats;
3106 struct ieee80211_sta *sta;
3107 struct ath10k_peer *peer;
3108 int peer_id, i;
3109 u8 ppdu_len, num_ppdu;
3111 num_ppdu = resp->peer_tx_stats.num_ppdu;
3112 ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
3114 if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
3115 ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
3116 return;
3117 }
3119 tx_stats = (struct htt_per_peer_tx_stats_ind *)
3120 (resp->peer_tx_stats.payload);
3121 peer_id = __le16_to_cpu(tx_stats->peer_id);
3123 rcu_read_lock();
3124 spin_lock_bh(&ar->data_lock);
3125 peer = ath10k_peer_find_by_id(ar, peer_id);
3126 if (!peer || !peer->sta) {
3127 ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
3128 peer_id);
3129 goto out;
3130 }
3132 sta = peer->sta;
3133 for (i = 0; i < num_ppdu; i++) {
3134 tx_stats = (struct htt_per_peer_tx_stats_ind *)
3135 (resp->peer_tx_stats.payload + i * ppdu_len);
3137 p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
3138 p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
3139 p_tx_stats->failed_bytes =
3140 __le32_to_cpu(tx_stats->failed_bytes);
3141 p_tx_stats->ratecode = tx_stats->ratecode;
3142 p_tx_stats->flags = tx_stats->flags;
3143 p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
3144 p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
3145 p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
3146 p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);
3148 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
3149 }
3151 out:
3152 spin_unlock_bh(&ar->data_lock);
3153 rcu_read_unlock();
3154 }
3156 static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
3157 {
3158 struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
3159 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
3160 struct ath10k_10_2_peer_tx_stats *tx_stats;
3161 struct ieee80211_sta *sta;
3162 struct ath10k_peer *peer;
3163 u16 log_type = __le16_to_cpu(hdr->log_type);
3164 u32 peer_id = 0, i;
3166 if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
3167 return;
3169 tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
3170 ATH10K_10_2_TX_STATS_OFFSET);
3172 if (!tx_stats->tx_ppdu_cnt)
3173 return;
3175 peer_id = tx_stats->peer_id;
3177 rcu_read_lock();
3178 spin_lock_bh(&ar->data_lock);
3179 peer = ath10k_peer_find_by_id(ar, peer_id);
3180 if (!peer || !peer->sta) {
3181 ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
3182 peer_id);
3183 goto out;
3184 }
3186 sta = peer->sta;
3187 for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
3188 p_tx_stats->succ_bytes =
3189 __le16_to_cpu(tx_stats->success_bytes[i]);
3190 p_tx_stats->retry_bytes =
3191 __le16_to_cpu(tx_stats->retry_bytes[i]);
3192 p_tx_stats->failed_bytes =
3193 __le16_to_cpu(tx_stats->failed_bytes[i]);
3194 p_tx_stats->ratecode = tx_stats->ratecode[i];
3195 p_tx_stats->flags = tx_stats->flags[i];
3196 p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
3197 p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
3198 p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
3200 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
3201 }
3202 spin_unlock_bh(&ar->data_lock);
3203 rcu_read_unlock();
3205 return;
3207 out:
3208 spin_unlock_bh(&ar->data_lock);
3209 rcu_read_unlock();
3210 }
3212 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
3213 {
3214 struct ath10k_htt *htt = &ar->htt;
3215 struct htt_resp *resp = (struct htt_resp *)skb->data;
3216 enum htt_t2h_msg_type type;
3218 /* confirm alignment */
3219 if (!IS_ALIGNED((unsigned long)skb->data, 4))
3220 ath10k_warn(ar, "unaligned htt message, expect trouble\n");
3222 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
3223 resp->hdr.msg_type);
3225 if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
3226 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
3227 resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
3228 return true;
3229 }
3230 type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
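/* resp->hdr.msg_type is a firmware-specific id; t2h_msg_types[] is a
 * per-firmware translation table set up at init time that maps it onto
 * the generic htt_t2h_msg_type enum dispatched on below, so firmware
 * generations can renumber messages without touching this handler.
 */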
3232 switch (type) {
3233 case HTT_T2H_MSG_TYPE_VERSION_CONF: {
3234 htt->target_version_major = resp->ver_resp.major;
3235 htt->target_version_minor = resp->ver_resp.minor;
3236 complete(&htt->target_version_received);
3237 break;
3238 }
3239 case HTT_T2H_MSG_TYPE_RX_IND:
3240 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
3241 return ath10k_htt_rx_proc_rx_ind_hl(htt,
3242 &resp->rx_ind_hl,
3243 skb);
3244 else
3245 ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
3246 break;
3247 case HTT_T2H_MSG_TYPE_PEER_MAP: {
3248 struct htt_peer_map_event ev = {
3249 .vdev_id = resp->peer_map.vdev_id,
3250 .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
3251 };
3252 memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
3253 ath10k_peer_map_event(htt, &ev);
3254 break;
3255 }
3256 case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
3257 struct htt_peer_unmap_event ev = {
3258 .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
3259 };
3260 ath10k_peer_unmap_event(htt, &ev);
3261 break;
3262 }
3263 case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
3264 struct htt_tx_done tx_done = {};
3265 int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
3266 int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
3268 tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
3270 switch (status) {
3271 case HTT_MGMT_TX_STATUS_OK:
3272 tx_done.status = HTT_TX_COMPL_STATE_ACK;
3273 if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
3274 ar->wmi.svc_map) &&
3275 (resp->mgmt_tx_completion.flags &
3276 HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
3277 tx_done.ack_rssi =
3278 FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
3279 info);
3280 }
3281 break;
3282 case HTT_MGMT_TX_STATUS_RETRY:
3283 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
3284 break;
3285 case HTT_MGMT_TX_STATUS_DROP:
3286 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
3287 break;
3288 }
3290 status = ath10k_txrx_tx_unref(htt, &tx_done);
3291 if (!status) {
3292 spin_lock_bh(&htt->tx_lock);
3293 ath10k_htt_tx_mgmt_dec_pending(htt);
3294 spin_unlock_bh(&htt->tx_lock);
3295 }
3296 break;
3297 }
3298 case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
3299 ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
3300 break;
3301 case HTT_T2H_MSG_TYPE_SEC_IND: {
3302 struct ath10k *ar = htt->ar;
3303 struct htt_security_indication *ev = &resp->security_indication;
3305 ath10k_dbg(ar, ATH10K_DBG_HTT,
3306 "sec ind peer_id %d unicast %d type %d\n",
3307 __le16_to_cpu(ev->peer_id),
3308 !!(ev->flags & HTT_SECURITY_IS_UNICAST),
3309 MS(ev->flags, HTT_SECURITY_TYPE));
3310 complete(&ar->install_key_done);
3311 break;
3312 }
3313 case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
3314 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
3315 skb->data, skb->len);
3316 atomic_inc(&htt->num_mpdus_ready);
3317 break;
3318 }
3319 case HTT_T2H_MSG_TYPE_TEST:
3320 break;
3321 case HTT_T2H_MSG_TYPE_STATS_CONF:
3322 trace_ath10k_htt_stats(ar, skb->data, skb->len);
3323 break;
3324 case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
3325 /* Firmware can return tx frames if it's unable to fully
3326 * process them and suspects host may be able to fix it. ath10k
3327 * sends all tx frames as already inspected so this shouldn't
3328 * happen unless fw has a bug.
3329 */
3330 ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
3331 break;
3332 case HTT_T2H_MSG_TYPE_RX_ADDBA:
3333 ath10k_htt_rx_addba(ar, resp);
3334 break;
3335 case HTT_T2H_MSG_TYPE_RX_DELBA:
3336 ath10k_htt_rx_delba(ar, resp);
3337 break;
3338 case HTT_T2H_MSG_TYPE_PKTLOG: {
3339 trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
3340 skb->len -
3341 offsetof(struct htt_resp,
3342 pktlog_msg.payload));
3344 if (ath10k_peer_stats_enabled(ar))
3345 ath10k_fetch_10_2_tx_stats(ar,
3346 resp->pktlog_msg.payload);
3347 break;
3348 }
3349 case HTT_T2H_MSG_TYPE_RX_FLUSH: {
3350 /* Ignore this event because mac80211 takes care of Rx
3351 * aggregation reordering.
3352 */
3353 break;
3354 }
3355 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
3356 skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
3357 return false;
3358 }
3359 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
3360 break;
3361 case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
3362 u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
3363 u32 freq = __le32_to_cpu(resp->chan_change.freq);
3365 ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
3366 ath10k_dbg(ar, ATH10K_DBG_HTT,
3367 "htt chan change freq %u phymode %s\n",
3368 freq, ath10k_wmi_phymode_str(phymode));
3369 break;
3370 }
3371 case HTT_T2H_MSG_TYPE_AGGR_CONF:
3372 break;
3373 case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
3374 struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
3376 if (!tx_fetch_ind) {
3377 ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
3378 break;
3379 }
3380 skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
3381 break;
3382 }
3383 case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
3384 ath10k_htt_rx_tx_fetch_confirm(ar, skb);
3385 break;
3386 case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
3387 ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
3388 break;
3389 case HTT_T2H_MSG_TYPE_PEER_STATS:
3390 ath10k_htt_fetch_peer_stats(ar, skb);
3391 break;
3392 case HTT_T2H_MSG_TYPE_EN_STATS:
3393 default:
3394 ath10k_warn(ar, "htt event (%d) not handled\n",
3395 resp->hdr.msg_type);
3396 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
3397 skb->data, skb->len);
3398 break;
3399 }
3400 return true;
3401 }
3402 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
3404 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
3405 struct sk_buff *skb)
3406 {
3407 trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
3408 dev_kfree_skb_any(skb);
3409 }
3410 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
3412 static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
3413 {
3414 struct sk_buff *skb;
3416 while (quota < budget) {
3417 if (skb_queue_empty(&ar->htt.rx_msdus_q))
3418 break;
3420 skb = skb_dequeue(&ar->htt.rx_msdus_q);
3421 if (!skb)
3422 break;
3423 ath10k_process_rx(ar, skb);
3424 quota++;
3425 }
3427 return quota;
3428 }
3430 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
3431 {
3432 struct ath10k_htt *htt = &ar->htt;
3433 struct htt_tx_done tx_done = {};
3434 struct sk_buff_head tx_ind_q;
3435 struct sk_buff *skb;
3436 unsigned long flags;
3437 int quota = 0, done, ret;
3438 bool resched_napi = false;
3440 __skb_queue_head_init(&tx_ind_q);
3442 /* Process pending frames before dequeuing more data
3443 * from hardware.
3444 */
3445 quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
3446 if (quota == budget) {
3447 resched_napi = true;
3448 goto exit;
3449 }
3451 while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
3452 spin_lock_bh(&htt->rx_ring.lock);
3453 ret = ath10k_htt_rx_in_ord_ind(ar, skb);
3454 spin_unlock_bh(&htt->rx_ring.lock);
3456 dev_kfree_skb_any(skb);
3457 if (ret == -EIO) {
3458 resched_napi = true;
3459 goto exit;
3460 }
3461 }
3463 while (atomic_read(&htt->num_mpdus_ready)) {
3464 ret = ath10k_htt_rx_handle_amsdu(htt);
3465 if (ret == -EIO) {
3466 resched_napi = true;
3467 goto exit;
3468 }
3469 atomic_dec(&htt->num_mpdus_ready);
3470 }
3472 /* Deliver received data after processing data from hardware */
3473 quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
3475 /* From NAPI documentation:
3476 * The napi poll() function may also process TX completions, in which
3477 * case if it processes the entire TX ring then it should count that
3478 * work as the rest of the budget.
3479 */
3480 if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
3481 quota = budget;
3483 /* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
3484 * From kfifo_get() documentation:
3485 * Note that with only one concurrent reader and one concurrent writer,
3486 * you don't need extra locking to use these macro.
3487 */
3488 while (kfifo_get(&htt->txdone_fifo, &tx_done))
3489 ath10k_txrx_tx_unref(htt, &tx_done);
3491 ath10k_mac_tx_push_pending(ar);
3493 spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
3494 skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
3495 spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
3497 while ((skb = __skb_dequeue(&tx_ind_q))) {
3498 ath10k_htt_rx_tx_fetch_ind(ar, skb);
3499 dev_kfree_skb_any(skb);
3500 }
3502 exit:
3503 ath10k_htt_rx_msdu_buff_replenish(htt);
3504 /* In case of rx failure or more data to read, report budget
3505 * to reschedule NAPI poll
3506 */
3507 done = resched_napi ? budget : quota;
3509 return done;
3510 }
3511 EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
3513 static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
3514 .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
3515 .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
3516 .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
3517 .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
3518 .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
3519 };
3521 static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
3522 .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
3523 .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
3524 .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
3525 .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
3526 .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
3527 };
3529 static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
3530 };
3532 void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
3533 {
3534 struct ath10k *ar = htt->ar;
3536 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
3537 htt->rx_ops = &htt_rx_ops_hl;
3538 else if (ar->hw_params.target_64bit)
3539 htt->rx_ops = &htt_rx_ops_64;
3540 else
3541 htt->rx_ops = &htt_rx_ops_32;
3542 }
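/* The hl ops table above is intentionally empty: high-latency targets
 * (e.g. SDIO-based devices) deliver frames inline over HTC rather than
 * through a host-managed rx ring, so none of the paddr ring callbacks
 * apply and every op is left NULL.
 */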