1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2020-2023 Intel Corporation
10 #include "fw/api/commands.h"
11 #include "fw/api/tx.h"
12 #include "fw/api/datapath.h"
16 #include <linux/dmapool.h>
19 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
21 static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
22 struct iwl_txq *txq, u16 byte_cnt,
25 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
26 u8 filled_tfd_size, num_fetch_chunks;
30 if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
33 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
34 num_tbs * sizeof(struct iwl_tfh_tb);
36 * filled_tfd_size contains the number of filled bytes in the TFD.
37 * Dividing it by 64 will give the number of chunks to fetch
38 * to SRAM - 0 for one chunk, 1 for 2 and so on.
39 * If, for example, TFD contains only 3 TBs then 32 bytes
40 * of the TFD are used, and only one chunk of 64 bytes should
43 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
45 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
46 struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
48 /* Starting from AX210, the HW expects bytes */
49 WARN_ON(trans->txqs.bc_table_dword);
50 WARN_ON(len > 0x3FFF);
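/* AX210+ entry layout: byte count in bits 0..13, fetch-chunk count in bits 14..15 */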
51 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
52 scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
54 struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
56 /* Before AX210, the HW expects DW */
57 WARN_ON(!trans->txqs.bc_table_dword);
58 len = DIV_ROUND_UP(len, 4);
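/* pre-AX210 entry layout: dword count in bits 0..11, fetch-chunk count in bits 12..13 */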
60 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
61 scd_bc_tbl->tfd_offset[idx] = bc_ent;
66 * iwl_txq_inc_wr_ptr - Send new write index to hardware
68 void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
70 lockdep_assert_held(&txq->lock);
72 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
75 * if not in power-save mode, uCode will never sleep when we're
76 * trying to tx (during RFKILL, we're not trying to tx).
78 iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
81 static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
82 struct iwl_tfh_tfd *tfd)
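/* only the low 5 bits of num_tbs carry the TB count */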
84 return le16_to_cpu(tfd->num_tbs) & 0x1f;
87 void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
88 struct iwl_tfh_tfd *tfd)
92 /* Sanity check on number of chunks */
93 num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);
95 if (num_tbs > trans->txqs.tfd.max_tbs) {
96 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
100 /* first TB is never freed - it's the bidirectional DMA data */
101 for (i = 1; i < num_tbs; i++) {
102 if (meta->tbs & BIT(i))
103 dma_unmap_page(trans->dev,
104 le64_to_cpu(tfd->tbs[i].addr),
105 le16_to_cpu(tfd->tbs[i].tb_len),
108 dma_unmap_single(trans->dev,
109 le64_to_cpu(tfd->tbs[i].addr),
110 le16_to_cpu(tfd->tbs[i].tb_len),
117 void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
119 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
120 * idx is bounded by n_window
122 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
125 lockdep_assert_held(&txq->lock);
130 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
131 iwl_txq_get_tfd(trans, txq, idx));
133 skb = txq->entries[idx].skb;
135 /* Can be called from irqs-disabled context
136 * If skb is not NULL, it means that the whole queue is being
137 * freed and that the queue is not empty - free the skb
140 iwl_op_mode_free_skb(trans->op_mode, skb);
141 txq->entries[idx].skb = NULL;
145 int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
146 dma_addr_t addr, u16 len)
148 int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
149 struct iwl_tfh_tb *tb;
152 * Only WARN here so we know about the issue, but we mess up our
153 * unmap path because not every place currently checks for errors
154 * returned from this function - it can only return an error if
155 * there's no more space, and so when we know there is enough we
156 * don't always check ...
158 WARN(iwl_txq_crosses_4g_boundary(addr, len),
159 "possible DMA problem with iova:0x%llx, len:%d\n",
160 (unsigned long long)addr, len);
162 if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
166 /* Each TFD can point to a maximum of max_tbs Tx buffers */
167 if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
168 IWL_ERR(trans, "Error can not send more than %d chunks\n",
169 trans->txqs.tfd.max_tbs);
173 put_unaligned_le64(addr, &tb->addr);
174 tb->tb_len = cpu_to_le16(len);
176 tfd->num_tbs = cpu_to_le16(idx + 1);
181 static struct page *get_workaround_page(struct iwl_trans *trans,
184 struct page **page_ptr;
187 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
189 ret = alloc_page(GFP_ATOMIC);
193 /* set the chaining pointer to the previous page, if there is one */
194 *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
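/* pages chained this way are freed later by iwl_txq_free_tso_page(), which walks these per-page pointers */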
201 * Add a TB and if needed apply the FH HW bug workaround;
202 * meta != NULL indicates that it's a page mapping and we
203 * need to dma_unmap_page() and set the meta->tbs bit in
206 static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
208 struct iwl_tfh_tfd *tfd,
209 dma_addr_t phys, void *virt,
210 u16 len, struct iwl_cmd_meta *meta)
212 dma_addr_t oldphys = phys;
216 if (unlikely(dma_mapping_error(trans->dev, phys)))
219 if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
220 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
226 meta->tbs |= BIT(ret);
233 * Work around a hardware bug. If (as expressed in the
234 * condition above) the TB ends on a 32-bit boundary,
235 * then the next TB may be accessed with the wrong
236 * address.
237 * To work around it, copy the data elsewhere and make
238 * a new mapping for it so the device will not fail.
241 if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
246 page = get_workaround_page(trans, skb);
252 memcpy(page_address(page), virt, len);
254 phys = dma_map_single(trans->dev, page_address(page), len,
256 if (unlikely(dma_mapping_error(trans->dev, phys)))
258 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
260 /* unmap the new allocation as single */
266 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
267 len, (unsigned long long)oldphys, (unsigned long long)phys);
272 dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
274 dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
276 trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
282 struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
285 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
286 struct page **page_ptr;
288 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
290 if (WARN_ON(*page_ptr))
297 * Check if there's enough room on this page
299 * Note that we put a page chaining pointer *last* in the
300 * page - we need it somewhere, and if it's there then we
301 * avoid DMA mapping the last bits of the page which may
302 * trigger the 32-bit boundary hardware bug.
304 * (see also get_workaround_page() above)
306 if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
310 /* We don't have enough room on this page, get a new one. */
311 __free_page(p->page);
314 p->page = alloc_page(GFP_ATOMIC);
317 p->pos = page_address(p->page);
318 /* set the chaining pointer to NULL */
319 *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
327 static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
329 struct iwl_tfh_tfd *tfd, int start_len,
331 struct iwl_device_tx_cmd *dev_cmd)
334 struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
335 struct ieee80211_hdr *hdr = (void *)skb->data;
336 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
337 unsigned int mss = skb_shinfo(skb)->gso_size;
338 u16 length, amsdu_pad;
340 struct iwl_tso_hdr_page *hdr_page;
343 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
344 &dev_cmd->hdr, start_len, 0);
346 ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
347 snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
348 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
351 /* total amount of header we may need for this A-MSDU */
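/* per MSS-sized subframe: up to 3 bytes of pad, an 802.3 header and the SNAP/IP/TCP headers */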
352 hdr_room = DIV_ROUND_UP(total_len, mss) *
353 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
355 /* Our device supports 9 segments at most, so the headers will fit in one page */
356 hdr_page = get_page_hdr(trans, hdr_room, skb);
360 start_hdr = hdr_page->pos;
363 * Pull the ieee80211 header to be able to use TSO core,
364 * we will restore it for the tx_status flow.
366 skb_pull(skb, hdr_len);
369 * Remove the length of all the headers that we don't actually
370 * have in the MPDU by themselves, but that we duplicate into
371 * all the different MSDUs inside the A-MSDU.
373 le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
375 tso_start(skb, &tso);
378 /* this is the data left for this subframe */
379 unsigned int data_left = min_t(unsigned int, mss, total_len);
382 u8 *subf_hdrs_start = hdr_page->pos;
384 total_len -= data_left;
386 memset(hdr_page->pos, 0, amsdu_pad);
387 hdr_page->pos += amsdu_pad;
388 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
390 ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
391 hdr_page->pos += ETH_ALEN;
392 ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
393 hdr_page->pos += ETH_ALEN;
395 length = snap_ip_tcp_hdrlen + data_left;
396 *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
397 hdr_page->pos += sizeof(length);
400 * This will copy the SNAP as well which will be considered
401 * as MAC header.
402 */
403 tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
405 hdr_page->pos += snap_ip_tcp_hdrlen;
407 tb_len = hdr_page->pos - start_hdr;
408 tb_phys = dma_map_single(trans->dev, start_hdr,
409 tb_len, DMA_TO_DEVICE);
410 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
413 * No need for _with_wa, this is from the TSO page and
414 * we leave some space at the end of it so we can't hit
415 * the buggy scenario.
417 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
418 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
420 /* add this subframe's headers' length to the tx_cmd */
421 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
423 /* prepare the start_hdr for the next subframe */
424 start_hdr = hdr_page->pos;
426 /* put the payload */
430 tb_len = min_t(unsigned int, tso.size, data_left);
431 tb_phys = dma_map_single(trans->dev, tso.data,
432 tb_len, DMA_TO_DEVICE);
433 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
440 tso_build_data(skb, &tso, tb_len);
444 /* re-add the WiFi header */
445 skb_push(skb, hdr_len);
455 iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
457 struct iwl_device_tx_cmd *dev_cmd,
459 struct iwl_cmd_meta *out_meta,
463 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
464 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
469 tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
472 * No need for _with_wa, the first TB allocation is aligned up
473 * to a 64-byte boundary and thus can't be at the end or cross
474 * a page boundary (much less a 2^32 boundary).
476 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
479 * The second TB (tb1) points to the remainder of the TX command
480 * and the 802.11 header - dword aligned size
481 * (This calculation modifies the TX command, so do it before the
482 * setup of the first TB)
484 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
487 /* do not align A-MSDU to dword as the subframe header aligns it */
489 /* map the data for TB1 */
490 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
491 tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
492 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
495 * No need for _with_wa(), we ensure (via alignment) that the data
496 * here can never cross or end at a page boundary.
498 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
500 if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
504 /* building the A-MSDU might have changed this data, memcpy it now */
505 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
509 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
513 static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
515 struct iwl_tfh_tfd *tfd,
516 struct iwl_cmd_meta *out_meta)
520 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
521 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
523 unsigned int fragsz = skb_frag_size(frag);
529 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
530 fragsz, DMA_TO_DEVICE);
531 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
532 skb_frag_address(frag),
542 iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
544 struct iwl_device_tx_cmd *dev_cmd,
546 struct iwl_cmd_meta *out_meta,
551 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
552 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
554 int len, tb1_len, tb2_len;
556 struct sk_buff *frag;
558 tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
560 /* The first TB points to bi-directional DMA data */
561 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
564 * No need for _with_wa, the first TB allocation is aligned up
565 * to a 64-byte boundary and thus can't be at the end or cross
566 * a page boundary (much less a 2^32 boundary).
568 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
571 * The second TB (tb1) points to the remainder of the TX command
572 * and the 802.11 header - dword aligned size
573 * (This calculation modifies the TX command, so do it before the
574 * setup of the first TB)
576 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
580 tb1_len = ALIGN(len, 4);
584 /* map the data for TB1 */
585 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
586 tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
587 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
590 * No need for _with_wa(), we ensure (via alignment) that the data
591 * here can never cross or end at a page boundary.
593 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
594 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
595 IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
597 /* set up TFD's third entry to point to remainder of skb's head */
598 tb2_len = skb_headlen(skb) - hdr_len;
603 tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
604 tb2_len, DMA_TO_DEVICE);
605 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
606 skb->data + hdr_len, tb2_len,
612 if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
615 skb_walk_frags(skb, frag) {
618 tb_phys = dma_map_single(trans->dev, frag->data,
619 skb_headlen(frag), DMA_TO_DEVICE);
620 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
622 skb_headlen(frag), NULL);
625 if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
632 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
637 struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
639 struct iwl_device_tx_cmd *dev_cmd,
641 struct iwl_cmd_meta *out_meta)
643 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
644 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
645 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
649 /* There must be data left over for TB1 or this code must be changed */
650 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
651 BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
652 offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
654 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
655 BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
656 offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
659 memset(tfd, 0, sizeof(*tfd));
661 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
662 len = sizeof(struct iwl_tx_cmd_gen2);
664 len = sizeof(struct iwl_tx_cmd_gen3);
666 amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
667 (*ieee80211_get_qos_ctl(hdr) &
668 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
670 hdr_len = ieee80211_hdrlen(hdr->frame_control);
673 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
674 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
675 * built in the higher layers already.
677 if (amsdu && skb_shinfo(skb)->gso_size)
678 return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
679 out_meta, hdr_len, len);
680 return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
681 hdr_len, len, !amsdu);
684 int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
690 * To avoid ambiguity between empty and completely full queues, there
691 * should always be less than max_tfd_queue_size elements in the queue.
692 * If q->n_window is smaller than max_tfd_queue_size, there is no need
693 * to reserve any queue entries for this purpose.
695 if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
698 max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
701 * max_tfd_queue_size is a power of 2, so the following is equivalent to
702 * modulo by max_tfd_queue_size and is well defined.
704 used = (q->write_ptr - q->read_ptr) &
705 (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
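/* e.g. with a 256-entry queue, write_ptr 5 and read_ptr 250: (5 - 250) & 255 == 11 entries in use */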
707 if (WARN_ON(used > max))
713 int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
714 struct iwl_device_tx_cmd *dev_cmd, int txq_id)
716 struct iwl_cmd_meta *out_meta;
717 struct iwl_txq *txq = trans->txqs.txq[txq_id];
722 if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
723 "queue %d out of range", txq_id))
726 if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
727 "TX on unused queue %d\n", txq_id))
730 if (skb_is_nonlinear(skb) &&
731 skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
732 __skb_linearize(skb))
735 spin_lock(&txq->lock);
737 if (iwl_txq_space(trans, txq) < txq->high_mark) {
738 iwl_txq_stop(trans, txq);
740 /* don't put the packet on the ring if there is no room */
741 if (unlikely(iwl_txq_space(trans, txq) < 3)) {
742 struct iwl_device_tx_cmd **dev_cmd_ptr;
744 dev_cmd_ptr = (void *)((u8 *)skb->cb +
745 trans->txqs.dev_cmd_offs);
747 *dev_cmd_ptr = dev_cmd;
748 __skb_queue_tail(&txq->overflow_q, skb);
749 spin_unlock(&txq->lock);
754 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
756 /* Set up driver data for this TFD */
757 txq->entries[idx].skb = skb;
758 txq->entries[idx].cmd = dev_cmd;
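/* the command header sequence number encodes the TX queue id and the slot index within it */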
760 dev_cmd->hdr.sequence =
761 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
762 INDEX_TO_SEQ(idx)));
764 /* Set up first empty entry in queue's array of Tx/cmd buffers */
765 out_meta = &txq->entries[idx].meta;
768 tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
770 spin_unlock(&txq->lock);
774 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
775 struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
776 (void *)dev_cmd->payload;
778 cmd_len = le16_to_cpu(tx_cmd_gen3->len);
780 struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
781 (void *)dev_cmd->payload;
783 cmd_len = le16_to_cpu(tx_cmd_gen2->len);
786 /* Set up entry for this TFD in Tx byte-count array */
787 iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
788 iwl_txq_gen2_get_num_tbs(trans, tfd));
790 /* start timer if queue currently empty */
791 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
792 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
794 /* Tell device the write index *just past* this latest filled TFD */
795 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
796 iwl_txq_inc_wr_ptr(trans, txq);
798 * At this point the frame is "transmitted" successfully
799 * and we will get a TX status notification eventually.
801 spin_unlock(&txq->lock);
805 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
808 * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
810 void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
812 struct iwl_txq *txq = trans->txqs.txq[txq_id];
814 spin_lock_bh(&txq->lock);
815 while (txq->write_ptr != txq->read_ptr) {
816 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
817 txq_id, txq->read_ptr);
819 if (txq_id != trans->txqs.cmd.q_id) {
820 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
821 struct sk_buff *skb = txq->entries[idx].skb;
823 if (!WARN_ON_ONCE(!skb))
824 iwl_txq_free_tso_page(trans, skb);
826 iwl_txq_gen2_free_tfd(trans, txq);
827 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
830 while (!skb_queue_empty(&txq->overflow_q)) {
831 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
833 iwl_op_mode_free_skb(trans->op_mode, skb);
836 spin_unlock_bh(&txq->lock);
838 /* just in case - this queue may have been stopped */
839 iwl_wake_queue(trans, txq);
842 static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
845 struct device *dev = trans->dev;
847 /* De-alloc circular buffer of TFDs */
849 dma_free_coherent(dev,
850 trans->txqs.tfd.size * txq->n_window,
851 txq->tfds, txq->dma_addr);
852 dma_free_coherent(dev,
853 sizeof(*txq->first_tb_bufs) * txq->n_window,
854 txq->first_tb_bufs, txq->first_tb_dma);
858 if (txq->bc_tbl.addr)
859 dma_pool_free(trans->txqs.bc_pool,
860 txq->bc_tbl.addr, txq->bc_tbl.dma);
865 * iwl_txq_gen2_free - Deallocate DMA queue.
866 * @txq_id: transmit queue index to deallocate.
868 * Empty queue by removing and destroying all BD's.
870 * 0-fill, but do not free "txq" descriptor structure.
872 static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
877 if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
878 "queue %d out of range", txq_id))
881 txq = trans->txqs.txq[txq_id];
886 iwl_txq_gen2_unmap(trans, txq_id);
888 /* De-alloc array of command/tx buffers */
889 if (txq_id == trans->txqs.cmd.q_id)
890 for (i = 0; i < txq->n_window; i++) {
891 kfree_sensitive(txq->entries[i].cmd);
892 kfree_sensitive(txq->entries[i].free_buf);
894 del_timer_sync(&txq->stuck_timer);
896 iwl_txq_gen2_free_memory(trans, txq);
898 trans->txqs.txq[txq_id] = NULL;
900 clear_bit(txq_id, trans->txqs.queue_used);
904 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
906 static int iwl_queue_init(struct iwl_txq *q, int slots_num)
908 q->n_window = slots_num;
910 /* slots_num must be power-of-two size, otherwise
911 * iwl_txq_get_cmd_index is broken. */
912 if (WARN_ON(!is_power_of_2(slots_num)))
915 q->low_mark = q->n_window / 4;
919 q->high_mark = q->n_window / 8;
920 if (q->high_mark < 2)
929 int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
933 u32 tfd_queue_max_size =
934 trans->trans_cfg->base_params->max_tfd_queue_size;
936 txq->need_update = false;
938 /* max_tfd_queue_size must be power-of-two size, otherwise
939 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
940 if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
941 "Max tfd queue size must be a power of two, but is %d",
945 /* Initialize queue's high/low-water marks, and head/tail indexes */
946 ret = iwl_queue_init(txq, slots_num);
950 spin_lock_init(&txq->lock);
953 static struct lock_class_key iwl_txq_cmd_queue_lock_class;
955 lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
958 __skb_queue_head_init(&txq->overflow_q);
963 void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
965 struct page **page_ptr;
968 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
973 struct page *tmp = next;
975 next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
981 void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
983 u32 txq_id = txq->id;
988 if (trans->trans_cfg->gen2) {
989 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
990 txq->read_ptr, txq->write_ptr);
991 /* TODO: access new SCD registers and dump them */
995 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
996 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
997 active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
1000 "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
1001 txq_id, active ? "" : "in", fifo,
1002 jiffies_to_msecs(txq->wd_timeout),
1003 txq->read_ptr, txq->write_ptr,
1004 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
1005 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
1006 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
1007 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
1008 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
1011 static void iwl_txq_stuck_timer(struct timer_list *t)
1013 struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
1014 struct iwl_trans *trans = txq->trans;
1016 spin_lock(&txq->lock);
1017 /* check if triggered erroneously */
1018 if (txq->read_ptr == txq->write_ptr) {
1019 spin_unlock(&txq->lock);
1022 spin_unlock(&txq->lock);
1024 iwl_txq_log_scd_error(trans, txq);
1026 iwl_force_nmi(trans);
1029 int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
1032 size_t tfd_sz = trans->txqs.tfd.size *
1033 trans->trans_cfg->base_params->max_tfd_queue_size;
1037 if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
1040 if (WARN_ON(txq->entries || txq->tfds))
1043 if (trans->trans_cfg->gen2)
1044 tfd_sz = trans->txqs.tfd.size * slots_num;
1046 timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
1049 txq->n_window = slots_num;
1051 txq->entries = kcalloc(slots_num,
1052 sizeof(struct iwl_pcie_txq_entry),
1059 for (i = 0; i < slots_num; i++) {
1060 txq->entries[i].cmd =
1061 kmalloc(sizeof(struct iwl_device_cmd),
1063 if (!txq->entries[i].cmd)
1067 /* Circular buffer of transmit frame descriptors (TFDs),
1068 * shared with device */
1069 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
1070 &txq->dma_addr, GFP_KERNEL);
1074 BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
1076 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
1078 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
1081 if (!txq->first_tb_bufs)
1086 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
1089 if (txq->entries && cmd_queue)
1090 for (i = 0; i < slots_num; i++)
1091 kfree(txq->entries[i].cmd);
1092 kfree(txq->entries);
1093 txq->entries = NULL;
1098 static struct iwl_txq *
1099 iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
1101 size_t bc_tbl_size, bc_tbl_entries;
1102 struct iwl_txq *txq;
1105 WARN_ON(!trans->txqs.bc_tbl_size);
1107 bc_tbl_size = trans->txqs.bc_tbl_size;
1108 bc_tbl_entries = bc_tbl_size / sizeof(u16);
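/* each byte-count table entry is a __le16, so the number of entries caps the supported queue size */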
1110 if (WARN_ON(size > bc_tbl_entries))
1111 return ERR_PTR(-EINVAL);
1113 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
1115 return ERR_PTR(-ENOMEM);
1117 txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
1119 if (!txq->bc_tbl.addr) {
1120 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
1122 return ERR_PTR(-ENOMEM);
1125 ret = iwl_txq_alloc(trans, txq, size, false);
1127 IWL_ERR(trans, "Tx queue alloc failed\n");
1130 ret = iwl_txq_init(trans, txq, size, false);
1132 IWL_ERR(trans, "Tx queue init failed\n");
1136 txq->wd_timeout = msecs_to_jiffies(timeout);
1141 iwl_txq_gen2_free_memory(trans, txq);
1142 return ERR_PTR(ret);
1145 static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
1146 struct iwl_host_cmd *hcmd)
1148 struct iwl_tx_queue_cfg_rsp *rsp;
1152 if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
1155 goto error_free_resp;
1158 rsp = (void *)hcmd->resp_pkt->data;
1159 qid = le16_to_cpu(rsp->queue_number);
1160 wr_ptr = le16_to_cpu(rsp->write_pointer);
1162 if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
1163 WARN_ONCE(1, "queue index %d unsupported", qid);
1165 goto error_free_resp;
1168 if (test_and_set_bit(qid, trans->txqs.queue_used)) {
1169 WARN_ONCE(1, "queue %d already used", qid);
1171 goto error_free_resp;
1174 if (WARN_ONCE(trans->txqs.txq[qid],
1175 "queue %d already allocated\n", qid)) {
1177 goto error_free_resp;
1181 trans->txqs.txq[qid] = txq;
1182 wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
1184 /* Place first TFD at index corresponding to start sequence number */
1185 txq->read_ptr = wr_ptr;
1186 txq->write_ptr = wr_ptr;
1188 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
1190 iwl_free_resp(hcmd);
1194 iwl_free_resp(hcmd);
1195 iwl_txq_gen2_free_memory(trans, txq);
1199 int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
1200 u8 tid, int size, unsigned int timeout)
1202 struct iwl_txq *txq;
1204 struct iwl_tx_queue_cfg_cmd old;
1205 struct iwl_scd_queue_cfg_cmd new;
1207 struct iwl_host_cmd hcmd = {
1208 .flags = CMD_WANT_SKB,
1212 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
1213 trans->hw_rev_step == SILICON_A_STEP)
1216 txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
1218 return PTR_ERR(txq);
1220 if (trans->txqs.queue_alloc_cmd_ver == 0) {
1221 memset(&cmd.old, 0, sizeof(cmd.old));
1222 cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
1223 cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
1224 cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1225 cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
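/* the old command format carries a single station id, so exactly one bit must be set in sta_mask */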
1228 if (hweight32(sta_mask) != 1) {
1232 cmd.old.sta_id = ffs(sta_mask) - 1;
1234 hcmd.id = SCD_QUEUE_CFG;
1235 hcmd.len[0] = sizeof(cmd.old);
1236 hcmd.data[0] = &cmd.old;
1237 } else if (trans->txqs.queue_alloc_cmd_ver == 3) {
1238 memset(&cmd.new, 0, sizeof(cmd.new));
1239 cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
1240 cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
1241 cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
1242 cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1243 cmd.new.u.add.flags = cpu_to_le32(flags);
1244 cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
1245 cmd.new.u.add.tid = tid;
1247 hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
1248 hcmd.len[0] = sizeof(cmd.new);
1249 hcmd.data[0] = &cmd.new;
1255 ret = iwl_trans_send_cmd(trans, &hcmd);
1259 return iwl_txq_alloc_response(trans, txq, &hcmd);
1262 iwl_txq_gen2_free_memory(trans, txq);
1266 void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
1268 if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
1269 "queue %d out of range", queue))
1273 * Upon HW Rfkill - we stop the device, and then stop the queues
1274 * in the op_mode. Just for the sake of the simplicity of the op_mode,
1275 * allow the op_mode to call txq_disable after it already called
1276 * stop_device.
1277 */
1278 if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
1279 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1280 "queue %d not used", queue);
1284 iwl_txq_gen2_free(trans, queue);
1286 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
1289 void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
1293 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
1295 /* Free all TX queues */
1296 for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
1297 if (!trans->txqs.txq[i])
1300 iwl_txq_gen2_free(trans, i);
1304 int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
1306 struct iwl_txq *queue;
1309 /* alloc and init the tx queue */
1310 if (!trans->txqs.txq[txq_id]) {
1311 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1313 IWL_ERR(trans, "Not enough memory for tx queue\n");
1316 trans->txqs.txq[txq_id] = queue;
1317 ret = iwl_txq_alloc(trans, queue, queue_size, true);
1319 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
1323 queue = trans->txqs.txq[txq_id];
1326 ret = iwl_txq_init(trans, queue, queue_size,
1327 (txq_id == trans->txqs.cmd.q_id));
1329 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
1332 trans->txqs.txq[txq_id]->id = txq_id;
1333 set_bit(txq_id, trans->txqs.queue_used);
1338 iwl_txq_gen2_tx_free(trans);
1342 static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
1345 struct iwl_tfd *tfd;
1346 struct iwl_tfd_tb *tb;
1350 if (trans->trans_cfg->gen2) {
1351 struct iwl_tfh_tfd *tfh_tfd = _tfd;
1352 struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
1354 return (dma_addr_t)(le64_to_cpu(tfh_tb->addr));
1358 tb = &tfd->tbs[idx];
1359 addr = get_unaligned_le32(&tb->lo);
1361 if (sizeof(dma_addr_t) <= sizeof(u32))
1364 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
1367 * shift by 16 twice to avoid warnings on 32-bit
1368 * (where this code never runs anyway due to the
1369 * if statement above)
1371 return addr | ((hi_len << 16) << 16);
1374 void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
1375 struct iwl_cmd_meta *meta,
1376 struct iwl_txq *txq, int index)
1379 void *tfd = iwl_txq_get_tfd(trans, txq, index);
1381 /* Sanity check on number of chunks */
1382 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
1384 if (num_tbs > trans->txqs.tfd.max_tbs) {
1385 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
1386 /* @todo issue fatal error, this is quite a serious situation */
1390 /* first TB is never freed - it's the bidirectional DMA data */
1392 for (i = 1; i < num_tbs; i++) {
1393 if (meta->tbs & BIT(i))
1394 dma_unmap_page(trans->dev,
1395 iwl_txq_gen1_tfd_tb_get_addr(trans,
1397 iwl_txq_gen1_tfd_tb_get_len(trans,
1401 dma_unmap_single(trans->dev,
1402 iwl_txq_gen1_tfd_tb_get_addr(trans,
1404 iwl_txq_gen1_tfd_tb_get_len(trans,
1411 if (trans->trans_cfg->gen2) {
1412 struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
1414 tfd_fh->num_tbs = 0;
1416 struct iwl_tfd *tfd_fh = (void *)tfd;
1418 tfd_fh->num_tbs = 0;
1422 #define IWL_TX_CRC_SIZE 4
1423 #define IWL_TX_DELIMITER_SIZE 4
1426 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1428 void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
1429 struct iwl_txq *txq, u16 byte_cnt,
1432 struct iwlagn_scd_bc_tbl *scd_bc_tbl;
1433 int write_ptr = txq->write_ptr;
1434 int txq_id = txq->id;
1436 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1438 struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
1439 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1440 u8 sta_id = tx_cmd->sta_id;
1442 scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1444 sec_ctl = tx_cmd->sec_ctl;
1446 switch (sec_ctl & TX_CMD_SEC_MSK) {
1447 case TX_CMD_SEC_CCM:
1448 len += IEEE80211_CCMP_MIC_LEN;
1450 case TX_CMD_SEC_TKIP:
1451 len += IEEE80211_TKIP_ICV_LEN;
1453 case TX_CMD_SEC_WEP:
1454 len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
1457 if (trans->txqs.bc_table_dword)
1458 len = DIV_ROUND_UP(len, 4);
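/* legacy entry layout: length in bits 0..11, station id in bits 12..15 */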
1460 if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
1463 bc_ent = cpu_to_le16(len | (sta_id << 12));
1465 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
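/* the first TFD_QUEUE_SIZE_BC_DUP entries are mirrored after TFD_QUEUE_SIZE_MAX, presumably so the scheduler can read past the wrap point */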
1467 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1468 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
1472 void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
1473 struct iwl_txq *txq)
1475 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1476 int txq_id = txq->id;
1477 int read_ptr = txq->read_ptr;
1480 struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
1481 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1483 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
1485 if (txq_id != trans->txqs.cmd.q_id)
1486 sta_id = tx_cmd->sta_id;
1488 bc_ent = cpu_to_le16(1 | (sta_id << 12));
1490 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
1492 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
1493 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
1498 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->read_ptr]
1499 * @trans - transport private data
1500 * @txq - the transmit queue
1503 * Does NOT advance any TFD circular buffer read/write indexes
1504 * Does NOT free the TFD itself (which is within circular buffer)
1506 void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
1508 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
1509 * idx is bounded by n_window
1511 int rd_ptr = txq->read_ptr;
1512 int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
1513 struct sk_buff *skb;
1515 lockdep_assert_held(&txq->lock);
1520 /* We have only q->n_window txq->entries, but we use
1521 * TFD_QUEUE_SIZE_MAX tfds
1523 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
1526 skb = txq->entries[idx].skb;
1528 /* Can be called from irqs-disabled context
1529 * If skb is not NULL, it means that the whole queue is being
1530 * freed and that the queue is not empty - free the skb
1533 iwl_op_mode_free_skb(trans->op_mode, skb);
1534 txq->entries[idx].skb = NULL;
1538 void iwl_txq_progress(struct iwl_txq *txq)
1540 lockdep_assert_held(&txq->lock);
1542 if (!txq->wd_timeout)
1546 * station is asleep and we send data - that must
1547 * be uAPSD or PS-Poll. Don't rearm the timer.
1553 * if empty delete timer, otherwise move timer forward
1554 * since we're making progress on this queue
1556 if (txq->read_ptr == txq->write_ptr)
1557 del_timer(&txq->stuck_timer);
1559 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1562 /* Frees buffers until index _not_ inclusive */
1563 void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1564 struct sk_buff_head *skbs)
1566 struct iwl_txq *txq = trans->txqs.txq[txq_id];
1567 int tfd_num, read_ptr, last_to_free;
1569 /* This function is not meant to release the cmd queue */
1570 if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
1576 tfd_num = iwl_txq_get_cmd_index(txq, ssn);
1577 read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
1579 spin_lock_bh(&txq->lock);
1581 if (!test_bit(txq_id, trans->txqs.queue_used)) {
1582 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
1587 if (read_ptr == tfd_num)
1590 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
1591 txq_id, txq->read_ptr, tfd_num, ssn);
1593 /* Since we free until index _not_ inclusive, the one before index is
1594 * the last we will free. This one must be used. */
1595 last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
1597 if (!iwl_txq_used(txq, last_to_free)) {
1599 "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
1600 __func__, txq_id, last_to_free,
1601 trans->trans_cfg->base_params->max_tfd_queue_size,
1602 txq->write_ptr, txq->read_ptr);
1604 iwl_op_mode_time_point(trans->op_mode,
1605 IWL_FW_INI_TIME_POINT_FAKE_TX,
1610 if (WARN_ON(!skb_queue_empty(skbs)))
1614 read_ptr != tfd_num;
1615 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
1616 read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
1617 struct sk_buff *skb = txq->entries[read_ptr].skb;
1619 if (WARN_ON_ONCE(!skb))
1622 iwl_txq_free_tso_page(trans, skb);
1624 __skb_queue_tail(skbs, skb);
1626 txq->entries[read_ptr].skb = NULL;
1628 if (!trans->trans_cfg->gen2)
1629 iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
1631 iwl_txq_free_tfd(trans, txq);
1634 iwl_txq_progress(txq);
1636 if (iwl_txq_space(trans, txq) > txq->low_mark &&
1637 test_bit(txq_id, trans->txqs.queue_stopped)) {
1638 struct sk_buff_head overflow_skbs;
1640 __skb_queue_head_init(&overflow_skbs);
1641 skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
1644 * We are going to transmit from the overflow queue.
1645 * Remember this state so that wait_for_txq_empty will know we
1646 * are adding more packets to the TFD queue. It cannot rely on
1647 * the state of &txq->overflow_q, as we just emptied it, but
1648 * haven't TXed the content yet.
1650 txq->overflow_tx = true;
1653 * This is tricky: we are in the reclaim path, which is not
1654 * re-entrant, so no one else will try to access the
1655 * txq data from that path. We stopped tx, so we can't
1656 * have tx as well. Bottom line, we can unlock and re-lock
1659 spin_unlock_bh(&txq->lock);
1661 while (!skb_queue_empty(&overflow_skbs)) {
1662 struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
1663 struct iwl_device_tx_cmd *dev_cmd_ptr;
1665 dev_cmd_ptr = *(void **)((u8 *)skb->cb +
1666 trans->txqs.dev_cmd_offs);
1669 * Note that we can very well be overflowing again.
1670 * In that case, iwl_txq_space will be small again
1671 * and we won't wake mac80211's queue.
1673 iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
1676 if (iwl_txq_space(trans, txq) > txq->low_mark)
1677 iwl_wake_queue(trans, txq);
1679 spin_lock_bh(&txq->lock);
1680 txq->overflow_tx = false;
1684 spin_unlock_bh(&txq->lock);
1687 /* Set wr_ptr of specific device and txq */
1688 void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
1690 struct iwl_txq *txq = trans->txqs.txq[txq_id];
1692 spin_lock_bh(&txq->lock);
1694 txq->write_ptr = ptr;
1695 txq->read_ptr = txq->write_ptr;
1697 spin_unlock_bh(&txq->lock);
1700 void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
1705 for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
1706 struct iwl_txq *txq = trans->txqs.txq[queue];
1709 spin_lock_bh(&txq->lock);
1713 if (txq->frozen == freeze)
1716 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
1717 freeze ? "Freezing" : "Waking", queue);
1719 txq->frozen = freeze;
1721 if (txq->read_ptr == txq->write_ptr)
1725 if (unlikely(time_after(now,
1726 txq->stuck_timer.expires))) {
1728 * The timer should have fired, maybe it is
1729 * spinning right now on the lock.
1733 /* remember how long until the timer fires */
1734 txq->frozen_expiry_remainder =
1735 txq->stuck_timer.expires - now;
1736 del_timer(&txq->stuck_timer);
1741 * Wake a non-empty queue -> arm timer with the
1742 * remainder before it froze
1744 mod_timer(&txq->stuck_timer,
1745 now + txq->frozen_expiry_remainder);
1748 spin_unlock_bh(&txq->lock);
1752 #define HOST_COMPLETE_TIMEOUT (2 * HZ)
1754 static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
1755 struct iwl_host_cmd *cmd)
1757 const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
1758 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1762 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
1764 if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
1766 "Command %s: a command is already active!\n", cmd_str))
1769 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
1771 cmd_idx = trans->ops->send_cmd(trans, cmd);
1774 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1775 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
1780 ret = wait_event_timeout(trans->wait_command_queue,
1781 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
1783 HOST_COMPLETE_TIMEOUT);
1785 IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
1786 cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
1788 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
1789 txq->read_ptr, txq->write_ptr);
1791 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1792 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1796 iwl_trans_sync_nmi(trans);
1800 if (test_bit(STATUS_FW_ERROR, &trans->status)) {
1801 if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
1803 IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
1810 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1811 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1812 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
1817 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
1818 IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
1826 if (cmd->flags & CMD_WANT_SKB) {
1828 * Cancel the CMD_WANT_SKB flag for the cmd in the
1829 * TX cmd queue. Otherwise in case the cmd comes
1830 * in later, it will possibly set an invalid
1831 * address (cmd->meta.source).
1833 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
1836 if (cmd->resp_pkt) {
1838 cmd->resp_pkt = NULL;
1844 int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
1845 struct iwl_host_cmd *cmd)
1847 /* Make sure the NIC is still alive on the bus */
1848 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
1851 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1852 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1853 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
1858 if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
1859 !(cmd->flags & CMD_SEND_IN_D3))) {
1860 IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
1864 if (cmd->flags & CMD_ASYNC) {
1867 /* An asynchronous command can not expect an SKB to be set. */
1868 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
1871 ret = trans->ops->send_cmd(trans, cmd);
1874 "Error sending %s: enqueue_hcmd failed: %d\n",
1875 iwl_get_cmd_string(trans, cmd->id), ret);
1881 return iwl_trans_txq_send_hcmd_sync(trans, cmd);