/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2020-2022 Intel Corporation
 */
#ifndef __iwl_trans_queue_tx_h__
#define __iwl_trans_queue_tx_h__

#include "iwl-fh.h"
#include "fw/api/tx.h"
struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};
static inline dma_addr_t
iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}
static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}
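/*
 * Illustrative example (not part of the driver): n_window is a power
 * of two, so the mask above is a cheap modulo. With n_window == 32 the
 * mask is 0x1f, and a wrapping hardware index of 35 maps to slot 3:
 *
 *	iwl_txq_get_cmd_index(q, 35) == (35 & 31) == 3
 */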
void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id);
static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}
static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
				    struct iwl_txq *txq, int idx)
{
	if (trans->trans_cfg->use_tfh)
		idx = iwl_txq_get_cmd_index(txq, idx);

	return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
}
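/*
 * Note on the byte arithmetic above: the TFD size is a runtime value
 * (trans->txqs.tfd.size) because gen1 (struct iwl_tfd) and gen2
 * (struct iwl_tfh_tfd) descriptors differ in size, so txq->tfds cannot
 * be indexed as a typed C array. Illustrative example: with a 128-byte
 * TFD, iwl_txq_get_tfd(trans, txq, 3) points 384 bytes into the array.
 */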
int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		  bool cmd_queue);
/*
 * We need this inline in case dma_addr_t is only 32-bits - since the
 * hardware is always 64-bit, the issue can still occur in that case,
 * so use u64 for 'phys' here to force the addition in 64-bit.
 */
static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}
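/*
 * Worked example (illustrative): phys = 0xfffff000 and len = 0x2000
 * give phys + len = 0x100001000, so upper_32_bits() changes from 0 to
 * 1 and the buffer crosses the 4 GiB boundary. With a 32-bit
 * dma_addr_t the sum would have wrapped to 0x1000 and the crossing
 * would have gone unnoticed - hence the u64 'phys' parameter.
 */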
int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
{
	if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}
/*
 * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 */
static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
/*
 * iwl_txq_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 */
static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
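/*
 * Worked example (illustrative): with max_tfd_queue_size == 256 the
 * mask is 0xff, so:
 *
 *	iwl_txq_inc_wrap(trans, 255) == (256 & 0xff) == 0
 *	iwl_txq_dec_wrap(trans, 0)   == (-1 & 0xff)  == 255
 *
 * Both helpers rely on max_tfd_queue_size being a power of two.
 */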
static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
{
	int index = iwl_txq_get_cmd_index(q, i);
	int r = iwl_txq_get_cmd_index(q, q->read_ptr);
	int w = iwl_txq_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}
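/*
 * Worked example (illustrative): slots in [read_ptr, write_ptr) are in
 * use. With r == 10 and w == 14, index 12 is used while index 14 (the
 * next slot to be written) is not. In the wrapped case r == 30 and
 * w == 2 on a 32-entry window, index 31 is used and index 5 is free,
 * which is what the negated second expression computes.
 */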
void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);
void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
			struct iwl_tfh_tfd *tfd, dma_addr_t addr,
			u16 len);
void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd);
int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
		      u32 sta_mask, u8 tid,
		      int size, unsigned int timeout);
int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		 bool cmd_queue);
int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb);
#endif
static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
					      void *_tfd)
{
	struct iwl_tfd *tfd;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfh_tfd = _tfd;

		return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f;
	}

	tfd = (struct iwl_tfd *)_tfd;
	return tfd->num_tbs & 0x1f;
}
static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
					      void *_tfd, u8 idx)
{
	struct iwl_tfd *tfd;
	struct iwl_tfd_tb *tb;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfh_tfd = _tfd;
		struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];

		return le16_to_cpu(tfh_tb->tb_len);
	}

	tfd = (struct iwl_tfd *)_tfd;
	tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}
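/*
 * Gen1 TB layout note: hi_n_len packs two fields into 16 bits - bits
 * 0-3 carry bits 32-35 of the 36-bit DMA address and bits 4-15 carry
 * the buffer length, hence the '>> 4' above. Worked example
 * (illustrative): hi_n_len == 0x0b42 describes a 0xb4-byte (180-byte)
 * buffer whose DMA address has 0x2 in bits [35:32].
 */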
void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_txq *txq, int index);
void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
				     struct iwl_txq *txq);
void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
				      struct iwl_txq *txq, u16 byte_cnt,
				      int num_tbs);
void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
		     struct sk_buff_head *skbs);
void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
				bool freeze);
void iwl_txq_progress(struct iwl_txq *txq);
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

#endif /* __iwl_trans_queue_tx_h__ */