2 ****************************************************************************************
6 * Copyright (C) ESWIN 2015-2020
8 ****************************************************************************************
13 #include <linux/types.h>
14 #include <linux/bitops.h>
15 #include <linux/ieee80211.h>
17 #ifdef CONFIG_ECRNX_SOFTMAC
18 #include <net/mac80211.h>
19 #include "ecrnx_baws.h"
22 * Softmac TXQ configuration
23 * - STA have one TXQ per TID
24 * - VIF have one TXQ per HW queue
26 * Txq mapping looks like
27 * for NX_REMOTE_STA_MAX=10 and NX_VIRT_DEV_MAX=4
29 * | TXQ | VIF | STA | TID | HWQ |
30 * |-----+-----+-------+------+-----|-
31 * | 0 | | 0 | 0 | 1 | 16 TXQ per STA
41 * | 16 | | 0 | 16 | 1 |
42 * |-----+-----+-------+------+-----|-
43 * | ... | | | | | same for all STAs
44 * |-----+-----+-------+------+-----|-
45 * | 160 | 0 | | | 0 | 5 TXQ per VIF
48 * |-----+-----+-------+------+-----|-
49 * | ... | | | | | same for all VIFs
50 * |-----+-----+-------+------+-----|-
52 * NOTE: When using CONFIG_MAC80211_TXQ only one TXQ is allocated by mac80211
53 * for the VIF (associated to BE ac). To avoid too much differences with case
54 * where TXQ are allocated by the driver the "missing" VIF TXQs are allocated
55 * by the driver. Actually driver also allocates txq for BE (to avoid having
56 * modify ac parameter to access the TXQ) but this one is never used.
57 * Driver checks if the nb_ready_mac80211 field is equal to NOT_MAC80211_TXQ in
58 * order to distinguish non mac80211 txq.
59 * When the txq interface (.wake_tx_queue) is used only the TXQ
60 * allocated by mac80211 will be used and thus BE access category will always
61 * be used. When "VIF" frames need to be pushed on a different access category
62 * mac80211 will use the tx interface (.tx) and in this case driver will select
63 * the txq associated to the requested access category.
/* Softmac: one TXQ per TID per STA (one per IEEE 802.11 TID) */
65 #define NX_NB_TID_PER_STA IEEE80211_NUM_TIDS
66 #define NX_NB_TXQ_PER_STA NX_NB_TID_PER_STA
/* One TXQ per HW queue for each VIF */
67 #define NX_NB_TXQ_PER_VIF NX_TXQ_CNT
/* Total TXQ count: all STA TXQs followed by all VIF TXQs (see mapping table above) */
68 #define NX_NB_TXQ ((NX_NB_TXQ_PER_STA * NX_REMOTE_STA_MAX) + \
69 (NX_NB_TXQ_PER_VIF * NX_VIRT_DEV_MAX))
/* Index of the first VIF TXQ; VIF TXQs are placed after all STA TXQs */
71 #define NX_FIRST_VIF_TXQ_IDX (NX_REMOTE_STA_MAX * NX_NB_TXQ_PER_STA)
/* Sentinel stored in nb_ready_mac80211 for TXQs not allocated by mac80211 */
73 #define NOT_MAC80211_TXQ ULONG_MAX
75 #else /* i.e. #ifdef CONFIG_ECRNX_FULLMAC */
77 * Fullmac TXQ configuration:
78 * - STA: 1 TXQ per TID (limited to 8)
79 * 1 TXQ for bufferable MGT frames
80 * - VIF: 1 TXQ for Multi/Broadcast +
81 * 1 TXQ for MGT for unknown STAs or non-bufferable MGT frames
82 * - 1 TXQ for offchannel transmissions
85 * Txq mapping looks like
86 * for NX_REMOTE_STA_MAX=10 and NX_VIRT_DEV_MAX=4
88 * | TXQ | NDEV_ID | VIF | STA | TID | HWQ |
89 * |-----+---------+-----+-------+------+-----|-
90 * | 0 | 0 | | 0 | 0 | 1 | 9 TXQ per STA
91 * | 1 | 1 | | 0 | 1 | 0 | (8 data + 1 mgmt)
92 * | 2 | 2 | | 0 | 2 | 0 |
93 * | 3 | 3 | | 0 | 3 | 1 |
94 * | 4 | 4 | | 0 | 4 | 2 |
95 * | 5 | 5 | | 0 | 5 | 2 |
96 * | 6 | 6 | | 0 | 6 | 3 |
97 * | 7 | 7 | | 0 | 7 | 3 |
98 * | 8 | N/A | | 0 | MGMT | 3 |
99 * |-----+---------+-----+-------+------+-----|-
100 * | ... | | | | | | Same for all STAs
101 * |-----+---------+-----+-------+------+-----|-
102 * | 90 | 80 | 0 | BC/MC | 0 | 1/4 | 1 TXQ for BC/MC per VIF
104 * | 93 | 80 | 3 | BC/MC | 0 | 1/4 |
105 * |-----+---------+-----+-------+------+-----|-
106 * | 94 | N/A | 0 | N/A | MGMT | 3 | 1 TXQ for unknown STA per VIF
108 * | 97 | N/A | 3 | N/A | MGMT | 3 |
109 * |-----+---------+-----+-------+------+-----|-
110 * | 98 | N/A | | N/A | MGMT | 3 | 1 TXQ for offchannel frame
/* Fullmac: 8 data TIDs per STA, plus 1 TXQ for bufferable MGT frames */
112 #define NX_NB_TID_PER_STA 8
113 #define NX_NB_TXQ_PER_STA (NX_NB_TID_PER_STA + 1)
/* Per VIF: 1 BC/MC TXQ + 1 TXQ for unknown-STA / non-bufferable MGT frames */
114 #define NX_NB_TXQ_PER_VIF 2
/* Total TXQ count: STA TXQs + VIF TXQs + 1 offchannel TXQ (see mapping table above) */
115 #define NX_NB_TXQ ((NX_NB_TXQ_PER_STA * NX_REMOTE_STA_MAX) + \
116 (NX_NB_TXQ_PER_VIF * NX_VIRT_DEV_MAX) + 1)
/* VIF TXQs start right after the STA TXQs: first the BC/MC ones, then the unknown-STA ones */
118 #define NX_FIRST_VIF_TXQ_IDX (NX_REMOTE_STA_MAX * NX_NB_TXQ_PER_STA)
119 #define NX_FIRST_BCMC_TXQ_IDX NX_FIRST_VIF_TXQ_IDX
120 #define NX_FIRST_UNK_TXQ_IDX (NX_FIRST_BCMC_TXQ_IDX + NX_VIRT_DEV_MAX)
/* Single offchannel TXQ: last index, after all VIF TXQs */
122 #define NX_OFF_CHAN_TXQ_IDX (NX_FIRST_VIF_TXQ_IDX + \
123 (NX_VIRT_DEV_MAX * NX_NB_TXQ_PER_VIF))
/* "type" values passed to ecrnx_txq_vif_get() to select one of the two VIF TXQs */
124 #define NX_BCMC_TXQ_TYPE 0
125 #define NX_UNK_TXQ_TYPE 1
128 * Each data TXQ is a netdev queue. TXQ to send MGT are not data TXQ as
129 * they did not receive buffers from the netdev interface.
130 * Need to allocate the maximum case.
131 * AP : all STAs + 1 BC/MC
/* Number of netdev TX queues: one per STA data TID, plus one BC/MC queue */
133 #define NX_NB_NDEV_TXQ ((NX_NB_TID_PER_STA * NX_REMOTE_STA_MAX) + 1 )
134 #define NX_BCMC_TXQ_NDEV_IDX (NX_NB_TID_PER_STA * NX_REMOTE_STA_MAX)
/* Netdev queue index of a (tid, sta) pair */
135 #define NX_STA_NDEV_IDX(tid, sta_idx) ((tid) + (sta_idx) * NX_NB_TID_PER_STA)
/* Sentinel for a TXQ with no associated netdev queue (must exceed NX_NB_NDEV_TXQ) */
136 #define NDEV_NO_TXQ 0xffff
137 #if (NX_NB_NDEV_TXQ >= NDEV_NO_TXQ)
138 #error("Need to increase struct ecrnx_txq->ndev_idx size")
141 /* stop netdev queue when number of queued buffers is greater than this */
142 #define ECRNX_NDEV_FLOW_CTRL_STOP 200
143 /* restart netdev queue when number of queued buffers is lower than this */
144 #define ECRNX_NDEV_FLOW_CTRL_RESTART 100
146 #endif /* CONFIG_ECRNX_SOFTMAC */
/* Sentinel stored in ecrnx_txq.idx when the TXQ is not in use */
148 #define TXQ_INACTIVE 0xffff
149 #if (NX_NB_TXQ >= TXQ_INACTIVE)
150 #error("Need to increase struct ecrnx_txq->idx size")
/* Initial credit count for each TXQ (nb of buffers that can be pushed to FW) */
153 #define NX_TXQ_INITIAL_CREDITS 20 //4
/* Periodic TXQ cleanup interval and max time a buffer may stay queued, in jiffies */
155 #define ECRNX_TXQ_CLEANUP_INTERVAL (10 * HZ) //10s in jiffies
156 #define ECRNX_TXQ_MAX_QUEUE_JIFFIES (20 * HZ)
158 * TXQ tid sorted by decreasing priority
/* Table of TIDs in decreasing priority order; defined in the TXQ .c file */
160 extern const int nx_tid_prio[NX_NB_TID_PER_STA];
163 * struct ecrnx_hwq - Structure used to save information relative to
164 * an AC TX queue (aka HW queue)
165 * @list: List of TXQ, that have buffers ready for this HWQ
166 * @credits: available credit for the queue (i.e. nb of buffers that
167 * can be pushed to FW )
168 * @id Id of the HWQ among ECRNX_HWQ_....
169 * @size size of the queue
170 * @need_processing Indicate if hwq should be processed
171 * @len number of packet ready to be pushed to fw for this HW queue
172 * @len_stop threshold to stop mac80211(i.e. netdev) queues. Stop queue when
173 * driver has more than @len_stop packets ready.
174 * @len_start threshold to wake mac80211 queues. Wake queue when driver has
175 * less than @len_start packets ready.
/* Head of the list of TXQs scheduled on this HW queue */
178 struct list_head list;
/* Per-user credits; one entry per MU-MIMO user position */
179 u8 credits[CONFIG_USER_MAX];
182 bool need_processing;
183 #ifdef CONFIG_ECRNX_SOFTMAC
/* NOTE(review): softmac-only fields (len/len_stop/len_start per kdoc above)
 * are not visible in this view of the file */
187 #endif /* CONFIG_ECRNX_SOFTMAC */
191 * enum ecrnx_push_flags - Flags of pushed buffer
193 * @ECRNX_PUSH_RETRY Pushing a buffer for retry
194 * @ECRNX_PUSH_IMMEDIATE Pushing a buffer without queuing it first
196 enum ecrnx_push_flags {
197 ECRNX_PUSH_RETRY = BIT(0),
198 ECRNX_PUSH_IMMEDIATE = BIT(1),
202 * enum ecrnx_txq_flags - TXQ status flag
204 * @ECRNX_TXQ_IN_HWQ_LIST: The queue is scheduled for transmission
205 * @ECRNX_TXQ_STOP_FULL: No more credits for the queue
206 * @ECRNX_TXQ_STOP_CSA: CSA is in progress
207 * @ECRNX_TXQ_STOP_STA_PS: Destination sta is currently in power save mode
208 * @ECRNX_TXQ_STOP_VIF_PS: Vif owning this queue is currently in power save mode
209 * @ECRNX_TXQ_STOP_CHAN: Channel of this queue is not the current active channel
210 * @ECRNX_TXQ_STOP_MU_POS: TXQ is stopped waiting for all the buffers pushed to
212 * @ECRNX_TXQ_STOP: All possible reason to have a txq stopped
213 * @ECRNX_TXQ_NDEV_FLOW_CTRL: associated netdev queue is currently stopped.
214 * Note: when a TXQ is flowctrl it is NOT stopped
216 enum ecrnx_txq_flags {
217 ECRNX_TXQ_IN_HWQ_LIST = BIT(0),
218 ECRNX_TXQ_STOP_FULL = BIT(1),
219 ECRNX_TXQ_STOP_CSA = BIT(2),
220 ECRNX_TXQ_STOP_STA_PS = BIT(3),
221 ECRNX_TXQ_STOP_VIF_PS = BIT(4),
222 ECRNX_TXQ_STOP_CHAN = BIT(5),
223 ECRNX_TXQ_STOP_MU_POS = BIT(6),
/* Mask of every "stop" reason; note ECRNX_TXQ_STOP_MU_POS is deliberately
 * excluded from this mask (a MU-POS pause does not count as stopped here) */
224 ECRNX_TXQ_STOP = (ECRNX_TXQ_STOP_FULL | ECRNX_TXQ_STOP_CSA |
225 ECRNX_TXQ_STOP_STA_PS | ECRNX_TXQ_STOP_VIF_PS |
226 ECRNX_TXQ_STOP_CHAN) ,
227 ECRNX_TXQ_NDEV_FLOW_CTRL = BIT(7),
232 * struct ecrnx_txq - Structure used to save information relative to
235 * @idx: Unique txq idx. Set to TXQ_INACTIVE if txq is not used.
236 * @status: bitfield of @ecrnx_txq_flags.
237 * @credits: available credit for the queue (i.e. nb of buffers that
238 * can be pushed to FW).
239 * @pkt_sent: number of consecutive pkt sent without leaving HW queue list
240 * @pkt_pushed: number of pkt currently pending for transmission confirmation
241 * @sched_list: list node for HW queue schedule list (ecrnx_hwq.list)
242 * @sk_list: list of buffers to push to fw
243 * @last_retry_skb: pointer on the last skb in @sk_list that is a retry.
244 * (retry skb are stored at the beginning of the list)
245 * NULL if no retry skb is queued in @sk_list
246 * @nb_retry: Number of retry packet queued.
247 * @hwq: Pointer on the associated HW queue.
248 * @push_limit: number of packet to push before removing the txq from hwq list.
249 * (we always have push_limit < skb_queue_len(sk_list))
253 * @baw: Block Ack window information
254 * @amsdu_anchor: pointer to ecrnx_sw_txhdr of the first subframe of the A-MSDU.
255 * NULL if no A-MSDU frame is in construction
257 * @amsdu_vht_len_cap:
258 * @nb_ready_mac80211: Number of buffer ready in mac80211 txq
261 * @ps_id: Index to use for Power save mode (LEGACY or UAPSD)
262 * @ndev_idx: txq idx from netdev point of view (0xFF for non netdev queue)
263 * @ndev: pointer to ndev of the corresponding vif
264 * @amsdu: pointer to ecrnx_sw_txhdr of the first subframe of the A-MSDU.
265 * NULL if no A-MSDU frame is in construction
266 * @amsdu_len: Maximum size allowed for an A-MSDU. 0 means A-MSDU not allowed
/* Per-user count of packets awaiting FW confirmation (one slot per MU position) */
273 u8 pkt_pushed[CONFIG_USER_MAX];
274 struct list_head sched_list;
275 struct sk_buff_head sk_list;
276 struct sk_buff *last_retry_skb;
277 struct ecrnx_hwq *hwq;
281 #ifdef CONFIG_MAC80211_TXQ
/* Set to NOT_MAC80211_TXQ for TXQs allocated by the driver (see header note) */
282 unsigned long nb_ready_mac80211;
284 #ifdef CONFIG_ECRNX_SOFTMAC
285 struct ecrnx_baw baw;
286 struct ieee80211_sta *sta;
287 #ifdef CONFIG_ECRNX_AMSDUS_TX
288 struct ecrnx_sw_txhdr *amsdu_anchor;
/* Max A-MSDU length caps negotiated for HT and VHT respectively */
289 u16 amsdu_ht_len_cap;
290 u16 amsdu_vht_len_cap;
291 #endif /* CONFIG_ECRNX_AMSDUS_TX */
292 #else /* ! CONFIG_ECRNX_SOFTMAC */
293 struct ecrnx_sta *sta;
296 struct net_device *ndev;
297 #ifdef CONFIG_ECRNX_AMSDUS_TX
298 struct ecrnx_sw_txhdr *amsdu;
300 #endif /* CONFIG_ECRNX_AMSDUS_TX */
301 #endif /* CONFIG_ECRNX_SOFTMAC */
302 #ifdef CONFIG_ECRNX_MUMIMO_TX
/* Forward declaration: full definition lives in the TX header */
310 struct ecrnx_sw_txhdr;
312 #ifdef CONFIG_ECRNX_MUMIMO_TX
/* mumimo_info layout: bits [5:0] = group id, bits [7:6] = user position */
313 #define ECRNX_TXQ_GROUP_ID(txq) ((txq)->mumimo_info & 0x3f)
314 #define ECRNX_TXQ_POS_ID(txq) (((txq)->mumimo_info >> 6) & 0x3)
/* Without MU-MIMO there is a single group/position */
316 #define ECRNX_TXQ_GROUP_ID(txq) 0
317 #define ECRNX_TXQ_POS_ID(txq) 0
318 #endif /* CONFIG_ECRNX_MUMIMO_TX */
/* Return true if the TXQ is stopped for any reason in the ECRNX_TXQ_STOP mask */
320 static inline bool ecrnx_txq_is_stopped(struct ecrnx_txq *txq)
322 return (txq->status & ECRNX_TXQ_STOP);
/* Return true if the TXQ is stopped because it has no more credits */
325 static inline bool ecrnx_txq_is_full(struct ecrnx_txq *txq)
327 return (txq->status & ECRNX_TXQ_STOP_FULL);
/* Return true if the TXQ is currently scheduled on its HW queue list */
330 static inline bool ecrnx_txq_is_scheduled(struct ecrnx_txq *txq)
332 return (txq->status & ECRNX_TXQ_IN_HWQ_LIST);
336 * ecrnx_txq_is_ready_for_push - Check if a TXQ is ready for push
341 * - txq is not stopped
342 * - and hwq has credits
343 * - and there is no buffer queued
344 * then a buffer can be immediately pushed without having to queue it first
345 * @return: true if the 3 conditions are met and false otherwise.
347 static inline bool ecrnx_txq_is_ready_for_push(struct ecrnx_txq *txq)
/* Checks the HWQ credit slot for this TXQ's MU position, not the TXQ's own credits */
349 return (!ecrnx_txq_is_stopped(txq) &&
350 txq->hwq->credits[ECRNX_TXQ_POS_ID(txq)] > 0 &&
351 skb_queue_empty(&txq->sk_list));
355 * foreach_sta_txq - Macro to iterate over all TXQ of a STA in increasing
358 * @sta: pointer to ecrnx_sta
359 * @txq: pointer to ecrnx_txq updated with the next TXQ at each iteration
360 * @tid: int updated with the TXQ tid at each iteration
361 * @ecrnx_hw: main driver data
363 #ifdef CONFIG_MAC80211_TXQ
364 #define foreach_sta_txq(sta, txq, tid, ecrnx_hw) \
365 for (tid = 0, txq = ecrnx_txq_sta_get(sta, 0); \
366 tid < NX_NB_TXQ_PER_STA; \
367 tid++, txq = ecrnx_txq_sta_get(sta, tid))
369 #elif defined(CONFIG_ECRNX_SOFTMAC)
370 #define foreach_sta_txq(sta, txq, tid, ecrnx_hw) \
371 for (tid = 0, txq = &sta->txqs[0]; \
372 tid < NX_NB_TXQ_PER_STA; \
375 #else /* CONFIG_ECRNX_FULLMAC */
376 #define foreach_sta_txq(sta, txq, tid, ecrnx_hw) \
/* Fullmac: multicast STAs only have a single TXQ, so the loop is capped at 1 */
377 for (tid = 0, txq = ecrnx_txq_sta_get(sta, 0, ecrnx_hw); \
378 tid < (is_multicast_sta(sta->sta_idx) ? 1 : NX_NB_TXQ_PER_STA); \
384 * foreach_sta_txq_prio - Macro to iterate over all TXQ of a STA in
385 * decreasing priority order
387 * @sta: pointer to ecrnx_sta
388 * @txq: pointer to ecrnx_txq updated with the next TXQ at each iteration
389 * @tid: int updated with the TXQ tid at each iteration
390 * @i: int updated with iteration count
391 * @ecrnx_hw: main driver data
393 * Note: For fullmac txq for mgmt frame is skipped
395 #ifdef CONFIG_ECRNX_SOFTMAC
396 #define foreach_sta_txq_prio(sta, txq, tid, i, ecrnx_hw) \
397 for (i = 0, tid = nx_tid_prio[0], txq = ecrnx_txq_sta_get(sta, tid); \
398 i < NX_NB_TID_PER_STA; \
399 i++, tid = nx_tid_prio[i], txq = ecrnx_txq_sta_get(sta, tid))
400 #else /* CONFIG_ECRNX_FULLMAC */
401 #define foreach_sta_txq_prio(sta, txq, tid, i, ecrnx_hw) \
402 for (i = 0, tid = nx_tid_prio[0], txq = ecrnx_txq_sta_get(sta, tid, ecrnx_hw); \
403 i < NX_NB_TID_PER_STA; \
404 i++, tid = nx_tid_prio[i], txq = ecrnx_txq_sta_get(sta, tid, ecrnx_hw))
408 * foreach_vif_txq - Macro to iterate over all TXQ of a VIF (in AC order)
410 * @vif: pointer to ecrnx_vif
411 * @txq: pointer to ecrnx_txq updated with the next TXQ at each iteration
412 * @ac: int updated with the TXQ ac at each iteration
414 #ifdef CONFIG_MAC80211_TXQ
415 #define foreach_vif_txq(vif, txq, ac) \
416 for (ac = ECRNX_HWQ_BK, txq = ecrnx_txq_vif_get(vif, ac); \
417 ac < NX_NB_TXQ_PER_VIF; \
418 ac++, txq = ecrnx_txq_vif_get(vif, ac))
/* Without CONFIG_MAC80211_TXQ the VIF TXQ array is accessed directly */
421 #define foreach_vif_txq(vif, txq, ac) \
422 for (ac = ECRNX_HWQ_BK, txq = &vif->txqs[0]; \
423 ac < NX_NB_TXQ_PER_VIF; \
/* TXQ lookup accessors; fullmac variants take the extra ecrnx_hw argument
 * and ecrnx_txq_vif_get() selects by type (NX_BCMC_TXQ_TYPE / NX_UNK_TXQ_TYPE) */
427 #ifdef CONFIG_ECRNX_SOFTMAC
428 struct ecrnx_txq *ecrnx_txq_sta_get(struct ecrnx_sta *sta, u8 tid);
429 struct ecrnx_txq *ecrnx_txq_vif_get(struct ecrnx_vif *vif, u8 ac);
431 struct ecrnx_txq *ecrnx_txq_sta_get(struct ecrnx_sta *sta, u8 tid,
432 struct ecrnx_hw * ecrnx_hw);
433 struct ecrnx_txq *ecrnx_txq_vif_get(struct ecrnx_vif *vif, u8 type);
434 #endif /* CONFIG_ECRNX_SOFTMAC */
437 * ecrnx_txq_vif_get_status - return status bits related to the vif
439 * @ecrnx_vif: Pointer to vif structure
441 static inline u8 ecrnx_txq_vif_get_status(struct ecrnx_vif *ecrnx_vif)
/* Reads the first VIF TXQ; the CHAN/VIF_PS bits are common to all TXQs of a vif */
443 struct ecrnx_txq *txq = ecrnx_txq_vif_get(ecrnx_vif, 0);
444 return (txq->status & (ECRNX_TXQ_STOP_CHAN | ECRNX_TXQ_STOP_VIF_PS));
/* TXQ lifecycle: init/deinit per VIF and per STA */
447 void ecrnx_txq_vif_init(struct ecrnx_hw * ecrnx_hw, struct ecrnx_vif *vif,
449 void ecrnx_txq_vif_deinit(struct ecrnx_hw * ecrnx_hw, struct ecrnx_vif *vif);
450 void ecrnx_txq_sta_init(struct ecrnx_hw * ecrnx_hw, struct ecrnx_sta *ecrnx_sta,
452 void ecrnx_txq_sta_deinit(struct ecrnx_hw * ecrnx_hw, struct ecrnx_sta *ecrnx_sta);
453 #ifdef CONFIG_ECRNX_FULLMAC
/* Fullmac-only TXQs: unknown-STA, offchannel and TDLS queues */
454 void ecrnx_txq_unk_vif_init(struct ecrnx_vif *ecrnx_vif);
455 void ecrnx_txq_unk_vif_deinit(struct ecrnx_vif *vif);
456 void ecrnx_txq_offchan_init(struct ecrnx_vif *ecrnx_vif);
457 void ecrnx_txq_offchan_deinit(struct ecrnx_vif *ecrnx_vif);
458 void ecrnx_txq_tdls_vif_init(struct ecrnx_vif *ecrnx_vif);
459 void ecrnx_txq_tdls_vif_deinit(struct ecrnx_vif *vif);
460 void ecrnx_txq_tdls_sta_start(struct ecrnx_vif *ecrnx_vif, u16 reason,
461 struct ecrnx_hw *ecrnx_hw);
462 void ecrnx_txq_tdls_sta_stop(struct ecrnx_vif *ecrnx_vif, u16 reason,
463 struct ecrnx_hw *ecrnx_hw);
464 void ecrnx_txq_prepare(struct ecrnx_hw *ecrnx_hw);
/* TXQ scheduling: add/remove from HW queue list and start/stop with a reason flag */
468 void ecrnx_txq_add_to_hw_list(struct ecrnx_txq *txq);
469 void ecrnx_txq_del_from_hw_list(struct ecrnx_txq *txq);
470 void ecrnx_txq_stop(struct ecrnx_txq *txq, u16 reason);
471 void ecrnx_txq_start(struct ecrnx_txq *txq, u16 reason);
472 void ecrnx_txq_vif_start(struct ecrnx_vif *vif, u16 reason,
473 struct ecrnx_hw *ecrnx_hw);
474 void ecrnx_txq_vif_stop(struct ecrnx_vif *vif, u16 reason,
475 struct ecrnx_hw *ecrnx_hw);
477 #ifdef CONFIG_ECRNX_SOFTMAC
478 void ecrnx_txq_sta_start(struct ecrnx_sta *sta, u16 reason);
479 void ecrnx_txq_sta_stop(struct ecrnx_sta *sta, u16 reason);
480 void ecrnx_txq_tdls_sta_start(struct ecrnx_sta *ecrnx_sta, u16 reason,
481 struct ecrnx_hw *ecrnx_hw);
482 void ecrnx_txq_tdls_sta_stop(struct ecrnx_sta *ecrnx_sta, u16 reason,
483 struct ecrnx_hw *ecrnx_hw);
485 void ecrnx_txq_sta_start(struct ecrnx_sta *sta, u16 reason,
486 struct ecrnx_hw *ecrnx_hw);
487 void ecrnx_txq_sta_stop(struct ecrnx_sta *sta, u16 reason,
488 struct ecrnx_hw *ecrnx_hw);
489 void ecrnx_txq_offchan_start(struct ecrnx_hw *ecrnx_hw);
490 void ecrnx_txq_sta_switch_vif(struct ecrnx_sta *sta, struct ecrnx_vif *old_vif,
491 struct ecrnx_vif *new_vif);
493 #endif /* CONFIG_ECRNX_SOFTMAC */
/* Buffer queuing/confirmation and HW queue processing entry points */
495 int ecrnx_txq_queue_skb(struct sk_buff *skb, struct ecrnx_txq *txq,
496 struct ecrnx_hw *ecrnx_hw, bool retry,
497 struct sk_buff *skb_prev);
498 void ecrnx_txq_confirm_any(struct ecrnx_hw *ecrnx_hw, struct ecrnx_txq *txq,
499 struct ecrnx_hwq *hwq, struct ecrnx_sw_txhdr *sw_txhdr);
500 void ecrnx_txq_drop_skb(struct ecrnx_txq *txq, struct sk_buff *skb, struct ecrnx_hw *ecrnx_hw, bool retry_packet);
502 void ecrnx_hwq_init(struct ecrnx_hw *ecrnx_hw);
503 void ecrnx_hwq_process(struct ecrnx_hw *ecrnx_hw, struct ecrnx_hwq *hwq);
504 void ecrnx_hwq_process_all(struct ecrnx_hw *ecrnx_hw);
506 #endif /* _ECRNX_TXQ_H_ */
506 #endif /* _ECRNX_TXQ_H_ */