2 ******************************************************************************
6 * Copyright (C) ESWIN 2015-2020
8 ******************************************************************************
10 #include <linux/dma-mapping.h>
11 #include <linux/etherdevice.h>
14 #include "ecrnx_defs.h"
16 #include "ecrnx_msg_tx.h"
17 #include "ecrnx_mesh.h"
18 #include "ecrnx_events.h"
19 #include "ecrnx_compat.h"
21 #ifdef CONFIG_ECRNX_ESWIN
22 #include "eswin_utils.h"
24 /******************************************************************************
25 * Power Save functions
26 *****************************************************************************/
28 * ecrnx_set_traffic_status - Inform FW if traffic is available for STA in PS
30 * @ecrnx_hw: Driver main data
31 * @sta: Sta in PS mode
32 * @available: whether traffic is buffered for the STA
33 * @ps_id: type of PS data requested (@LEGACY_PS_ID or @UAPSD_ID)
35 void ecrnx_set_traffic_status(struct ecrnx_hw *ecrnx_hw,
36 struct ecrnx_sta *sta,
/* NOTE(review): listing is elided here -- the remaining parameters
 * (available / ps_id, per the kerneldoc above) and the function opening
 * are not visible in this excerpt. */
/* TDLS peers use a dedicated traffic-indication message sent on the
 * STA's owning vif. */
40 if (sta->tdls.active) {
41 ecrnx_send_tdls_peer_traffic_ind_req(ecrnx_hw,
42 ecrnx_hw->vif_table[sta->vif_idx]);
/* Non-TDLS path: tell FW whether traffic is buffered; any ps_id other
 * than LEGACY_PS_ID is reported as U-APSD. */
44 bool uapsd = (ps_id != LEGACY_PS_ID);
45 ecrnx_send_me_traffic_ind(ecrnx_hw, sta->sta_idx, uapsd, available);
46 trace_ps_traffic_update(sta->sta_idx, available, uapsd);
51 * ecrnx_ps_bh_enable - Enable/disable PS mode for one STA
53 * @ecrnx_hw: Driver main data
54 * @sta: Sta which enters/leaves PS mode
55 * @enable: PS mode status
57 * This function will enable/disable PS mode for one STA.
58 * When enabling PS mode:
59 * - Stop all STA's txq for ECRNX_TXQ_STOP_STA_PS reason
60 * - Count how many buffers are already ready for this STA
61 * - For BC/MC sta, update all queued SKB to use hw_queue BCMC
62 * - Update TIM if some packet are ready
64 * When disabling PS mode:
65 * - Start all STA's txq for ECRNX_TXQ_STOP_STA_PS reason
66 * - For BC/MC sta, update all queued SKB to use hw_queue AC_BE
67 * - Update TIM if some packet are ready (otherwise fw will not update TIM
68 * in beacon for this STA)
70 * All counter/skb updates are protected from TX path by taking tx_lock
72 * NOTE: _bh_ in function name indicates that this function is called
73 * from a bottom_half tasklet.
75 void ecrnx_ps_bh_enable(struct ecrnx_hw *ecrnx_hw, struct ecrnx_sta *sta,
/* (the 'enable' parameter and opening brace are elided in this excerpt) */
78 struct ecrnx_txq *txq;
/* --- PS enable path: under tx_lock, mark STA in PS, reset service-period
 * counters, stop its txqs and count already-buffered packets --- */
83 spin_lock(&ecrnx_hw->tx_lock);
84 sta->ps.active = true;
85 sta->ps.sp_cnt[LEGACY_PS_ID] = 0;
86 sta->ps.sp_cnt[UAPSD_ID] = 0;
87 ecrnx_txq_sta_stop(sta, ECRNX_TXQ_STOP_STA_PS, ecrnx_hw);
/* BC/MC "sta" has a single txq; move it onto the dedicated BCMC hw queue */
89 if (is_multicast_sta(sta->sta_idx)) {
90 txq = ecrnx_txq_sta_get(sta, 0, ecrnx_hw);
91 sta->ps.pkt_ready[LEGACY_PS_ID] = skb_queue_len(&txq->sk_list);
92 sta->ps.pkt_ready[UAPSD_ID] = 0;
93 txq->hwq = &ecrnx_hw->hwq[ECRNX_HWQ_BCMC];
/* Real STA: accumulate queued buffers per PS type across all its txqs */
96 sta->ps.pkt_ready[LEGACY_PS_ID] = 0;
97 sta->ps.pkt_ready[UAPSD_ID] = 0;
98 foreach_sta_txq(sta, txq, i, ecrnx_hw) {
99 sta->ps.pkt_ready[txq->ps_id] += skb_queue_len(&txq->sk_list);
103 spin_unlock(&ecrnx_hw->tx_lock);
/* FW traffic status is updated outside the tx_lock */
105 if (sta->ps.pkt_ready[LEGACY_PS_ID])
106 ecrnx_set_traffic_status(ecrnx_hw, sta, true, LEGACY_PS_ID);
108 if (sta->ps.pkt_ready[UAPSD_ID])
109 ecrnx_set_traffic_status(ecrnx_hw, sta, true, UAPSD_ID);
/* --- PS disable path (else-branch; the branch marker is elided here) --- */
111 trace_ps_disable(sta->sta_idx);
113 spin_lock(&ecrnx_hw->tx_lock);
114 sta->ps.active = false;
/* BC/MC traffic goes back to the best-effort hw queue */
116 if (is_multicast_sta(sta->sta_idx)) {
117 txq = ecrnx_txq_sta_get(sta, 0, ecrnx_hw);
118 txq->hwq = &ecrnx_hw->hwq[ECRNX_HWQ_BE];
122 foreach_sta_txq(sta, txq, i, ecrnx_hw) {
127 ecrnx_txq_sta_start(sta, ECRNX_TXQ_STOP_STA_PS, ecrnx_hw);
128 spin_unlock(&ecrnx_hw->tx_lock);
/* Clear FW traffic status so TIM stops advertising buffered traffic
 * for this STA (see the kerneldoc above) */
130 if (sta->ps.pkt_ready[LEGACY_PS_ID])
131 ecrnx_set_traffic_status(ecrnx_hw, sta, false, LEGACY_PS_ID);
133 if (sta->ps.pkt_ready[UAPSD_ID])
134 ecrnx_set_traffic_status(ecrnx_hw, sta, false, UAPSD_ID);
139 * ecrnx_ps_bh_traffic_req - Handle traffic request for STA in PS mode
141 * @ecrnx_hw: Driver main data
142 * @sta: Sta which enters/leaves PS mode
143 * @pkt_req: number of pkt to push
144 * @ps_id: type of PS data requested (@LEGACY_PS_ID or @UAPSD_ID)
146 * This function will make sure that @pkt_req are pushed to fw
147 * whereas the STA is in PS mode.
148 * If request is 0, send all traffic
149 * If request is greater than available pkt, reduce request
150 * Note: request will also be reduced if txq credits are not available
152 * All counter updates are protected from TX path by taking tx_lock
154 * NOTE: _bh_ in function name indicates that this function is called
155 * from the bottom_half tasklet.
157 void ecrnx_ps_bh_traffic_req(struct ecrnx_hw *ecrnx_hw, struct ecrnx_sta *sta,
158 u16 pkt_req, u8 ps_id)
162 struct ecrnx_txq *txq;
/* Sanity check: FW should only request traffic for a STA in PS.
 * Non-ESWIN builds WARN; ESWIN builds only log (debug line below). */
164 #ifndef CONFIG_ECRNX_ESWIN
165 if (WARN(!sta->ps.active, "sta %pM is not in Power Save mode",
171 ECRNX_DBG(" sta is not in Power Save mode %02x:%02x:%02x:%02x:%02x:%02x %d %d \n", sta->mac_addr[0], sta->mac_addr[1], sta->mac_addr[2], \
172 sta->mac_addr[3], sta->mac_addr[4], sta->mac_addr[5],pkt_req, ps_id);
177 trace_ps_traffic_req(sta, pkt_req, ps_id);
179 spin_lock(&ecrnx_hw->tx_lock);
181 /* Fw may ask to stop a service period with PS_SP_INTERRUPTED. This only
182 happens for p2p-go interface if NOA starts during a service period */
183 if ((pkt_req == PS_SP_INTERRUPTED) && (ps_id == UAPSD_ID)) {
185 sta->ps.sp_cnt[ps_id] = 0;
186 foreach_sta_txq(sta, txq, tid, ecrnx_hw) {
/* Packets available for this PS type = buffered minus already in an SP */
192 pkt_ready_all = (sta->ps.pkt_ready[ps_id] - sta->ps.sp_cnt[ps_id]);
194 /* Don't start SP until previous one is finished or we don't have
195 packet ready (which must not happen for U-APSD) */
196 if (sta->ps.sp_cnt[ps_id] || pkt_ready_all <= 0) {
200 /* Adapt request to what is available. */
201 if (pkt_req == 0 || pkt_req > pkt_ready_all) {
202 pkt_req = pkt_ready_all;
205 /* Reset the SP counter */
206 sta->ps.sp_cnt[ps_id] = 0;
208 /* "dispatch" the request between txq */
209 if (is_multicast_sta(sta->sta_idx)) {
/* BC/MC: single txq; cap the request by available txq credits */
210 txq = ecrnx_txq_sta_get(sta, 0, ecrnx_hw);
211 if (txq->credits <= 0)
213 if (pkt_req > txq->credits)
214 pkt_req = txq->credits;
215 txq->push_limit = pkt_req;
216 sta->ps.sp_cnt[ps_id] = pkt_req;
217 ECRNX_DBG("%s-%d:sta:0x%p, sta_idx:%d, txq:0x%p, txq status:%d \n", __func__, __LINE__, sta, sta->sta_idx, txq, txq->status);
218 ecrnx_txq_add_to_hw_list(txq);
219 ecrnx_txq_sta_start(sta, ECRNX_TXQ_STOP_STA_PS, ecrnx_hw);
/* Unicast STA: walk TIDs in priority order and spread the request */
223 for (i = 0; i < NX_NB_TID_PER_STA; i++) {
224 tid = nx_tid_prio[i];
226 #ifdef CONFIG_ECRNX_SOFTMAC
227 txq = ecrnx_txq_sta_get(sta, tid);
229 txq = ecrnx_txq_sta_get(sta, tid, ecrnx_hw);
232 txq_len = skb_queue_len(&txq->sk_list);
/* Skip txqs belonging to the other PS type */
234 if (txq->ps_id != ps_id)
237 if (txq_len > txq->credits)
238 txq_len = txq->credits;
243 if (txq_len < pkt_req) {
244 /* Not enough pkt queued in this txq, add this
245 txq to hwq list and process next txq */
247 txq->push_limit = txq_len;
248 sta->ps.sp_cnt[ps_id] += txq_len;
249 ecrnx_txq_add_to_hw_list(txq);
251 /* Enough pkt in this txq to complete the request
252 add this txq to hwq list and stop processing txq */
253 txq->push_limit = pkt_req;
254 sta->ps.sp_cnt[ps_id] += pkt_req;
255 ecrnx_txq_add_to_hw_list(txq);
262 spin_unlock(&ecrnx_hw->tx_lock);
265 /******************************************************************************
267 *****************************************************************************/
268 #define PRIO_STA_NULL 0xAA
270 static const int ecrnx_down_hwq2tid[3] = {
/* Downgrade the skb's access category while the current AC is subject to
 * admission control (sta->acm bitmap), then rewrite skb->priority to the
 * TID matching the downgraded AC. */
276 static void ecrnx_downgrade_ac(struct ecrnx_sta *sta, struct sk_buff *skb)
278 int8_t ac = ecrnx_tid2hwq[skb->priority];
/* NOTE(review): "Unexepcted" typo below is inside a runtime WARN string;
 * intentionally left untouched here. */
280 if (WARN((ac > ECRNX_HWQ_VO),
281 "Unexepcted ac %d for skb before downgrade", ac))
/* BK is the lowest AC: no further downgrade possible */
284 while (sta->acm & BIT(ac)) {
285 if (ac == ECRNX_HWQ_BK) {
290 skb->priority = ecrnx_down_hwq2tid[ac];
/* Account one acknowledged transmission on the destination STA:
 * bump packet/byte counters and refresh the last-activity timestamp.
 * Unacknowledged frames and txqs without a STA are ignored. */
294 static void ecrnx_tx_statistic(struct ecrnx_hw *ecrnx_hw, struct ecrnx_txq *txq,
295 union ecrnx_hw_txstatus ecrnx_txst, unsigned int data_len)
297 struct ecrnx_sta *sta = txq->sta;
298 if (!sta || !ecrnx_txst.acknowledged)
300 sta->stats.tx_pkts ++;
301 sta->stats.tx_bytes += data_len;
302 sta->stats.last_act = ecrnx_hw->stats.last_tx;
/* Select the netdev tx queue for an outgoing skb: resolve the destination
 * STA from the interface type and ethernet header, classify the 802.1d
 * priority, and map to the STA's txq. Returns a netdev queue index. */
304 u16 ecrnx_select_txq(struct ecrnx_vif *ecrnx_vif, struct sk_buff *skb)
306 struct ecrnx_hw *ecrnx_hw = ecrnx_vif->ecrnx_hw;
307 struct wireless_dev *wdev = &ecrnx_vif->wdev;
308 struct ecrnx_sta *sta = NULL;
309 struct ecrnx_txq *txq;
311 bool tdls_mgmgt_frame = false;
313 switch (wdev->iftype) {
/* STA/P2P-client: destination is the AP, unless an active TDLS link
 * matches the frame's destination address */
314 case NL80211_IFTYPE_STATION:
315 case NL80211_IFTYPE_P2P_CLIENT:
318 eth = (struct ethhdr *)skb->data;
319 if (eth->h_proto == cpu_to_be16(ETH_P_TDLS)) {
320 tdls_mgmgt_frame = true;
322 if ((ecrnx_vif->tdls_status == TDLS_LINK_ACTIVE) &&
323 (ecrnx_vif->sta.tdls_sta != NULL) &&
324 (memcmp(eth->h_dest, ecrnx_vif->sta.tdls_sta->mac_addr, ETH_ALEN) == 0))
325 sta = ecrnx_vif->sta.tdls_sta;
327 sta = ecrnx_vif->sta.ap;
330 case NL80211_IFTYPE_AP_VLAN:
332 struct ecrnx_sta *cur;
333 struct ethhdr *eth = (struct ethhdr *)skb->data;
335 if (ecrnx_vif->ap_vlan.sta_4a) {
336 sta = ecrnx_vif->ap_vlan.sta_4a;
340 /* AP_VLAN interface is not used for a 4A STA,
341 fallback searching sta among all AP's clients */
342 ecrnx_vif = ecrnx_vif->ap_vlan.master;
344 if (is_multicast_ether_addr(eth->h_dest)) {
345 sta = &ecrnx_hw->sta_table[ecrnx_vif->ap.bcmc_index];
347 list_for_each_entry(cur, &ecrnx_vif->ap.sta_list, list) {
348 if (!memcmp(cur->mac_addr, eth->h_dest, ETH_ALEN)) {
/* AP/P2P-GO: multicast goes to the BC/MC pseudo-STA, unicast is looked
 * up in the AP's client list by destination MAC */
357 case NL80211_IFTYPE_AP:
358 case NL80211_IFTYPE_P2P_GO:
360 struct ecrnx_sta *cur;
361 struct ethhdr *eth = (struct ethhdr *)skb->data;
363 if (is_multicast_ether_addr(eth->h_dest)) {
364 sta = &ecrnx_hw->sta_table[ecrnx_vif->ap.bcmc_index];
366 list_for_each_entry(cur, &ecrnx_vif->ap.sta_list, list) {
367 if (!memcmp(cur->mac_addr, eth->h_dest, ETH_ALEN)) {
376 case NL80211_IFTYPE_MESH_POINT:
378 struct ethhdr *eth = (struct ethhdr *)skb->data;
380 if (!ecrnx_vif->is_resending) {
382 * If ethernet source address is not the address of a mesh wireless interface, we are proxy for
383 * this address and have to inform the HW
/* NOTE(review): the "ð" sequences below are HTML-entity mojibake for
 * "&eth" (i.e. &eth->h_source / &eth->h_dest) -- the original source
 * text was corrupted during extraction; restore before compiling. */
385 if (memcmp(ð->h_source[0], &ecrnx_vif->ndev->perm_addr[0], ETH_ALEN)) {
386 /* Check if LMAC is already informed */
387 if (!ecrnx_get_mesh_proxy_info(ecrnx_vif, (u8 *)ð->h_source, true)) {
388 ecrnx_send_mesh_proxy_add_req(ecrnx_hw, ecrnx_vif, (u8 *)ð->h_source);
393 if (is_multicast_ether_addr(eth->h_dest)) {
394 sta = &ecrnx_hw->sta_table[ecrnx_vif->ap.bcmc_index];
396 /* Path to be used */
397 struct ecrnx_mesh_path *p_mesh_path = NULL;
398 struct ecrnx_mesh_path *p_cur_path;
399 /* Check if destination is proxied by a peer Mesh STA */
400 struct ecrnx_mesh_proxy *p_mesh_proxy = ecrnx_get_mesh_proxy_info(ecrnx_vif, (u8 *)ð->h_dest, false);
401 /* Mesh Target address */
402 struct mac_addr *p_tgt_mac_addr;
405 p_tgt_mac_addr = &p_mesh_proxy->proxy_addr;
407 p_tgt_mac_addr = (struct mac_addr *)ð->h_dest;
410 /* Look for path with provided target address */
411 list_for_each_entry(p_cur_path, &ecrnx_vif->ap.mpath_list, list) {
412 if (!memcmp(&p_cur_path->tgt_mac_addr, p_tgt_mac_addr, ETH_ALEN)) {
413 p_mesh_path = p_cur_path;
419 sta = p_mesh_path->nhop_sta;
/* No path yet: ask FW to create one toward the target address */
421 ecrnx_send_mesh_path_create_req(ecrnx_hw, ecrnx_vif, (u8 *)p_tgt_mac_addr);
433 if (tdls_mgmgt_frame) {
434 skb_set_queue_mapping(skb, NX_STA_NDEV_IDX(skb->priority, sta->sta_idx));
436 /* use the data classifier to determine what 802.1d tag the
/* cfg80211_classify8021d gained a qos_map parameter in kernel 3.14 */
438 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
439 skb->priority = cfg80211_classify8021d(skb) & IEEE80211_QOS_CTL_TAG1D_MASK;
441 skb->priority = cfg80211_classify8021d(skb, NULL) & IEEE80211_QOS_CTL_TAG1D_MASK;
445 ecrnx_downgrade_ac(sta, skb);
447 txq = ecrnx_txq_sta_get(sta, skb->priority, ecrnx_hw);
448 netdev_queue = txq->ndev_idx;
452 skb->priority = 0xFF;
453 txq = ecrnx_txq_sta_get(sta, 0, ecrnx_hw);
454 netdev_queue = txq->ndev_idx;
458 /* This packet will be dropped in xmit function, still need to select
459 an active queue for xmit to be called. As it is most likely to happen
460 for AP interface, select BCMC queue
461 (TODO: select another queue if BCMC queue is stopped) */
462 skb->priority = PRIO_STA_NULL;
463 netdev_queue = NX_BCMC_TXQ_NDEV_IDX;
466 BUG_ON(netdev_queue >= NX_NB_NDEV_TXQ);
472 * ecrnx_set_more_data_flag - Update MORE_DATA flag in tx sw desc
474 * @ecrnx_hw: Driver main data
475 * @sw_txhdr: Header for pkt to be pushed
477 * If STA is in PS mode
478 * - Set EOSP in case the packet is the last of the UAPSD service period
479 * - Set MORE_DATA flag if more pkt are ready for this sta
480 * - Update TIM if this is the last pkt buffered for this sta
482 * note: tx_lock already taken.
484 static inline void ecrnx_set_more_data_flag(struct ecrnx_hw *ecrnx_hw,
485 struct ecrnx_sw_txhdr *sw_txhdr)
487 struct ecrnx_sta *sta = sw_txhdr->ecrnx_sta;
488 struct ecrnx_vif *vif = sw_txhdr->ecrnx_vif;
489 struct ecrnx_txq *txq = sw_txhdr->txq;
/* Only relevant while the STA is in PS: this push consumes one buffered
 * packet and one slot of the current service period */
491 if (unlikely(sta->ps.active)) {
492 sta->ps.pkt_ready[txq->ps_id]--;
493 sta->ps.sp_cnt[txq->ps_id]--;
/* End Of Service Period: last pkt of the SP for U-APSD, mesh or TDLS */
497 if (((txq->ps_id == UAPSD_ID) || (vif->wdev.iftype == NL80211_IFTYPE_MESH_POINT) || (sta->tdls.active))
498 && !sta->ps.sp_cnt[txq->ps_id]) {
499 sw_txhdr->desc.host.flags |= TXU_CNTRL_EOSP;
/* MORE_DATA while something is still buffered; otherwise clear the
 * FW traffic status so TIM is updated (see kerneldoc above) */
502 if (sta->ps.pkt_ready[txq->ps_id]) {
503 sw_txhdr->desc.host.flags |= TXU_CNTRL_MORE_DATA;
505 ecrnx_set_traffic_status(ecrnx_hw, sta, false, txq->ps_id);
511 * ecrnx_get_tx_info - Get STA and tid for one skb
513 * @ecrnx_vif: vif ptr
515 * @tid: pointer updated with the tid to use for this skb
517 * @return: pointer on the destination STA (may be NULL)
519 * skb has already been parsed in ecrnx_select_queue function
520 * simply re-read information from skb.
522 static struct ecrnx_sta *ecrnx_get_tx_info(struct ecrnx_vif *ecrnx_vif,
526 struct ecrnx_hw *ecrnx_hw = ecrnx_vif->ecrnx_hw;
527 struct ecrnx_sta *sta;
/* TID was stored in skb->priority by ecrnx_select_txq() */
530 *tid = skb->priority;
/* PRIO_STA_NULL marks a pkt whose STA could not be resolved at queue
 * selection time (it will be dropped in xmit) */
531 if (unlikely(skb->priority == PRIO_STA_NULL)) {
/* Recover the STA index from the netdev queue mapping: the BCMC queue
 * maps to the per-vif broadcast pseudo-STA, otherwise queues are laid
 * out as NX_NB_TID_PER_STA entries per STA */
534 int ndev_idx = skb_get_queue_mapping(skb);
536 if (ndev_idx == NX_BCMC_TXQ_NDEV_IDX)
537 sta_idx = NX_REMOTE_STA_MAX + master_vif_idx(ecrnx_vif);
539 sta_idx = ndev_idx / NX_NB_TID_PER_STA;
541 sta = &ecrnx_hw->sta_table[sta_idx];
547 #ifndef CONFIG_ECRNX_ESWIN
549 * ecrnx_prep_tx - Prepare buffer for DMA transmission
551 * @ecrnx_hw: Driver main data
552 * @txhdr: Tx descriptor
554 * Maps hw_txhdr and buffer data for transmission via DMA.
555 * - Data buffer will be downloaded by embedded side.
556 * - hw_txhdr will be uploaded by embedded side when buffer has been
557 * transmitted over the air.
559 static int ecrnx_prep_dma_tx(struct ecrnx_hw *ecrnx_hw, struct ecrnx_txhdr *txhdr, bool eth_hdr)
561 struct ecrnx_sw_txhdr *sw_txhdr = txhdr->sw_hdr;
562 struct ecrnx_hw_txhdr *hw_txhdr = &txhdr->hw_hdr;
563 struct txdesc_api *desc = &sw_txhdr->desc;
/* Clear the confirmation status before handing the buffer to the HW */
566 txhdr->hw_hdr.cfm.status.value = 0;
567 /* MAP (and sync) memory for DMA */
568 dma_addr = dma_map_single(ecrnx_hw->dev, hw_txhdr,
569 sw_txhdr->map_len, DMA_BIDIRECTIONAL);
570 if (WARN_ON(dma_mapping_error(ecrnx_hw->dev, dma_addr)))
573 sw_txhdr->dma_addr = dma_addr;
/* status descriptor sits at the start of the mapping; payload follows
 * at ECRNX_TX_DATA_OFT, optionally skipping the ethernet header */
575 desc->host.status_desc_addr = dma_addr;
576 dma_addr += ECRNX_TX_DATA_OFT(sw_txhdr);
578 dma_addr += sizeof(struct ethhdr);
/* SPLIT_TX_BUF builds use per-fragment arrays; otherwise scalars */
579 #ifdef CONFIG_ECRNX_SPLIT_TX_BUF
580 desc->host.packet_len[0] = sw_txhdr->frame_len;
581 desc->host.packet_addr[0] = dma_addr;
582 desc->host.packet_cnt = 1;
584 desc->host.packet_len = sw_txhdr->frame_len;
585 desc->host.packet_addr = dma_addr;
592 * ecrnx_tx_push - Push one packet to fw
594 * @ecrnx_hw: Driver main data
595 * @txhdr: tx desc of the buffer to push
596 * @flags: push flags (see @ecrnx_push_flags)
598 * Push one packet to fw. Sw desc of the packet has already been updated.
599 * Only MORE_DATA flag will be set if needed.
601 void ecrnx_tx_push(struct ecrnx_hw *ecrnx_hw, struct ecrnx_txhdr *txhdr, int flags)
603 struct ecrnx_sw_txhdr *sw_txhdr = txhdr->sw_hdr;
604 struct sk_buff *skb = sw_txhdr->skb;
605 struct ecrnx_txq *txq = sw_txhdr->txq;
606 u16 hw_queue = txq->hwq->id;
/* Caller must hold tx_lock (see kerneldoc above) */
609 lockdep_assert_held(&ecrnx_hw->tx_lock);
611 /* RETRY flag is not always set so retest here */
613 flags |= ECRNX_PUSH_RETRY;
/* Last retry buffer of the txq has been consumed */
615 if (txq->nb_retry == 0) {
616 WARN(skb != txq->last_retry_skb,
617 "last retry buffer is not the expected one");
618 txq->last_retry_skb = NULL;
620 } else if (!(flags & ECRNX_PUSH_RETRY)) {
624 #ifdef CONFIG_ECRNX_AMSDUS_TX
/* Pushing the A-MSDU under construction closes it; account its size */
625 if (txq->amsdu == sw_txhdr) {
626 WARN((flags & ECRNX_PUSH_RETRY), "End A-MSDU on a retry");
627 ecrnx_hw->stats.amsdus[sw_txhdr->amsdu.nb - 1].done++;
629 } else if (!(flags & ECRNX_PUSH_RETRY) &&
630 !(sw_txhdr->desc.host.flags & TXU_CNTRL_AMSDU)) {
631 ecrnx_hw->stats.amsdus[0].done++;
633 #endif /* CONFIG_ECRNX_AMSDUS_TX */
635 /* Wait here to update hw_queue, as for multicast STA hwq may change
636 between queue and push (because of PS) */
637 sw_txhdr->hw_queue = hw_queue;
639 #ifdef CONFIG_ECRNX_MUMIMO_TX
640 /* MU group is only selected during hwq processing */
641 sw_txhdr->desc.host.mumimo_info = txq->mumimo_info;
642 user = ECRNX_TXQ_POS_ID(txq);
643 #endif /* CONFIG_ECRNX_MUMIMO_TX */
645 if (sw_txhdr->ecrnx_sta) {
646 /* only for AP mode */
647 ecrnx_set_more_data_flag(ecrnx_hw, sw_txhdr);
650 trace_push_desc(skb, sw_txhdr, flags);
/* Out of credits: stop the txq until confirmations return some.
 * NOTE(review): "reaosn" typo below is in a runtime debug string. */
652 txq->pkt_pushed[user]++;
653 if (txq->credits <= 0){
654 ECRNX_DBG("%s-%d:ecrnx_txq_stop,reaosn:0x%x \n", __func__, __LINE__, ECRNX_TXQ_STOP_FULL);
655 ecrnx_txq_stop(txq, ECRNX_TXQ_STOP_FULL);
/* Hand the descriptor to IPC and account the in-flight confirmation */
661 ecrnx_ipc_txdesc_push(ecrnx_hw, &sw_txhdr->desc, skb, hw_queue, user);
662 txq->hwq->credits[user]--;
663 ecrnx_hw->stats.cfm_balance[hw_queue]++;
669 * ecrnx_tx_retry - Push an AMPDU pkt that need to be retried
671 * @ecrnx_hw: Driver main data
672 * @skb: pkt to re-push
673 * @txhdr: tx desc of the pkt to re-push
674 * @sw_retry: Indicates if fw decide to retry this buffer
675 * (i.e. it has never been transmitted over the air)
677 * Called when a packet needs to be repushed to the firmware.
678 * First update sw descriptor and then queue it in the retry list.
680 static void ecrnx_tx_retry(struct ecrnx_hw *ecrnx_hw, struct sk_buff *skb,
681 struct ecrnx_txhdr *txhdr, bool sw_retry)
683 struct ecrnx_sw_txhdr *sw_txhdr = txhdr->sw_hdr;
684 struct tx_cfm_tag *cfm = &txhdr->hw_hdr.cfm;
685 struct ecrnx_txq *txq = sw_txhdr->txq;
686 #ifndef CONFIG_ECRNX_ESWIN
687 dma_addr_t cfm_dma_addr;
/* Re-use SN/PN/timestamp from the confirmation so the retransmission
 * keeps the original sequence and crypto numbers */
692 sw_txhdr->desc.host.sn = cfm->sn;
693 sw_txhdr->desc.host.pn[0] = cfm->pn[0];
694 sw_txhdr->desc.host.pn[1] = cfm->pn[1];
695 sw_txhdr->desc.host.pn[2] = cfm->pn[2];
696 sw_txhdr->desc.host.pn[3] = cfm->pn[3];
697 sw_txhdr->desc.host.timestamp = cfm->timestamp;
698 sw_txhdr->desc.host.flags |= TXU_CNTRL_RETRY;
700 #ifdef CONFIG_ECRNX_AMSDUS_TX
701 if (sw_txhdr->desc.host.flags & TXU_CNTRL_AMSDU)
702 ecrnx_hw->stats.amsdus[sw_txhdr->amsdu.nb - 1].failed++;
706 /* MORE_DATA will be re-set if needed when pkt will be repushed */
707 sw_txhdr->desc.host.flags &= ~TXU_CNTRL_MORE_DATA;
709 cfm->status.value = 0;
710 //TODO:need to check here.
711 #ifndef CONFIG_ECRNX_ESWIN
712 cfm_dma_addr = (ptr_addr)sw_txhdr->desc.host.status_desc_addr;
/* NOTE(review): sizeof(cfm) below is the size of the *pointer*, not of
 * struct tx_cfm_tag -- this looks like it should be sizeof(*cfm).
 * Verify against the full source before changing. */
713 dma_sync_single_for_device(ecrnx_hw->dev, cfm_dma_addr, sizeof(cfm), DMA_BIDIRECTIONAL);
/* Credits may have been restored by the confirmation: restart the txq */
716 if (txq->credits > 0){
717 ecrnx_txq_start(txq, ECRNX_TXQ_STOP_FULL);
718 ECRNX_DBG("%s-%d:ecrnx_txq_start,reaosn:0x%x \n", __func__, __LINE__, ECRNX_TXQ_STOP_FULL);
720 /* Queue the buffer */
721 if (ecrnx_txq_queue_skb(skb, txq, ecrnx_hw, true, NULL))
723 /* baoyong:we need to send this AMPDU retry pkt asap, so process it now */
724 ecrnx_hwq_process(ecrnx_hw, txq->hwq);
731 #ifdef CONFIG_ECRNX_AMSDUS_TX
732 /* return size of subframe (including header) */
733 static inline int ecrnx_amsdu_subframe_length(struct ethhdr *eth, int eth_len)
735 /* ethernet header is replaced with amsdu header that has the same size.
736 Only need to account for the LLC/SNAP header when it will be added */
/* EtherType frames (proto >= ETH_P_802_3_MIN) get an RFC1042 LLC/SNAP
 * header plus the 2-byte ethertype */
739 if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
740 len += sizeof(rfc1042_header) + 2;
/* Decide whether this skb may be aggregated into an A-MSDU.
 * Currently unconditional (see TODO comment); body elided in excerpt. */
746 static inline bool ecrnx_amsdu_is_aggregable(struct sk_buff *skb)
748 /* need to add some check on buffer to see if it can be aggregated ? */
754 * ecrnx_amsdu_del_subframe_header - remove AMSDU header
756 * amsdu_txhdr: amsdu tx descriptor
758 * Move back the ethernet header at the "beginning" of the data buffer.
759 * (which has been moved in @ecrnx_amsdu_add_subframe_header)
761 static void ecrnx_amsdu_del_subframe_header(struct ecrnx_amsdu_txhdr *amsdu_txhdr)
763 struct sk_buff *skb = amsdu_txhdr->skb;
/* Walk past the amsdu tx descriptor to the preserved ethernet header,
 * then past padding + eth header to the start of the A-MSDU framing */
768 pos += sizeof(struct ecrnx_amsdu_txhdr);
769 eth = (struct ethhdr*)pos;
770 pos += amsdu_txhdr->pad + sizeof(struct ethhdr);
/* Skip the LLC/SNAP header if one was inserted (EtherType frames) */
772 if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
773 pos += sizeof(rfc1042_header) + 2;
/* Restore the original ethernet header in front of the payload and
 * trim everything that preceded it */
776 memmove(pos, eth, sizeof(*eth));
777 skb_pull(skb, (pos - skb->data));
781 * ecrnx_amsdu_add_subframe_header - Add AMSDU header and link subframe
783 * @ecrnx_hw Driver main data
784 * @skb Buffer to aggregate
785 * @sw_txhdr Tx descriptor for the first A-MSDU subframe
787 * return 0 on success, -1 otherwise
789 * This functions Add A-MSDU header and LLC/SNAP header in the buffer
790 * and update sw_txhdr of the first subframe to link this buffer.
791 * If an error happens, the buffer will be queued as a normal buffer.
795 * +-------------+ +-------------+
796 * | HEADROOM | | HEADROOM |
797 * | | +-------------+ <- data
798 * | | | amsdu_txhdr |
800 * | | +-------------+
801 * | | | ETH hdr | keep original eth hdr
802 * | | | | to restore it once transmitted
803 * | | +-------------+ <- packet_addr[x]
805 * | | +-------------+
806 * data -> +-------------+ | AMSDU HDR |
807 * | ETH hdr | +-------------+
809 * +-------------+ +-------------+
812 * +-------------+ +-------------+
814 * Called with tx_lock hold
816 static int ecrnx_amsdu_add_subframe_header(struct ecrnx_hw *ecrnx_hw,
818 struct ecrnx_sw_txhdr *sw_txhdr)
820 struct ecrnx_amsdu *amsdu = &sw_txhdr->amsdu;
821 struct ecrnx_amsdu_txhdr *amsdu_txhdr;
822 struct ethhdr *amsdu_hdr, *eth = (struct ethhdr *)skb->data;
823 int headroom_need, map_len, msdu_len, amsdu_len, map_oft = 0;
824 #ifndef CONFIG_ECRNX_ESWIN
/* Compute headroom needed for descriptor + padding + A-MSDU framing,
 * and the resulting subframe/msdu lengths (LLC/SNAP added for
 * EtherType frames) -- see the layout diagram in the kerneldoc above */
829 map_len = ECRNX_TX_DMA_MAP_LEN(skb);
830 msdu_len = skb->len - sizeof(*eth);
831 headroom_need = sizeof(*amsdu_txhdr) + amsdu->pad +
833 if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
834 headroom_need += sizeof(rfc1042_header) + 2;
835 msdu_len += sizeof(rfc1042_header) + 2;
837 amsdu_len = msdu_len + sizeof(*amsdu_hdr) + amsdu->pad;
839 /* we should have enough headroom (checked in xmit) */
840 if (WARN_ON(skb_headroom(skb) < headroom_need)) {
844 /* allocate headroom */
845 pos = skb_push(skb, headroom_need);
846 amsdu_txhdr = (struct ecrnx_amsdu_txhdr *)pos;
847 pos += sizeof(*amsdu_txhdr);
849 /* move eth header */
850 memmove(pos, eth, sizeof(*eth));
851 eth = (struct ethhdr *)pos;
854 /* Add padding from previous subframe */
856 memset(pos, 0, amsdu->pad);
/* Build the A-MSDU subframe header: DA/SA copied from the ethernet
 * header, length field = MSDU length */
860 amsdu_hdr = (struct ethhdr *)pos;
861 memcpy(amsdu_hdr->h_dest, eth->h_dest, ETH_ALEN);
862 memcpy(amsdu_hdr->h_source, eth->h_source, ETH_ALEN);
863 amsdu_hdr->h_proto = htons(msdu_len);
864 pos += sizeof(*amsdu_hdr);
866 if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
867 memcpy(pos, rfc1042_header, sizeof(rfc1042_header));
868 pos += sizeof(rfc1042_header) + 2;
/* Map at least map_len bytes; shift the start back if the subframe is
 * shorter than the minimum mapping length */
871 if (amsdu_len < map_len) {
872 map_oft = map_len - amsdu_len;
873 map_start -= map_oft;
875 /* MAP (and sync) memory for DMA */
876 #ifndef CONFIG_ECRNX_ESWIN
877 dma_addr = dma_map_single(ecrnx_hw->dev, map_start, map_len, DMA_BIDIRECTIONAL);
878 if (WARN_ON(dma_mapping_error(ecrnx_hw->dev, dma_addr))) {
/* Mapping failed: undo the header insertion and fall back to a
 * normal (non-aggregated) buffer */
880 memmove(pos, eth, sizeof(*eth));
881 skb_pull(skb, headroom_need);
886 /* update amsdu_txhdr */
887 amsdu_txhdr->map_len = map_len;
888 #ifdef CONFIG_ECRNX_ESWIN
889 amsdu_txhdr->send_pos = map_start;
891 amsdu_txhdr->dma_addr = dma_addr;
893 amsdu_txhdr->skb = skb;
894 amsdu_txhdr->pad = amsdu->pad;
895 amsdu_txhdr->msdu_len = msdu_len;
897 /* update ecrnx_sw_txhdr (of the first subframe) */
898 BUG_ON(amsdu->nb != sw_txhdr->desc.host.packet_cnt);
899 #ifdef CONFIG_ECRNX_ESWIN
900 sw_txhdr->desc.host.packet_addr[amsdu->nb] = skb;
902 sw_txhdr->desc.host.packet_addr[amsdu->nb] = dma_addr + map_oft;
904 sw_txhdr->desc.host.packet_len[amsdu->nb] = amsdu_len;
905 sw_txhdr->desc.host.packet_cnt++;
/* Record padding owed by the *next* subframe and link this one into
 * the first subframe's list */
908 amsdu->pad = AMSDU_PADDING(amsdu_len - amsdu->pad);
909 list_add_tail(&amsdu_txhdr->list, &amsdu->hdrs);
910 amsdu->len += amsdu_len;
912 ecrnx_ipc_sta_buffer(ecrnx_hw, sw_txhdr->txq->sta,
913 sw_txhdr->txq->tid, msdu_len);
915 trace_amsdu_subframe(sw_txhdr);
920 * ecrnx_amsdu_add_subframe - Add this buffer as an A-MSDU subframe if possible
922 * @ecrnx_hw Driver main data
923 * @skb Buffer to aggregate if possible
924 * @sta Destination STA
925 * @txq sta's txq used for this buffer
927 * Try to aggregate the buffer in an A-MSDU. If it succeeds then the
928 * buffer is added as a new A-MSDU subframe with AMSDU and LLC/SNAP
929 * headers added (so FW won't have to modify this subframe).
931 * To be added as subframe :
932 * - sta must allow amsdu
933 * - buffer must be aggregable (to be defined)
934 * - at least one other aggregable buffer is pending in the queue
935 * or an a-msdu (with enough free space) is currently in progress
937 * returns true if buffer has been added as A-MSDU subframe, false otherwise
940 static bool ecrnx_amsdu_add_subframe(struct ecrnx_hw *ecrnx_hw, struct sk_buff *skb,
941 struct ecrnx_sta *sta, struct ecrnx_txq *txq)
945 ecrnx_adjust_amsdu_maxnb(ecrnx_hw);
947 /* immediately return if amsdu are not allowed for this sta */
948 if (!txq->amsdu_len || ecrnx_hw->mod_params->amsdu_maxnb < 2 ||
949 !ecrnx_amsdu_is_aggregable(skb)
953 spin_lock_bh(&ecrnx_hw->tx_lock);
955 /* aggregation already in progress, add this buffer if enough space
956 available, otherwise end the current amsdu */
957 struct ecrnx_sw_txhdr *sw_txhdr = txq->amsdu;
958 eth = (struct ethhdr *)(skb->data);
/* Would this subframe overflow the allowed A-MSDU length, or did the
 * header insertion fail? Then close the current aggregate. */
960 if (((sw_txhdr->amsdu.len + sw_txhdr->amsdu.pad +
961 ecrnx_amsdu_subframe_length(eth, skb->len)) > txq->amsdu_len) ||
962 ecrnx_amsdu_add_subframe_header(ecrnx_hw, skb, sw_txhdr)) {
967 if (sw_txhdr->amsdu.nb >= ecrnx_hw->mod_params->amsdu_maxnb) {
968 ecrnx_hw->stats.amsdus[sw_txhdr->amsdu.nb - 1].done++;
969 /* max number of subframes reached */
973 /* Check if a new amsdu can be started with the previous buffer
974 (if any) and this one */
975 struct sk_buff *skb_prev = skb_peek_tail(&txq->sk_list);
976 struct ecrnx_txhdr *txhdr;
977 struct ecrnx_sw_txhdr *sw_txhdr;
980 if (!skb_prev || !ecrnx_amsdu_is_aggregable(skb_prev))
983 txhdr = (struct ecrnx_txhdr *)skb_prev->data;
984 sw_txhdr = txhdr->sw_hdr;
985 if ((sw_txhdr->amsdu.len) ||
986 (sw_txhdr->desc.host.flags & TXU_CNTRL_RETRY))
987 /* previous buffer is already a complete amsdu or a retry */
/* Both candidate subframes must fit within the txq's A-MSDU limit */
990 eth = (struct ethhdr *)(skb_prev->data + sw_txhdr->headroom);
991 len1 = ecrnx_amsdu_subframe_length(eth, (sw_txhdr->frame_len +
992 sizeof(struct ethhdr)));
994 eth = (struct ethhdr *)(skb->data);
995 len2 = ecrnx_amsdu_subframe_length(eth, skb->len);
997 if (len1 + AMSDU_PADDING(len1) + len2 > txq->amsdu_len)
998 /* not enough space to aggregate those two buffers */
1001 /* Add subframe header.
1002 Note: Fw will take care of adding AMSDU header for the first
1003 subframe while generating 802.11 MAC header */
1004 INIT_LIST_HEAD(&sw_txhdr->amsdu.hdrs);
1005 sw_txhdr->amsdu.len = len1;
1006 sw_txhdr->amsdu.nb = 1;
1007 sw_txhdr->amsdu.pad = AMSDU_PADDING(len1);
1008 if (ecrnx_amsdu_add_subframe_header(ecrnx_hw, skb, sw_txhdr))
1011 sw_txhdr->desc.host.flags |= TXU_CNTRL_AMSDU;
/* Keep aggregating on this txq while below the subframe limit */
1013 if (sw_txhdr->amsdu.nb < ecrnx_hw->mod_params->amsdu_maxnb)
1014 txq->amsdu = sw_txhdr;
1016 ecrnx_hw->stats.amsdus[sw_txhdr->amsdu.nb - 1].done++;
1022 spin_unlock_bh(&ecrnx_hw->tx_lock);
1026 * ecrnx_amsdu_dismantle - Dismantle an already formatted A-MSDU
1028 * @ecrnx_hw Driver main data
1029 * @sw_txhdr_main Software descriptor of the A-MSDU to dismantle.
1031 * The a-mdsu is always fully dismantled (i.e don't try to reduce it's size to
1032 * fit the new limit).
1033 * The DMA mapping can be re-used as ecrnx_amsdu_add_subframe_header ensure that
1034 * enough data in the skb buffer are 'DMA mapped'.
1035 * It would have been slightly simpler to unmap/re-map but it is a little faster like this
1036 * and not that much more complicated to read.
1038 static void ecrnx_amsdu_dismantle(struct ecrnx_hw *ecrnx_hw, struct ecrnx_sw_txhdr *sw_txhdr_main)
1040 struct ecrnx_amsdu_txhdr *amsdu_txhdr, *next;
1041 struct sk_buff *skb_prev = sw_txhdr_main->skb;
1042 struct ecrnx_txq *txq = sw_txhdr_main->txq;
1043 trace_amsdu_dismantle(sw_txhdr_main);
/* Turn the first subframe back into a plain single-packet descriptor */
1044 ecrnx_hw->stats.amsdus[sw_txhdr_main->amsdu.nb - 1].done--;
1045 sw_txhdr_main->amsdu.len = 0;
1046 sw_txhdr_main->amsdu.nb = 0;
1047 sw_txhdr_main->desc.host.flags &= ~TXU_CNTRL_AMSDU;
1048 sw_txhdr_main->desc.host.packet_cnt = 1;
/* Re-queue every other subframe as an independent buffer, re-using its
 * existing DMA mapping (see the kerneldoc above) */
1049 list_for_each_entry_safe(amsdu_txhdr, next, &sw_txhdr_main->amsdu.hdrs, list) {
1050 struct ecrnx_txhdr *txhdr;
1051 struct ecrnx_sw_txhdr *sw_txhdr;
1052 dma_addr_t dma_addr = amsdu_txhdr->dma_addr;
1053 size_t map_len = amsdu_txhdr->map_len;
1055 size_t data_oft, cfm_oft = 0;
1056 struct sk_buff *skb = amsdu_txhdr->skb;
1058 list_del(&amsdu_txhdr->list);
1059 ecrnx_ipc_sta_buffer(ecrnx_hw, txq->sta, txq->tid, -amsdu_txhdr->msdu_len);
1060 ecrnx_amsdu_del_subframe_header(amsdu_txhdr);
1061 headroom = ECRNX_TX_HEADROOM(skb);
1062 tx_map_len = ECRNX_TX_DMA_MAP_LEN(skb);
1063 sw_txhdr = kmem_cache_alloc(ecrnx_hw->sw_txhdr_cache, GFP_ATOMIC);
/* On any failure (no headroom, no descriptor, mapping too short) the
 * subframe cannot be re-queued: release mapping and drop the skb */
1064 if (unlikely((skb_headroom(skb) < headroom) ||
1065 (sw_txhdr == NULL) || (tx_map_len > map_len))) {
1067 kmem_cache_free(ecrnx_hw->sw_txhdr_cache, sw_txhdr);
1068 dma_unmap_single(ecrnx_hw->dev, dma_addr, map_len, DMA_TO_DEVICE);
1069 dev_kfree_skb_any(skb);
1072 sw_txhdr->headroom = headroom;
/* Rebuild a standalone tx descriptor on top of the existing mapping */
1073 cfm_oft = map_len - tx_map_len;
1074 data_oft = sizeof(struct ethhdr) + ECRNX_TX_DATA_OFT(sw_txhdr) + cfm_oft;
1075 txhdr = skb_push(skb, headroom);
1076 txhdr->sw_hdr = sw_txhdr;
1077 memcpy(sw_txhdr, sw_txhdr_main, sizeof(*sw_txhdr));
1078 sw_txhdr->frame_len = map_len - data_oft;
1079 sw_txhdr->skb = skb;
1080 sw_txhdr->headroom = headroom;
1081 txhdr->hw_hdr.cfm.status.value = 0;
1082 sw_txhdr->map_len = map_len;
1083 sw_txhdr->dma_addr = dma_addr;
1084 sw_txhdr->desc.host.packet_addr[0] = dma_addr + data_oft;
1085 sw_txhdr->desc.host.status_desc_addr = dma_addr + cfm_oft;
1086 sw_txhdr->desc.host.packet_len[0] = sw_txhdr->frame_len;
1087 sw_txhdr->desc.host.packet_cnt = 1;
/* Insert right after the previous subframe to preserve ordering */
1088 ecrnx_txq_queue_skb(skb, sw_txhdr->txq, ecrnx_hw, false, skb_prev);
1093 * ecrnx_amsdu_update_len - Update length allowed for A-MSDU on a TXQ
1095 * @ecrnx_hw Driver main data.
1097 * @amsdu_len New length allowed for A-MSDU.
1099 * If this is a TXQ linked to a STA and the allowed A-MSDU size is reduced it is
1100 * then necessary to disassemble all A-MSDU currently queued on all STA' txq that
1101 * are larger than this new limit.
1102 * Does nothing if the A-MSDU limit increase or stay the same.
1104 static void ecrnx_amsdu_update_len(struct ecrnx_hw *ecrnx_hw, struct ecrnx_txq *txq,
1107 struct ecrnx_sta *sta = txq->sta;
1110 if (amsdu_len != txq->amsdu_len)
1111 trace_amsdu_len_update(txq->sta, amsdu_len);
/* Limit increased or unchanged: nothing queued can violate it */
1113 if (amsdu_len >= txq->amsdu_len) {
1114 txq->amsdu_len = amsdu_len;
/* A-MSDU limits are only expected on STA-linked txqs */
1119 netdev_err(txq->ndev, "Non STA txq(%d) with a-amsdu len %d\n",
1120 txq->idx, amsdu_len);
1125 /* A-MSDU size has been reduced by the firmware, need to dismantle all
1126 queued a-msdu that are too large. Need to do this for all txq of the STA. */
1127 foreach_sta_txq(sta, txq, tid, ecrnx_hw) {
1128 struct sk_buff *skb, *skb_next;
1130 if (txq->amsdu_len <= amsdu_len)
/* Retried buffers at the head of the list are skipped: only walk
 * packets queued after the last retry */
1133 if (txq->last_retry_skb)
1134 skb = txq->last_retry_skb->next;
1136 skb = txq->sk_list.next;
1138 skb_queue_walk_from_safe(&txq->sk_list, skb, skb_next) {
1139 struct ecrnx_txhdr *txhdr = (struct ecrnx_txhdr *)skb->data;
1140 struct ecrnx_sw_txhdr *sw_txhdr = txhdr->sw_hdr;
1141 if ((sw_txhdr->desc.host.flags & TXU_CNTRL_AMSDU) &&
1142 (sw_txhdr->amsdu.len > amsdu_len))
1143 ecrnx_amsdu_dismantle(ecrnx_hw, sw_txhdr);
/* Stop tracking an in-progress aggregate that was dismantled */
1145 if (txq->amsdu == sw_txhdr)
1149 txq->amsdu_len = amsdu_len;
1152 #endif /* CONFIG_ECRNX_AMSDUS_TX */
1155 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1156 * struct net_device *dev);
1157 * Called when a packet needs to be transmitted.
1158 * Must return NETDEV_TX_OK , NETDEV_TX_BUSY.
1159 * (can also return NETDEV_TX_LOCKED if NETIF_F_LLTX)
1161 * - Initialize the descriptor for this pkt (stored in skb before data)
1162 * - Push the pkt in the corresponding Txq
1163 * - If possible (i.e. credit available and not in PS) the pkt is pushed
1166 netdev_tx_t ecrnx_start_xmit(struct sk_buff *skb, struct net_device *dev)
1168 struct ecrnx_vif *ecrnx_vif = netdev_priv(dev);
1169 struct ecrnx_hw *ecrnx_hw = ecrnx_vif->ecrnx_hw;
1170 struct ecrnx_txhdr *txhdr;
1171 struct ecrnx_sw_txhdr *sw_txhdr = NULL;
1173 struct txdesc_api *desc;
1174 struct ecrnx_sta *sta;
1175 struct ecrnx_txq *txq;
1176 int headroom = 0, hdr_pads = 0;
1182 sk_pacing_shift_update(skb->sk, ecrnx_hw->tcp_pacing_shift);
1184 /* check whether the current skb can be used */
/* Re-allocate the skb when it is shared, lacks room for our TX header,
 * or is a clone coming from a bridge port (we must not write into it). */
1185 if (skb_shared(skb) || (skb_headroom(skb) < ECRNX_TX_MAX_HEADROOM) ||
1186 (skb_cloned(skb) && (dev->priv_flags & IFF_BRIDGE_PORT))) {
1187 struct sk_buff *newskb = skb_copy_expand(skb, ECRNX_TX_MAX_HEADROOM, 0, GFP_ATOMIC);
1188 if (unlikely(newskb == NULL))
1191 dev_kfree_skb_any(skb);
1196 /* Get the STA id and TID information */
1197 sta = ecrnx_get_tx_info(ecrnx_vif, skb, &tid);
1201 txq = ecrnx_txq_sta_get(sta, tid, ecrnx_hw);
1202 ECRNX_DBG("%s-%d:sta:0x%p,sta_idx:%d, sta_mac:%pM, tid:%d, ecrnx_hw:0x%p, txq:0x%p \n", __func__, __LINE__, sta, sta->sta_idx, sta->mac_addr, tid, ecrnx_hw, txq);
1203 if (txq->idx == TXQ_INACTIVE)
1206 #ifdef CONFIG_ECRNX_AMSDUS_TX
/* Frame absorbed into an A-MSDU under construction: nothing more to do. */
1207 if (ecrnx_amsdu_add_subframe(ecrnx_hw, skb, sta, txq))
1208 return NETDEV_TX_OK;
1211 sw_txhdr = kmem_cache_alloc(ecrnx_hw->sw_txhdr_cache, GFP_ATOMIC);
1212 if (unlikely(sw_txhdr == NULL))
1215 /* Retrieve the pointer to the Ethernet data */
1216 eth = (struct ethhdr *)skb->data;
/* NOTE(review): the checks below compare eth->h_proto (network byte order)
 * against byte-swapped constants: 0xDD86 == IPv6 (0x86DD), 0x8e88 == EAPOL
 * (0x888e), 8 == IPv4 (0x0800). They drop group-addressed frames, blank ICMP
 * payloads and drop UDP — this reads like leftover debug code, possibly
 * guarded by elided `#if 0` blocks in the full file. Confirm before trusting
 * this listing as the active data path. */
1220 if ((skb->data[0] & 0x1) && (skb->data[0] != 0xff)) {
1221 printk("drop mc pkt 0x%x\n", skb->data[0]);
1227 if (0xDD86 == eth->h_proto) { /* IPv6 (0x86DD byte-swapped) */
1228 //printk("%s-%d: eapol\n", __func__, __LINE__);
1230 //dump_xxx_buf(skb->data, skb->len);
1234 if (0x8e88 == eth->h_proto) { /* EAPOL (0x888e byte-swapped), not ICMP */
1235 printk("%s-%d: eapol\n", __func__, __LINE__);
1236 //dump_xxx_buf(skb->data, skb->len);
1242 if (8 == eth->h_proto && 0x1 == skb->data[23]) { /* IPv4 + proto ICMP */
1243 memset(skb->data + 14, 0xff, skb->len - 14);
1245 if (8 == eth->h_proto && 0x11 == skb->data[23]) { /* IPv4 + proto UDP */
1246 printk("---drop udp pkt\n");
1250 //no_encrypt = check_eapol_dont_encrypt(skb);
1252 hdr_pads = ECRNX_SWTXHDR_ALIGN_PADS((long)eth);
1253 /* Use headroom to store struct ecrnx_txhdr */
1254 headroom = ECRNX_TX_HEADROOM(skb);
1256 txhdr = (struct ecrnx_txhdr *)skb_push(skb, headroom);
1257 txhdr->sw_hdr = sw_txhdr;
/* Payload length seen by the MAC: skb minus our header and the Ethernet header. */
1258 frame_len = (u16)skb->len - headroom - sizeof(*eth);
1260 sw_txhdr->txq = txq;
1261 sw_txhdr->frame_len = frame_len;
1262 sw_txhdr->ecrnx_sta = sta;
1263 sw_txhdr->ecrnx_vif = ecrnx_vif;
1264 sw_txhdr->skb = skb;
1265 sw_txhdr->headroom = headroom;
1266 #ifdef CONFIG_ECRNX_ESWIN
1267 sw_txhdr->offset = headroom + sizeof(*eth);
1269 sw_txhdr->map_len = skb->len - offsetof(struct ecrnx_txhdr, hw_hdr);
1271 sw_txhdr->jiffies = jiffies;
1272 #ifdef CONFIG_ECRNX_AMSDUS_TX
1273 sw_txhdr->amsdu.len = 0;
1274 sw_txhdr->amsdu.nb = 0;
1276 // Fill-in the descriptor
1277 desc = &sw_txhdr->desc;
1278 memcpy(&desc->host.eth_dest_addr, eth->h_dest, ETH_ALEN);
1279 memcpy(&desc->host.eth_src_addr, eth->h_source, ETH_ALEN);
1280 desc->host.ethertype = eth->h_proto;
1281 desc->host.staid = sta->sta_idx;
1282 desc->host.tid = tid;
/* AP_VLAN frames are sent on behalf of the master AP vif. */
1283 if (unlikely(ecrnx_vif->wdev.iftype == NL80211_IFTYPE_AP_VLAN))
1284 desc->host.vif_idx = ecrnx_vif->ap_vlan.master->vif_index;
1286 desc->host.vif_idx = ecrnx_vif->vif_index;
1287 desc->host.flags = 0;
1289 if (ecrnx_vif->use_4addr && (sta->sta_idx < NX_REMOTE_STA_MAX))
1290 desc->host.flags |= TXU_CNTRL_USE_4ADDR;
/* Route the frame over the TDLS direct link when the peer matches. */
1292 if ((ecrnx_vif->tdls_status == TDLS_LINK_ACTIVE) &&
1293 ecrnx_vif->sta.tdls_sta &&
1294 (memcmp(desc->host.eth_dest_addr.array, ecrnx_vif->sta.tdls_sta->mac_addr, ETH_ALEN) == 0)) {
1295 desc->host.flags |= TXU_CNTRL_TDLS;
1296 ecrnx_vif->sta.tdls_sta->tdls.last_tid = desc->host.tid;
1297 ecrnx_vif->sta.tdls_sta->tdls.last_sn = desc->host.sn;
1300 if ((ecrnx_vif->wdev.iftype == NL80211_IFTYPE_MESH_POINT) &&
1301 (ecrnx_vif->is_resending))
1302 desc->host.flags |= TXU_CNTRL_MESH_FWD;
1304 #ifdef CONFIG_ECRNX_SPLIT_TX_BUF
1305 desc->host.packet_len[0] = frame_len;
1307 desc->host.packet_len = frame_len;
1310 txhdr->hw_hdr.cfm.status.value = 0;
1312 #ifdef CONFIG_ECRNX_ESWIN
/* ESWIN transport: the "addresses" handed to fw are the host skb pointer,
 * echoed back in the confirmation to recover the skb (no DMA mapping). */
1313 skb_addr = (ptr_addr)skb;
1314 #ifdef CONFIG_ECRNX_SPLIT_TX_BUF
1315 desc->host.packet_addr[0] = (u64_l)skb;
1316 desc->host.packet_cnt = 1;
1318 //desc->host.packet_addr = (u64_l)skb;
1319 desc->host.packet_addr[0] = (u32_l)skb_addr;
1320 desc->host.packet_addr[1] = (u32_l)(skb_addr >> 32);
1322 //desc->host.status_desc_addr = (u64_l)skb;
1323 desc->host.status_desc_addr[0] = (u32_l)skb_addr;
1324 desc->host.status_desc_addr[1] = (u32_l)(skb_addr >> 32);
1327 desc->host.flags |= TXU_CNTRL_NO_ENCRYPT;
1329 #else //CONFIG_ECRNX_ESWIN_SDIO
/* PCI-like transport: map the frame for DMA instead. */
1330 if (unlikely(ecrnx_prep_dma_tx(ecrnx_hw, txhdr, true)))
1332 #endif //CONFIG_ECRNX_ESWIN_SDIO
1333 //ECRNX_DBG("%s:desc:0x%08x, vif_idx:%d, skb:0x%08x, headroom:%d !!! \n", __func__, desc, desc->host.vif_idx, skb, headroom);
/* Queue on the txq; if credits allow immediate transmission, kick the hw queue. */
1334 spin_lock_bh(&ecrnx_hw->tx_lock);
1335 if (ecrnx_txq_queue_skb(skb, txq, ecrnx_hw, false, NULL))
1337 ECRNX_DBG("%s-%d:txdesc:0x%x, skb:0x%08x, skb->len:%d \n", __func__, __LINE__, desc, skb, skb->len);
1338 ecrnx_hwq_process(ecrnx_hw, txq->hwq);
1342 ECRNX_DBG("%s-%d: delay send(put txq), txq:0x%p, queue status 0x%x, skb:0x%08x, skb->len:%d !!! \n", __func__, __LINE__, txq, txq->status, skb, skb->len);
1344 spin_unlock_bh(&ecrnx_hw->tx_lock);
1346 return NETDEV_TX_OK;
/* Error path: undo header push and drop the frame (still NETDEV_TX_OK so
 * the stack does not retry). */
1350 kmem_cache_free(ecrnx_hw->sw_txhdr_cache, sw_txhdr);
1352 skb_pull(skb, headroom);
1353 dev_kfree_skb_any(skb);
1355 return NETDEV_TX_OK;
1359 * ecrnx_start_mgmt_xmit - Transmit a management frame
1361 * @vif: Vif that send the frame
1362 * @sta: Destination of the frame. May be NULL if the destination is unknown
1364 * @params: Mgmt frame parameters
1365 * @offchan: Indicate whether the frame must be send via the offchan TXQ.
1366 * (is it redundant with params->offchan ?)
1367 * @cookie: updated with a unique value to identify the frame with upper layer
1370 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
1371 int ecrnx_start_mgmt_xmit(struct ecrnx_vif *vif, struct ecrnx_sta *sta,
1372 struct cfg80211_mgmt_tx_params *params, bool offchan,
1375 int ecrnx_start_mgmt_xmit(struct ecrnx_vif *vif, struct ecrnx_sta *sta,
1376 struct ieee80211_channel *channel, bool offchan,
1377 unsigned int wait, const u8* buf, size_t len,
1378 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
1381 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
1382 bool dont_wait_for_ack,
1385 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
1387 struct ecrnx_hw *ecrnx_hw = vif->ecrnx_hw;
1388 struct ecrnx_txhdr *txhdr;
1389 struct ecrnx_sw_txhdr *sw_txhdr;
1390 struct txdesc_api *desc;
1391 struct sk_buff *skb;
1392 u16 frame_len, headroom;
1394 struct ecrnx_txq *txq;
1397 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
1398 const u8 *buf = params->buf;
1399 size_t len = params->len;
1400 bool no_cck = params->no_cck;
1404 #ifdef CONFIG_ECRNX_ESWIN
1405 headroom = sizeof(struct ecrnx_txhdr) + ECRNX_TX_TXDESC_API_ALIGN;
1407 headroom = sizeof(struct ecrnx_txhdr);
1411 //----------------------------------------------------------------------
1413 /* Set TID and Queues indexes */
/* Unicast to a known STA: use its TID-8 (mgmt) txq; otherwise pick the
 * off-channel txq or the vif "unknown STA" txq. */
1415 txq = ecrnx_txq_sta_get(sta, 8, ecrnx_hw);
1418 txq = &ecrnx_hw->txq[NX_OFF_CHAN_TXQ_IDX];
1420 txq = ecrnx_txq_vif_get(vif, NX_UNK_TXQ_TYPE);
1423 /* Ensure that TXQ is active */
1424 if (txq->idx == TXQ_INACTIVE) {
1426 netdev_dbg(vif->ndev, "TXQ inactive\n");
1434 * Create a SK Buff object that will contain the provided data
1436 skb = dev_alloc_skb(headroom + frame_len);
/* The skb pointer doubles as the cookie reported back to cfg80211. */
1442 *cookie = (unsigned long)skb;
1444 sw_txhdr = kmem_cache_alloc(ecrnx_hw->sw_txhdr_cache, GFP_ATOMIC);
1445 if (unlikely(sw_txhdr == NULL)) {
1450 * Move skb->data pointer in order to reserve room for ecrnx_txhdr
1451 * headroom value will be equal to sizeof(struct ecrnx_txhdr)
1453 skb_reserve(skb, headroom);
1456 * Extend the buffer data area in order to contain the provided packet
1457 * len value (for skb) will be equal to param->len
1459 data = skb_put(skb, frame_len);
1460 /* Copy the provided data */
1461 memcpy(data, buf, frame_len);
/* Robust-mgmt detection changed signature across kernel versions. */
1462 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
1463 robust = ieee80211_is_robust_mgmt_frame(skb);
1468 robust = ieee80211_is_robust_mgmt_frame((void *)skb->data);
1469 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
1471 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
1472 /* Update CSA counter if present */
1473 if (unlikely(params->n_csa_offsets) &&
1474 vif->wdev.iftype == NL80211_IFTYPE_AP &&
1479 for (i = 0; i < params->n_csa_offsets ; i++) {
1480 data[params->csa_offsets[i]] = vif->ap.csa->count;
1483 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) */
1486 * Go back to the beginning of the allocated data area
1487 * skb->data pointer will move backward
1489 txhdr = (struct ecrnx_txhdr *)skb_push(skb, headroom);
1491 //----------------------------------------------------------------------
1493 /* Fill the TX Header */
1496 //----------------------------------------------------------------------
1498 /* Fill the SW TX Header */
1499 txhdr->sw_hdr = sw_txhdr;
1501 sw_txhdr->txq = txq;
1502 sw_txhdr->frame_len = frame_len;
1503 sw_txhdr->ecrnx_sta = sta;
1504 sw_txhdr->ecrnx_vif = vif;
1505 sw_txhdr->skb = skb;
1506 sw_txhdr->headroom = headroom;
1507 #ifdef CONFIG_ECRNX_ESWIN
1508 sw_txhdr->offset = headroom; //sizeof(struct ecrnx_txhdr) + sizeof(struct txdesc_api);
1510 sw_txhdr->map_len = skb->len - offsetof(struct ecrnx_txhdr, hw_hdr);
1512 sw_txhdr->jiffies = jiffies;
1513 #ifdef CONFIG_ECRNX_AMSDUS_TX
1514 sw_txhdr->amsdu.len = 0;
1515 sw_txhdr->amsdu.nb = 0;
1517 //----------------------------------------------------------------------
1519 /* Fill the Descriptor to be provided to the MAC SW */
1520 desc = &sw_txhdr->desc;
1522 desc->host.staid = (sta) ? sta->sta_idx : 0xFF;
1523 desc->host.vif_idx = vif->vif_index;
1524 desc->host.tid = 0xFF;
1525 desc->host.flags = TXU_CNTRL_MGMT;
1527 desc->host.flags |= TXU_CNTRL_MGMT_ROBUST;
1531 desc->host.flags |= TXU_CNTRL_MGMT_NO_CCK;
/* NOTE(review): CONFIG_RWNX_SPLIT_TX_BUF looks like a typo for
 * CONFIG_ECRNX_SPLIT_TX_BUF (cf. the data path and line 1543 below) —
 * as written, the packet_len[0] branch would never be selected when the
 * ECRNX flavor of the option is enabled. Confirm against the build config. */
1534 #ifdef CONFIG_RWNX_SPLIT_TX_BUF
1535 desc->host.packet_len[0] = frame_len;
1537 desc->host.packet_len = frame_len;
1540 txhdr->hw_hdr.cfm.status.value = 0;
1541 #ifdef CONFIG_ECRNX_ESWIN
1542 skb_addr = (ptr_addr)skb;
1543 #ifdef CONFIG_ECRNX_SPLIT_TX_BUF
/* NOTE(review): unlike the data path (which uses (u64_l)skb), this assigns
 * the raw pointer without a cast — likely a copy-paste slip. */
1544 desc->host.packet_addr[0] = skb;
1545 desc->host.packet_cnt = 1;
1547 //desc->host.packet_addr = (u64_l)skb;
1548 desc->host.packet_addr[0] = (u32_l)skb_addr;
1549 desc->host.packet_addr[1] = (u32_l)(skb_addr >> 32);
1551 //desc->host.status_desc_addr = (u64_l)skb;
/* NOTE(review): the data path writes status_desc_addr[0..1] here; writing
 * packet_addr twice looks like a copy-paste bug that leaves
 * status_desc_addr uninitialized — verify against the data-path variant. */
1552 desc->host.packet_addr[0] = (u32_l)skb_addr;
1553 desc->host.packet_addr[1] = (u32_l)(skb_addr >> 32);
1554 #else //CONFIG_ECRNX_ESWIN_SDIO
1556 /* Get DMA Address */
1557 if (unlikely(ecrnx_prep_dma_tx(ecrnx_hw, txhdr, false))) {
1558 kmem_cache_free(ecrnx_hw->sw_txhdr_cache, sw_txhdr);
1563 #ifdef CONFIG_ECRNX_SPLIT_TX_BUF
1566 //----------------------------------------------------------------------
/* Queue the frame; push immediately when the txq has credits. */
1568 spin_lock_bh(&ecrnx_hw->tx_lock);
1569 if (ecrnx_txq_queue_skb(skb, txq, ecrnx_hw, false, NULL)) {
1570 ECRNX_DBG("%s-%d:txdesc:0x%x, skb:0x%08x, skb->len:%d \n", __func__, __LINE__, desc, skb, skb->len);
1571 ecrnx_hwq_process(ecrnx_hw, txq->hwq);
1573 ECRNX_DBG("%s-%d: delay send(put txq), queue status 0x%x, skb:0x%08x, skb->len:%d !!! \n", __func__, __LINE__, txq->status, skb, skb->len);
1575 spin_unlock_bh(&ecrnx_hw->tx_lock);
/*
 * ecrnx_handle_tx_datacfm - Handle a TX data confirmation from firmware
 * (ESWIN transport path).
 *
 * @priv:    struct ecrnx_hw * passed back as an opaque pointer
 * @host_id: the skb pointer that was handed to fw in the TX descriptor
 *
 * Recovers the TX headers from the confirmed skb, releases the credit
 * accounting on the txq, restarts the queue when credits allow, and
 * updates vif/sta statistics.
 */
1580 int ecrnx_handle_tx_datacfm(void *priv, void *host_id)
1582 struct ecrnx_hw *ecrnx_hw = (struct ecrnx_hw *)priv;
1583 struct sk_buff *skb = host_id;
1584 struct ecrnx_hwq *hwq;
1585 struct ecrnx_txq *txq;
1586 struct ecrnx_sta *sta;
1587 struct ecrnx_txhdr *txhdr;
/* On USB the txhdr pointer is stashed just before skb->data; elsewhere
 * the txhdr sits at skb->data itself. */
1589 #if defined(CONFIG_ECRNX_ESWIN_USB)
1590 txhdr = (struct ecrnx_txhdr *)(*((ptr_addr*)skb->data - 1));
1592 txhdr = (struct ecrnx_txhdr *)skb->data;
1595 struct ecrnx_sw_txhdr *sw_txhdr = txhdr->sw_hdr;
1597 /* Check status in the header. If status is null, it means that the buffer
1598 * was not transmitted and we have to return immediately */
1599 ECRNX_DBG("%s:hostid(tx_skb):0x%08x\n", __func__, skb);
1601 txq = sw_txhdr->txq;
1602 /* don't use txq->hwq as it may have changed between push and confirm */
1603 hwq = &ecrnx_hw->hwq[sw_txhdr->hw_queue];
1604 ecrnx_txq_confirm_any(ecrnx_hw, txq, hwq, sw_txhdr);
1606 if (txq->idx != TXQ_INACTIVE) {
1609 //printk("finish_cfm: txq->credits %d 0x%08x\n", txq->credits,skb);
1610 if (txq->credits <= 0){
1611 ecrnx_txq_stop(txq, ECRNX_TXQ_STOP_FULL);
1613 else if (txq->credits > 0)
1615 ecrnx_txq_start(txq, ECRNX_TXQ_STOP_FULL);
1616 /* baoyong:handle the pkts in sk_list right now */
1617 if (txq->idx != TXQ_INACTIVE && !skb_queue_empty(&txq->sk_list))
1619 ecrnx_hwq_process(ecrnx_hw, txq->hwq);
1623 /* continue service period */
1624 if (unlikely(txq->push_limit && !ecrnx_txq_is_full(txq))) {
1625 ecrnx_txq_add_to_hw_list(txq);
1629 /* Update statistics */
1630 sw_txhdr->ecrnx_vif->net_stats.tx_packets++;
1631 sw_txhdr->ecrnx_vif->net_stats.tx_bytes += sw_txhdr->frame_len;
1637 sta->stats.tx_pkts ++;
1638 sta->stats.tx_bytes += sw_txhdr->frame_len;
1639 sta->stats.last_act = ecrnx_hw->stats.last_tx;
1641 //printk("sta->stats.tx_pkts=%d sta->stats.tx_bytes =%d\n", sta->stats.tx_pkts, sta->stats.tx_bytes);
/* NOTE(review): use-after-free — sw_txhdr is freed here and then
 * dereferenced on the next line (sw_txhdr->headroom). The headroom value
 * should be read into a local before kmem_cache_free(). */
1643 kmem_cache_free(ecrnx_hw->sw_txhdr_cache, sw_txhdr);
1644 skb_pull(skb, sw_txhdr->headroom);
1650 * ecrnx_txdatacfm - FW callback for TX confirmation
1652 * called with tx_lock hold
1654 int ecrnx_txdatacfm(void *pthis, void *host_id)
1656 struct ecrnx_hw *ecrnx_hw = (struct ecrnx_hw *)pthis;
1657 struct sk_buff *skb = host_id;
1658 struct ecrnx_txhdr *txhdr;
1659 union ecrnx_hw_txstatus ecrnx_txst;
1660 struct ecrnx_sw_txhdr *sw_txhdr;
1661 struct ecrnx_hwq *hwq;
1662 struct ecrnx_txq *txq;
1663 #ifndef CONFIG_ECRNX_ESWIN
1664 dma_addr_t cfm_dma_addr;
/* On USB host_id is the txhdr itself (skb recovered from sw_hdr); on the
 * other transports host_id is the skb with the txhdr at skb->data. */
1668 #if defined(CONFIG_ECRNX_ESWIN_USB)
1669 txhdr = (struct ecrnx_txhdr *)host_id;
1670 skb = txhdr->sw_hdr->skb;
1671 skb_push(skb, sizeof(struct ecrnx_txhdr) - sizeof(u32_l));
1673 txhdr = (struct ecrnx_txhdr *)skb->data;
1675 sw_txhdr = txhdr->sw_hdr;
1676 cfm_len = sizeof(txhdr->hw_hdr.cfm);
1678 //ECRNX_DBG("%s-%d: skb:0x%08x, skb->len:%d \n", __func__, __LINE__, skb, skb->len);
/* PCI path: bring the fw-written confirmation area up to date for the CPU. */
1679 #ifndef CONFIG_ECRNX_ESWIN
1680 cfm_dma_addr = (ptr_addr)sw_txhdr->desc.host.status_desc_addr;
1681 dma_sync_single_for_cpu(ecrnx_hw->dev, cfm_dma_addr, cfm_len, DMA_FROM_DEVICE);
1683 /* Read status in the TX control header */
1684 ecrnx_txst = txhdr->hw_hdr.cfm.status;
1686 /* Check status in the header. If status is null, it means that the buffer
1687 * was not transmitted and we have to return immediately */
1688 if (ecrnx_txst.value == 0) {
1689 #ifndef CONFIG_ECRNX_ESWIN
1690 dma_sync_single_for_device(ecrnx_hw->dev, cfm_dma_addr, cfm_len, DMA_FROM_DEVICE);
1695 txq = sw_txhdr->txq;
1696 /* don't use txq->hwq as it may have changed between push and confirm */
1697 hwq = &ecrnx_hw->hwq[sw_txhdr->hw_queue];
1698 ecrnx_txq_confirm_any(ecrnx_hw, txq, hwq, sw_txhdr);
1700 /* Update txq and HW queue credits */
1701 if (sw_txhdr->desc.host.flags & TXU_CNTRL_MGMT) {
1702 trace_mgmt_cfm(sw_txhdr->ecrnx_vif->vif_index,
1703 (sw_txhdr->ecrnx_sta) ? sw_txhdr->ecrnx_sta->sta_idx : 0xFF,
1704 ecrnx_txst.acknowledged);
1706 /* Confirm transmission to CFG80211 */
1707 cfg80211_mgmt_tx_status(&sw_txhdr->ecrnx_vif->wdev,
1709 (skb->data + sw_txhdr->headroom),
1710 sw_txhdr->frame_len,
1711 ecrnx_txst.acknowledged,
1713 } else if ((txq->idx != TXQ_INACTIVE) &&
1714 (ecrnx_txst.retry_required || ecrnx_txst.sw_retry_required)) {
1715 bool sw_retry = (ecrnx_txst.sw_retry_required) ? true : false;
1717 /* Reset the status */
1718 txhdr->hw_hdr.cfm.status.value = 0;
1720 /* The confirmed packet was part of an AMPDU and not acked
1721 * correctly, so reinject it in the TX path to be retried */
1722 ecrnx_tx_retry(ecrnx_hw, skb, txhdr, sw_retry);
1726 trace_skb_confirm(skb, txq, hwq, &txhdr->hw_hdr.cfm);
1728 /* STA may have disconnect (and txq stopped) when buffers were stored
1729 in fw. In this case do nothing when they're returned */
1730 if (txq->idx != TXQ_INACTIVE) {
1731 if (txhdr->hw_hdr.cfm.credits) {
1732 txq->credits += txhdr->hw_hdr.cfm.credits;
1733 if (txq->credits <= 0)
1734 ecrnx_txq_stop(txq, ECRNX_TXQ_STOP_FULL);
1735 else if (txq->credits > 0)
1737 ecrnx_txq_start(txq, ECRNX_TXQ_STOP_FULL);
1738 /* baoyong:handle the pkts in sk_list right now */
1739 if (txq->idx != TXQ_INACTIVE && !skb_queue_empty(&txq->sk_list))
1741 ecrnx_hwq_process(ecrnx_hw, txq->hwq);
1747 /* continue service period */
1748 if (unlikely(txq->push_limit && !ecrnx_txq_is_full(txq))) {
1749 ecrnx_txq_add_to_hw_list(txq);
/* Record A-MPDU aggregation-size histogram reported by fw. */
1753 if (txhdr->hw_hdr.cfm.ampdu_size &&
1754 txhdr->hw_hdr.cfm.ampdu_size < IEEE80211_MAX_AMPDU_BUF)
1755 ecrnx_hw->stats.ampdus_tx[txhdr->hw_hdr.cfm.ampdu_size - 1]++;
1757 #ifdef CONFIG_ECRNX_AMSDUS_TX
1758 ecrnx_amsdu_update_len(ecrnx_hw, txq, txhdr->hw_hdr.cfm.amsdu_size);
1761 /* Update statistics */
1762 sw_txhdr->ecrnx_vif->net_stats.tx_packets++;
1763 sw_txhdr->ecrnx_vif->net_stats.tx_bytes += sw_txhdr->frame_len;
/* Release every subframe of a confirmed A-MSDU aggregate. */
1766 #ifdef CONFIG_ECRNX_AMSDUS_TX
1767 if (sw_txhdr->desc.host.flags & TXU_CNTRL_AMSDU) {
1768 struct ecrnx_amsdu_txhdr *amsdu_txhdr;
1769 list_for_each_entry(amsdu_txhdr, &sw_txhdr->amsdu.hdrs, list) {
1770 ecrnx_amsdu_del_subframe_header(amsdu_txhdr);
1771 #ifndef CONFIG_ECRNX_ESWIN
1772 dma_unmap_single(ecrnx_hw->dev, amsdu_txhdr->dma_addr,
1773 amsdu_txhdr->map_len, DMA_TO_DEVICE);
1775 ecrnx_ipc_sta_buffer(ecrnx_hw, txq->sta, txq->tid,
1776 -amsdu_txhdr->msdu_len);
1777 ecrnx_tx_statistic(ecrnx_hw, txq, ecrnx_txst, amsdu_txhdr->msdu_len);
1778 consume_skb(amsdu_txhdr->skb);
1781 #endif /* CONFIG_ECRNX_AMSDUS_TX */
1783 #ifndef CONFIG_ECRNX_ESWIN
1784 /* unmap with the least costly DMA_TO_DEVICE since we don't need to inval */
1785 dma_unmap_single(ecrnx_hw->dev, sw_txhdr->dma_addr, sw_txhdr->map_len,
1788 ecrnx_ipc_sta_buffer(ecrnx_hw, txq->sta, txq->tid, -sw_txhdr->frame_len);
1789 ecrnx_tx_statistic(ecrnx_hw, txq, ecrnx_txst, sw_txhdr->frame_len);
/* NOTE(review): use-after-free — sw_txhdr is freed and then dereferenced
 * on the next line (sw_txhdr->headroom). Read headroom into a local first. */
1791 kmem_cache_free(ecrnx_hw->sw_txhdr_cache, sw_txhdr);
1792 skb_pull(skb, sw_txhdr->headroom);
1799 * ecrnx_txq_credit_update - Update credit for one txq
1801 * @ecrnx_hw: Driver main data
1804 * @update: offset to apply in txq credits
1806 * Called when fw send ME_TX_CREDITS_UPDATE_IND message.
1807 * Apply @update to txq credits, and stop/start the txq if needed
1809 void ecrnx_txq_credit_update(struct ecrnx_hw *ecrnx_hw, int sta_idx, u8 tid, s8 update)
/* Whole body is compiled out on the ESWIN transport (no host-side credits). */
1811 #ifndef CONFIG_ECRNX_ESWIN
1812 struct ecrnx_sta *sta = &ecrnx_hw->sta_table[sta_idx];
1813 struct ecrnx_txq *txq;
1815 txq = ecrnx_txq_sta_get(sta, tid, ecrnx_hw);
1817 spin_lock_bh(&ecrnx_hw->tx_lock);
1819 if (txq->idx != TXQ_INACTIVE) {
1820 txq->credits += update;
1821 trace_credit_update(txq, update);
1822 if (txq->credits <= 0){
1823 ECRNX_DBG("%s-%d:ecrnx_txq_stop,reaosn:0x%x \n", __func__, __LINE__, ECRNX_TXQ_STOP_FULL);
1824 ecrnx_txq_stop(txq, ECRNX_TXQ_STOP_FULL);
1827 ecrnx_txq_start(txq, ECRNX_TXQ_STOP_FULL);
1828 ECRNX_DBG("%s-%d:ecrnx_txq_start,reaosn:0x%x \n", __func__, __LINE__, ECRNX_TXQ_STOP_FULL);
1832 // Drop all the retry packets of a BA that was deleted
/* A credit update below the initial amount signals the BA session went
 * away; flush its pending retry packets. skb_peek() is re-evaluated each
 * iteration because ecrnx_txq_drop_skb() dequeues the head. */
1833 if (update < NX_TXQ_INITIAL_CREDITS) {
1836 for (packet = 0; packet < txq->nb_retry; packet++) {
1837 ecrnx_txq_drop_skb(txq, skb_peek(&txq->sk_list), ecrnx_hw, true);
1841 spin_unlock_bh(&ecrnx_hw->tx_lock);
1846 #ifdef CONFIG_ECRNX_ESWIN_SDIO
/* Thin SDIO-build wrapper around ecrnx_tx_retry() so other translation
 * units can trigger a retry without the static/internal entry point. */
1847 void ecrnx_tx_retry_sdio(struct ecrnx_hw *ecrnx_hw, struct sk_buff *skb,
1848 struct ecrnx_txhdr *txhdr, bool sw_retry)
1850 ecrnx_tx_retry(ecrnx_hw, skb, txhdr, sw_retry);
1854 #ifdef CONFIG_ECRNX_AMSDUS_TX
/* SDIO-build wrapper around ecrnx_amsdu_del_subframe_header(). */
1855 void ecrnx_amsdu_del_subframe_header_sdio(struct ecrnx_amsdu_txhdr *amsdu_txhdr)
1857 ecrnx_amsdu_del_subframe_header(amsdu_txhdr);
1862 #ifdef CONFIG_ECRNX_ESWIN_USB
/* Thin USB-build wrapper around ecrnx_tx_retry(), mirroring the SDIO one. */
1863 void ecrnx_tx_retry_usb(struct ecrnx_hw *ecrnx_hw, struct sk_buff *skb,
1864 struct ecrnx_txhdr *txhdr, bool sw_retry)
1866 ecrnx_tx_retry(ecrnx_hw, skb, txhdr, sw_retry);
1870 #ifdef CONFIG_ECRNX_AMSDUS_TX
1871 void ecrnx_amsdu_del_subframe_header_sdio(struct ecrnx_amsdu_txhdr *amsdu_txhdr)
1873 ecrnx_amsdu_del_subframe_header(amsdu_txhdr);