/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
/*
 * TODO: this is here just for now, it must be removed when the data
 * operations are in place.
 */
#include "../wl12xx/reg.h"
static int wl1271_set_default_wep_key(struct wl1271 *wl,
                                      struct wl12xx_vif *wlvif, u8 id)
        bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

        if (is_ap)
                ret = wl12xx_cmd_set_default_wep_key(wl, id,
                                                     wlvif->ap.bcast_hlid);
        else
                ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);

        wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
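/*
 * Reserve a free Tx descriptor id for this skb: find a clear bit in
 * tx_frames_map, mark it used and remember the skb so the Tx-completion
 * path can look it up again by descriptor id.
 */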
static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
        id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
        if (id >= wl->num_tx_desc)
                return -EBUSY;

        __set_bit(id, wl->tx_frames_map);
        wl->tx_frames[id] = skb;
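/*
 * Release a Tx descriptor id. If every descriptor was in use, the
 * FW_TX_BUSY flag is cleared here so that Tx work can start filling the
 * firmware again.
 */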
void wl1271_free_tx_id(struct wl1271 *wl, int id)
        if (__test_and_clear_bit(id, wl->tx_frames_map)) {
                if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
                        clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

                wl->tx_frames[id] = NULL;
EXPORT_SYMBOL(wl1271_free_tx_id);
static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
                                                 struct sk_buff *skb)
        struct ieee80211_hdr *hdr;

        /*
         * add the station to the known list before transmitting the
         * authentication response. this way it won't get de-authed by FW
         * when transmitting too soon.
         */
        hdr = (struct ieee80211_hdr *)(skb->data +
                                       sizeof(struct wl1271_tx_hw_descr));
        if (ieee80211_is_auth(hdr->frame_control))
                wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
static void wl1271_tx_regulate_link(struct wl1271 *wl,
                                    struct wl12xx_vif *wlvif,
                                    u8 hlid)
        bool fw_ps, single_sta;

        if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
                return;

        fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
        tx_pkts = wl->links[hlid].allocated_pkts;
        single_sta = (wl->active_sta_count == 1);

        /*
         * if in FW PS and there is enough data in FW we can put the link
         * into high-level PS and clean out its TX queues.
         * Make an exception if this is the only connected station. In this
         * case FW-memory congestion is not a problem.
         */
        if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
                wl12xx_ps_link_start(wl, wlvif, hlid, true);
bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
        return wl->dummy_packet == skb;
EXPORT_SYMBOL(wl12xx_is_dummy_packet);
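/*
 * Pick the host link id (hlid) used to transmit a frame in AP mode:
 * frames for a known station use the hlid stored in its drv_priv,
 * multicast frames go out on the broadcast hlid, other frames use the
 * global hlid, and anything queued before the AP is fully started is
 * sent on the system hlid.
 */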
static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                                struct sk_buff *skb, struct ieee80211_sta *sta)
        if (sta) {
                struct wl1271_station *wl_sta;

                wl_sta = (struct wl1271_station *)sta->drv_priv;
                return wl_sta->hlid;
        } else {
                struct ieee80211_hdr *hdr;

                if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
                        return wl->system_hlid;

                hdr = (struct ieee80211_hdr *)skb->data;
                if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
                        return wlvif->ap.bcast_hlid;
                else
                        return wlvif->ap.global_hlid;
        }
u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                      struct sk_buff *skb, struct ieee80211_sta *sta)
        if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
                return wl->system_hlid;

        if (wlvif->bss_type == BSS_TYPE_AP_BSS)
                return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);

        return wlvif->sta.hlid;
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
                                          unsigned int packet_length)
        if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
            !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
                return ALIGN(packet_length, WL1271_TX_ALIGN_TO);

        return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
EXPORT_SYMBOL(wlcore_calc_packet_alignment);
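/*
 * Reserve resources for one frame: a descriptor id and enough firmware
 * memory blocks for the (descriptor + extra + payload) length. Returns 0
 * on success, -EAGAIN when the frame does not fit in the remaining
 * aggregation buffer space, and -EBUSY when no descriptor id or not
 * enough firmware blocks are available.
 */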
static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                              struct sk_buff *skb, u32 extra, u32 buf_offset,
                              u8 hlid, bool is_gem)
        struct wl1271_tx_hw_descr *desc;
        u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
        int id, ret = -EBUSY, ac;

        if (buf_offset + total_len > wl->aggr_buf_size)
                return -EAGAIN;

        spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);

        /* allocate free identifier for the packet */
        id = wl1271_alloc_tx_id(wl, skb);

        total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);

        if (total_blocks <= wl->tx_blocks_available) {
                desc = (struct wl1271_tx_hw_descr *)skb_push(
                        skb, total_len - skb->len);

                wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
                                             spare_blocks);

                wl->tx_blocks_available -= total_blocks;
                wl->tx_allocated_blocks += total_blocks;

                /* If the FW was empty before, arm the Tx watchdog */
                if (wl->tx_allocated_blocks == total_blocks)
                        wl12xx_rearm_tx_watchdog_locked(wl);

                ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
                wl->tx_allocated_pkts[ac]++;

                if (!wl12xx_is_dummy_packet(wl, skb) && wlvif &&
                    wlvif->bss_type == BSS_TYPE_AP_BSS &&
                    test_bit(hlid, wlvif->ap.sta_hlid_map))
                        wl->links[hlid].allocated_pkts++;

                wl1271_debug(DEBUG_TX,
                             "tx_allocate: size: %d, blocks: %d, id: %d",
                             total_len, total_blocks, id);
        } else {
                wl1271_free_tx_id(wl, id);
        }
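/*
 * Fill in the hardware descriptor that wl1271_tx_allocate() pushed in
 * front of the frame: start time and lifetime, TID/AC, session counter,
 * rate-policy index and the host-encrypt flag for WEP shared-key auth.
 */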
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                               struct sk_buff *skb, u32 extra,
                               struct ieee80211_tx_info *control, u8 hlid)
        struct wl1271_tx_hw_descr *desc;
        __le16 frame_control;
        struct ieee80211_hdr *hdr;

        desc = (struct wl1271_tx_hw_descr *) skb->data;
        frame_start = (u8 *)(desc + 1);
        hdr = (struct ieee80211_hdr *)(frame_start + extra);
        frame_control = hdr->frame_control;

        /* relocate space for security header */
        int hdrlen = ieee80211_hdrlen(frame_control);
        memmove(frame_start, hdr, hdrlen);
        skb_set_network_header(skb, skb_network_offset(skb) + extra);

        /* configure packet life time */
        hosttime = (timespec_to_ns(&ts) >> 10);
        desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

        is_dummy = wl12xx_is_dummy_packet(wl, skb);
        if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
                desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
        else
                desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

        ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
        desc->tid = skb->priority;

        if (is_dummy) {
                /*
                 * FW expects the dummy packet to have an invalid session id -
                 * any session id that is different than the one set in the join
                 */
                tx_attr = (SESSION_COUNTER_INVALID <<
                           TX_HW_ATTR_OFST_SESSION_COUNTER) &
                           TX_HW_ATTR_SESSION_COUNTER;

                tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
        } else if (wlvif) {
                /* configure the tx attributes */
                tx_attr = wlvif->session_counter <<
                          TX_HW_ATTR_OFST_SESSION_COUNTER;
        }

        if (is_dummy || !wlvif)
                rate_idx = 0;
        else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
                /*
                 * If the packets are data packets, send them with the AP
                 * rate policies (EAPOLs are an exception); otherwise use
                 * the default basic rates.
                 */
                if (skb->protocol == cpu_to_be16(ETH_P_PAE))
                        rate_idx = wlvif->sta.basic_rate_idx;
                else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
                        rate_idx = wlvif->sta.p2p_rate_idx;
                else if (ieee80211_is_data(frame_control))
                        rate_idx = wlvif->sta.ap_rate_idx;
                else
                        rate_idx = wlvif->sta.basic_rate_idx;
        } else {
                if (hlid == wlvif->ap.global_hlid)
                        rate_idx = wlvif->ap.mgmt_rate_idx;
                else if (hlid == wlvif->ap.bcast_hlid ||
                         skb->protocol == cpu_to_be16(ETH_P_PAE) ||
                         !ieee80211_is_data(frame_control))
                        /*
                         * send non-data, bcast and EAPOLs using the
                         * min basic rate
                         */
                        rate_idx = wlvif->ap.bcast_rate_idx;
                else
                        rate_idx = wlvif->ap.ucast_rate_idx[ac];
        }

        tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;

        /* for WEP shared auth - no fw encryption is needed */
        if (ieee80211_is_auth(frame_control) &&
            ieee80211_has_protected(frame_control))
                tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;

        desc->tx_attr = cpu_to_le16(tx_attr);

        wlcore_hw_set_tx_desc_csum(wl, desc, skb);
        wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
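/*
 * Copy one frame into the aggregation buffer at buf_offset. On success
 * the aligned length that was consumed is returned; -EAGAIN means the
 * aggregation buffer is full and must be flushed first, -EBUSY means the
 * firmware has no room for more packets right now.
 */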
/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                                   struct sk_buff *skb, u32 buf_offset, u8 hlid)
        struct ieee80211_tx_info *info;

        if (!skb) {
                wl1271_error("discarding null skb");

        if (hlid == WL12XX_INVALID_LINK_ID) {
                wl1271_error("invalid hlid. dropping skb 0x%p", skb);

        info = IEEE80211_SKB_CB(skb);

        is_dummy = wl12xx_is_dummy_packet(wl, skb);

        if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
            info->control.hw_key &&
            info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
                extra = WL1271_EXTRA_SPACE_TKIP;

        if (info->control.hw_key) {
                u8 idx = info->control.hw_key->hw_key_idx;
                u32 cipher = info->control.hw_key->cipher;

                is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
                         (cipher == WLAN_CIPHER_SUITE_WEP104);

                if (unlikely(is_wep && wlvif->default_key != idx)) {
                        ret = wl1271_set_default_wep_key(wl, wlvif, idx);
                        wlvif->default_key = idx;
                }

                is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
        }

        ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
                                 is_gem);

        wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);

        if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
                wl1271_tx_ap_update_inconnection_sta(wl, skb);
                wl1271_tx_regulate_link(wl, wlvif, hlid);
        }

        /*
         * The length of each packet is stored in terms of
         * words. Thus, we must pad the skb data to make sure its
         * length is aligned. The number of padding bytes is computed
         * and set in wl1271_tx_fill_hdr.
         * In special cases, we want to align to a specific block size
         * (e.g. for wl128x with SDIO we align to 256).
         */
        total_len = wlcore_calc_packet_alignment(wl, skb->len);

        memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
        memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

        /* Revert side effects in the dummy packet skb, so it can be reused */
        if (is_dummy)
                skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

        return total_len;
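/*
 * Translate a rate_set bitmap (legacy rates in the low bits, MCS rates
 * starting at bit HW_HT_RATES_OFFSET) into the firmware's
 * CONF_HW_BIT_RATE_* representation for the given band.
 */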
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
                                enum ieee80211_band rate_band)
        struct ieee80211_supported_band *band;
        u32 enabled_rates = 0;

        band = wl->hw->wiphy->bands[rate_band];
        for (bit = 0; bit < band->n_bitrates; bit++) {
                if (rate_set & 0x1)
                        enabled_rates |= band->bitrates[bit].hw_value;
                rate_set >>= 1;
        }

        /* MCS rate indications are on bits 16 - 31 */
        rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;

        for (bit = 0; bit < 16; bit++) {
                if (rate_set & 0x1)
                        enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
                rate_set >>= 1;
        }

        return enabled_rates;
void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
        for (i = 0; i < NUM_TX_QUEUES; i++) {
                if (wlcore_is_queue_stopped_by_reason(wl, i,
                        WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
                    wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
                        /* firmware buffer has space, restart queues */
                        wlcore_wake_queue(wl, i,
                                          WLCORE_QUEUE_STOP_REASON_WATERMARK);
static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
                                                struct sk_buff_head *queues)
        u32 min_pkts = 0xffffffff;

        /*
         * Find a non-empty ac where:
         * 1. There are packets to transmit
         * 2. The FW has the least allocated packets
         *
         * We prioritize the ACs according to VO>VI>BE>BK
         */
        for (i = 0; i < NUM_TX_QUEUES; i++) {
                ac = wl1271_tx_get_queue(i);
                if (!skb_queue_empty(&queues[ac]) &&
                    (wl->tx_allocated_pkts[ac] < min_pkts)) {
                        q = ac;
                        min_pkts = wl->tx_allocated_pkts[q];
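/*
 * Dequeue one frame from the per-AC queues of a single link, using the
 * AC chosen by wl1271_select_queue() and dropping the global per-AC
 * pending counter under wl_lock.
 */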
static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
                                              struct wl1271_link *lnk)
        struct sk_buff_head *queue;

        queue = wl1271_select_queue(wl, lnk->tx_queue);

        skb = skb_dequeue(queue);
        if (skb) {
                int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
                spin_lock_irqsave(&wl->wl_lock, flags);
                WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
                wl->tx_queue_count[q]--;
                spin_unlock_irqrestore(&wl->wl_lock, flags);
static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
                                              struct wl12xx_vif *wlvif,
                                              u8 *hlid)
        struct sk_buff *skb = NULL;
        int i, h, start_hlid;

        /* start from the link after the last one */
        start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;

        /* dequeue according to AC, round robin on each link */
        for (i = 0; i < WL12XX_MAX_LINKS; i++) {
                h = (start_hlid + i) % WL12XX_MAX_LINKS;

                /* only consider connected stations */
                if (!test_bit(h, wlvif->links_map))
                        continue;

                skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
                if (!skb)
                        continue;

                wlvif->last_tx_hlid = h;
                break;
        }

        if (!skb)
                wlvif->last_tx_hlid = 0;

        *hlid = wlvif->last_tx_hlid;
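/*
 * Top-level dequeue order: resume the vif round-robin where the last
 * pass stopped, then try the system hlid, then make one full pass over
 * all vifs, and finally fall back to the pending dummy packet if the
 * firmware asked for one.
 */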
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
        struct wl12xx_vif *wlvif = wl->last_wlvif;
        struct sk_buff *skb = NULL;

        /* continue from last wlvif (round robin) */
        wl12xx_for_each_wlvif_continue(wl, wlvif) {
                skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
                wl->last_wlvif = wlvif;

        /* dequeue from the system HLID before restarting the wlvif list */
        skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
        *hlid = wl->system_hlid;

        /* do a new pass over the wlvif list */
        wl12xx_for_each_wlvif(wl, wlvif) {
                skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
                wl->last_wlvif = wlvif;

                /*
                 * No need to continue after last_wlvif. The previous
                 * pass should have found it.
                 */
                if (wlvif == wl->last_wlvif)
                        break;

        if (!skb &&
            test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
                skb = wl->dummy_packet;
                *hlid = wl->system_hlid;
                q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
                spin_lock_irqsave(&wl->wl_lock, flags);
                WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
                wl->tx_queue_count[q]--;
                spin_unlock_irqrestore(&wl->wl_lock, flags);
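/*
 * Push a frame back to the head of its link queue after a deferred or
 * failed transmit attempt; dummy packets are simply marked pending again,
 * and last_tx_hlid is rewound so this frame is dequeued first on the
 * next pass.
 */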
static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                                  struct sk_buff *skb, u8 hlid)
        int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

        if (wl12xx_is_dummy_packet(wl, skb)) {
                set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
        } else {
                skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

                /* make sure we dequeue the same packet next time */
                wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
                                      WL12XX_MAX_LINKS;
        }

        spin_lock_irqsave(&wl->wl_lock, flags);
        wl->tx_queue_count[q]++;
        spin_unlock_irqrestore(&wl->wl_lock, flags);
static bool wl1271_tx_is_data_present(struct sk_buff *skb)
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);

        return ieee80211_is_data_present(hdr->frame_control);
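/*
 * Re-arm rx streaming for every station vif that owns one of the links
 * we just transmitted on: schedule the enable work if streaming is not
 * yet active and push the rx-streaming timer further out.
 */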
void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
        struct wl12xx_vif *wlvif;

        if (!wl->conf.rx_streaming.interval)
                return;

        if (!wl->conf.rx_streaming.always &&
            !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
                return;

        timeout = wl->conf.rx_streaming.duration;
        wl12xx_for_each_wlvif_sta(wl, wlvif) {
                for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
                        if (test_bit(hlid, wlvif->links_map)) {

                /* enable rx streaming */
                if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
                        ieee80211_queue_work(wl->hw,
                                             &wlvif->rx_streaming_enable_work);

                mod_timer(&wlvif->rx_streaming_timer,
                          jiffies + msecs_to_jiffies(timeout));
/*
 * Returns failure values only in case of failed bus ops within this function.
 * wl1271_prepare_tx_frame retvals won't be returned in order to avoid
 * triggering recovery by higher layers when not necessary.
 * If a FW command fails within wl1271_prepare_tx_frame, a recovery will be
 * queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame can occur
 * and are legitimate, so don't propagate them. -EINVAL will emit a WARNING
 * within the prepare_tx_frame code, but there's nothing we should do about
 * those either.
 */
int wlcore_tx_work_locked(struct wl1271 *wl)
        struct wl12xx_vif *wlvif;
        struct wl1271_tx_hw_descr *desc;
        u32 buf_offset = 0, last_len = 0;
        bool sent_packets = false;
        unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};

        if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;

        while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
                bool has_data = false;

                if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
                        wlvif = wl12xx_vif_to_data(info->control.vif);
                else
                        hlid = wl->system_hlid;

                has_data = wlvif && wl1271_tx_is_data_present(skb);
                ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
                                              hlid);
                if (ret == -EAGAIN) {
                        /*
                         * Aggregation buffer is full.
                         * Flush buffer and try again.
                         */
                        wl1271_skb_queue_head(wl, wlvif, skb, hlid);

                        buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
                                                            last_len);
                        bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
                                                    wl->aggr_buf, buf_offset, true);
                } else if (ret == -EBUSY) {
                        /*
                         * Firmware buffer is full.
                         * Queue back last skb, and stop aggregating.
                         */
                        wl1271_skb_queue_head(wl, wlvif, skb, hlid);
                        /* No work left, avoid scheduling redundant tx work */
                        set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
                } else if (ret < 0) {
                        if (wl12xx_is_dummy_packet(wl, skb))
                                /*
                                 * fw still expects dummy packet,
                                 * so re-enqueue it
                                 */
                                wl1271_skb_queue_head(wl, wlvif, skb, hlid);
                        else
                                ieee80211_free_txskb(wl->hw, skb);
                }

                buf_offset += last_len;
                wl->tx_packets_count++;

                desc = (struct wl1271_tx_hw_descr *) skb->data;
                __set_bit(desc->hlid, active_hlids);
        }

        buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
        bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
                                    buf_offset, true);

        /*
         * Interrupt the firmware with the new packets. This is only
         * required for older hardware revisions
         */
        if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
                bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
                                         wl->tx_packets_count);
        }

        wl1271_handle_tx_low_watermark(wl);
        wl12xx_rearm_rx_streaming(wl, active_hlids);

out:
        return bus_ret;
void wl1271_tx_work(struct work_struct *work)
        struct wl1271 *wl = container_of(work, struct wl1271, tx_work);

        mutex_lock(&wl->mutex);
        ret = wl1271_ps_elp_wakeup(wl);

        ret = wlcore_tx_work_locked(wl);
        if (ret < 0)
                wl12xx_queue_recovery_work(wl);

        wl1271_ps_elp_sleep(wl);
        mutex_unlock(&wl->mutex);
static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
        /*
         * TODO: use wl12xx constants when this code is moved to wl12xx, as
         * only it uses Tx-completion.
         */
        if (rate_class_index <= 8)
                flags |= IEEE80211_TX_RC_MCS;

        /*
         * TODO: use wl12xx constants when this code is moved to wl12xx, as
         * only it uses Tx-completion.
         */
        if (rate_class_index == 0)
                flags |= IEEE80211_TX_RC_SHORT_GI;
static void wl1271_tx_complete_packet(struct wl1271 *wl,
                                      struct wl1271_tx_hw_res_descr *result)
        struct ieee80211_tx_info *info;
        struct ieee80211_vif *vif;
        struct wl12xx_vif *wlvif;

        /* check for id legality */
        if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
                wl1271_warning("TX result illegal id: %d", id);
                return;
        }

        skb = wl->tx_frames[id];
        info = IEEE80211_SKB_CB(skb);

        if (wl12xx_is_dummy_packet(wl, skb)) {
                wl1271_free_tx_id(wl, id);
                return;
        }

        /* info->control is valid as long as we don't update info->status */
        vif = info->control.vif;
        wlvif = wl12xx_vif_to_data(vif);

        /* update the TX status info */
        if (result->status == TX_SUCCESS) {
                if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
                        info->flags |= IEEE80211_TX_STAT_ACK;
                rate = wlcore_rate_to_idx(wl, result->rate_class_index,
                                          wlvif->band);
                rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
                retries = result->ack_failures;
        } else if (result->status == TX_RETRY_EXCEEDED) {
                wl->stats.excessive_retries++;
                retries = result->ack_failures;
        }

        info->status.rates[0].idx = rate;
        info->status.rates[0].count = retries;
        info->status.rates[0].flags = rate_flags;
        info->status.ack_signal = -1;

        wl->stats.retry_count += result->ack_failures;

        /*
         * update sequence number only when relevant, i.e. only in
         * sessions of TKIP, AES and GEM (not in open or WEP sessions)
         */
        if (info->control.hw_key &&
            (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP ||
             info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
             info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
                u8 fw_lsb = result->tx_security_sequence_number_lsb;
                u8 cur_lsb = wlvif->tx_security_last_seq_lsb;

                /*
                 * update security sequence number, taking care of potential
                 * wrap-around
                 */
                wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
                wlvif->tx_security_last_seq_lsb = fw_lsb;
        }

        /* remove private header from packet */
        skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

        /* remove TKIP header space if present */
        if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
            info->control.hw_key &&
            info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
                int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
                memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
                        hdrlen);
                skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
        }

        wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
                     " status 0x%x",
                     result->id, skb, result->ack_failures,
                     result->rate_class_index, result->status);

        /* return the packet to the stack */
        skb_queue_tail(&wl->deferred_tx_queue, skb);
        queue_work(wl->freezable_wq, &wl->netstack_work);
        wl1271_free_tx_id(wl, result->id);
/* Called upon reception of a TX complete interrupt */
int wlcore_tx_complete(struct wl1271 *wl)
        struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
        u32 count, fw_counter;

        /* read the tx results from the chipset */
        ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
                          wl->tx_res_if, sizeof(*wl->tx_res_if), false);

        fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

        /* write host counter to chipset (to ack) */
        ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
                             offsetof(struct wl1271_tx_hw_res_if,
                                      tx_result_host_counter), fw_counter);

        count = fw_counter - wl->tx_results_count;
        wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

        /* verify that the result buffer is not getting overrun */
        if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
                wl1271_warning("TX result overflow from chipset: %d", count);

        /* process the results */
        for (i = 0; i < count; i++) {
                struct wl1271_tx_hw_res_descr *result;
                u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

                /* process the packet */
                result = &(wl->tx_res_if->tx_results_queue[offset]);
                wl1271_tx_complete_packet(wl, result);

                wl->tx_results_count++;
EXPORT_SYMBOL(wlcore_tx_complete);
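/*
 * Return every frame still queued on one link back to mac80211 with an
 * empty rate table, then subtract the freed frames from the global
 * per-AC queue counters.
 */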
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
        struct ieee80211_tx_info *info;
        int total[NUM_TX_QUEUES];

        for (i = 0; i < NUM_TX_QUEUES; i++) {
                while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
                        wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);

                        if (!wl12xx_is_dummy_packet(wl, skb)) {
                                info = IEEE80211_SKB_CB(skb);
                                info->status.rates[0].idx = -1;
                                info->status.rates[0].count = 0;
                                ieee80211_tx_status_ni(wl->hw, skb);

        spin_lock_irqsave(&wl->wl_lock, flags);
        for (i = 0; i < NUM_TX_QUEUES; i++)
                wl->tx_queue_count[i] -= total[i];
        spin_unlock_irqrestore(&wl->wl_lock, flags);

        wl1271_handle_tx_low_watermark(wl);
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
                if (wlvif->bss_type == BSS_TYPE_AP_BSS)
                        wl1271_free_sta(wl, wlvif, i);
                else
                        wlvif->sta.ba_rx_bitmap = 0;

                wl->links[i].allocated_pkts = 0;
                wl->links[i].prev_freed_pkts = 0;
        }

        wlvif->last_tx_hlid = 0;
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl)
        struct sk_buff *skb;
        struct ieee80211_tx_info *info;

        /* only reset the queues if something bad happened */
        if (WARN_ON_ONCE(wl1271_tx_total_queue_count(wl) != 0)) {
                for (i = 0; i < WL12XX_MAX_LINKS; i++)
                        wl1271_tx_reset_link_queues(wl, i);

                for (i = 0; i < NUM_TX_QUEUES; i++)
                        wl->tx_queue_count[i] = 0;

        /*
         * Make sure the driver is at a consistent state, in case this
         * function is called from a context other than interface removal.
         * This call will always wake the TX queues.
         */
        wl1271_handle_tx_low_watermark(wl);

        for (i = 0; i < wl->num_tx_desc; i++) {
                if (wl->tx_frames[i] == NULL)
                        continue;

                skb = wl->tx_frames[i];
                wl1271_free_tx_id(wl, i);
                wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

                if (!wl12xx_is_dummy_packet(wl, skb)) {
                        /*
                         * Remove private headers before passing the skb to
                         * mac80211
                         */
                        info = IEEE80211_SKB_CB(skb);
                        skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
                        if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
                            info->control.hw_key &&
                            info->control.hw_key->cipher ==
                            WLAN_CIPHER_SUITE_TKIP) {
                                int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
                                memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
                                        skb->data, hdrlen);
                                skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
                        }

                        info->status.rates[0].idx = -1;
                        info->status.rates[0].count = 0;

                        ieee80211_tx_status_ni(wl->hw, skb);
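/* flush timeout in microseconds (500 ms); used with usecs_to_jiffies() below */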
#define WL1271_TX_FLUSH_TIMEOUT 500000

/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
        unsigned long timeout, start_time;

        start_time = jiffies;
        timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);

        /* only one flush should be in progress, for consistent queue state */
        mutex_lock(&wl->flush_mutex);

        mutex_lock(&wl->mutex);
        if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
                mutex_unlock(&wl->mutex);
                goto out;
        }

        wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);

        while (!time_after(jiffies, timeout)) {
                wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
                             wl->tx_frames_cnt,
                             wl1271_tx_total_queue_count(wl));

                /* force Tx and give the driver some time to flush data */
                mutex_unlock(&wl->mutex);
                if (wl1271_tx_total_queue_count(wl))
                        wl1271_tx_work(&wl->tx_work);
                mutex_lock(&wl->mutex);

                if ((wl->tx_frames_cnt == 0) &&
                    (wl1271_tx_total_queue_count(wl) == 0)) {
                        wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
                                     jiffies_to_msecs(jiffies - start_time));

        wl1271_warning("Unable to flush all TX buffers, "
                       "timed out (timeout %d ms)",
                       WL1271_TX_FLUSH_TIMEOUT / 1000);

        /* forcibly flush all Tx buffers on our queues */
        for (i = 0; i < WL12XX_MAX_LINKS; i++)
                wl1271_tx_reset_link_queues(wl, i);

        wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
        mutex_unlock(&wl->mutex);
out:
        mutex_unlock(&wl->flush_mutex);
EXPORT_SYMBOL_GPL(wl1271_tx_flush);
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
        if (WARN_ON(!rate_set))
                return 0;

        return BIT(__ffs(rate_set));
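/*
 * Each mac80211 queue carries a bitmask of stop reasons
 * (wl->queue_stop_reasons). The queue is stopped when the first reason
 * is set and only woken again once the last reason has been cleared.
 */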
void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
                              enum wlcore_queue_stop_reason reason)
        bool stopped = !!wl->queue_stop_reasons[queue];

        /* queue should not be stopped for this reason */
        WARN_ON(test_and_set_bit(reason, &wl->queue_stop_reasons[queue]));

        if (stopped)
                return;

        ieee80211_stop_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));

void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
                       enum wlcore_queue_stop_reason reason)
        unsigned long flags;

        spin_lock_irqsave(&wl->wl_lock, flags);
        wlcore_stop_queue_locked(wl, queue, reason);
        spin_unlock_irqrestore(&wl->wl_lock, flags);
void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
                       enum wlcore_queue_stop_reason reason)
        unsigned long flags;

        spin_lock_irqsave(&wl->wl_lock, flags);

        /* the queue is expected to be stopped for this reason */
        WARN_ON(!test_and_clear_bit(reason, &wl->queue_stop_reasons[queue]));

        if (wl->queue_stop_reasons[queue])
                goto out;

        ieee80211_wake_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));

out:
        spin_unlock_irqrestore(&wl->wl_lock, flags);
void wlcore_stop_queues(struct wl1271 *wl,
                        enum wlcore_queue_stop_reason reason)
        for (i = 0; i < NUM_TX_QUEUES; i++)
                wlcore_stop_queue(wl, i, reason);
EXPORT_SYMBOL_GPL(wlcore_stop_queues);

void wlcore_wake_queues(struct wl1271 *wl,
                        enum wlcore_queue_stop_reason reason)
        for (i = 0; i < NUM_TX_QUEUES; i++)
                wlcore_wake_queue(wl, i, reason);
EXPORT_SYMBOL_GPL(wlcore_wake_queues);
void wlcore_reset_stopped_queues(struct wl1271 *wl)
        unsigned long flags;

        spin_lock_irqsave(&wl->wl_lock, flags);

        for (i = 0; i < NUM_TX_QUEUES; i++) {
                if (!wl->queue_stop_reasons[i])
                        continue;

                wl->queue_stop_reasons[i] = 0;
                ieee80211_wake_queue(wl->hw,
                                     wl1271_tx_get_mac80211_queue(i));
        }

        spin_unlock_irqrestore(&wl->wl_lock, flags);
bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
                                       enum wlcore_queue_stop_reason reason)
        return test_bit(reason, &wl->queue_stop_reasons[queue]);

bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue)
        return !!wl->queue_stop_reasons[queue];