struct ath_buf *bf;
struct ath_desc *ds;
- struct ath_rx_status *rx_stats;
struct sk_buff *skb = NULL, *requeue_skb;
struct ieee80211_rx_status *rxs;
struct ath_hw *ah = sc->sc_ah;
struct ieee80211_hdr *hdr;
int retval;
bool decrypt_error = false;
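+ /* rx status now lives on the stack and is filled in by
+ * ath9k_hw_rxprocdesc(), instead of being read from the
+ * descriptor-embedded ds_us.rx */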
+ struct ath_rx_status rs;
spin_lock_bh(&sc->rx.rxbuflock);
bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
ds = bf->bf_desc;
- rx_stats = &ds->ds_us.rx;
/*
* Must provide the virtual address of the current
* descriptor, the physical address, and the virtual
* address of the next descriptor in the h/w chain.
* All this is necessary because of our use of
* a self-linked list to avoid rx overruns.
*/
- retval = ath9k_hw_rxprocdesc(ah, ds, rx_stats, 0);
+ memset(&rs, 0, sizeof(rs));
+ retval = ath9k_hw_rxprocdesc(ah, ds, &rs, 0);
if (retval == -EINPROGRESS) {
+ struct ath_rx_status trs;
struct ath_buf *tbf;
struct ath_desc *tds;
+ memset(&trs, 0, sizeof(trs));
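+ /* trs is scratch space used only to peek at the next
+ * descriptor's completion state; its contents are discarded */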
if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
sc->rx.rxlink = NULL;
break;
}
tds = tbf->bf_desc;
- retval = ath9k_hw_rxprocdesc(ah, tds, &tds->ds_us.rx, 0);
+ retval = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
if (retval == -EINPROGRESS) {
break;
}
hw = ath_get_virt_hw(sc, hdr);
- ath_debug_stat_rx(sc, rx_stats);
+ ath_debug_stat_rx(sc, &rs);
/*
* If we're asked to flush receive queue, directly
* chain it back at the queue without processing it.
*/
if (flush)
goto requeue;
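+ /* validate the frame and fill in the mac80211 rx status;
+ * frames that fail preprocessing are recycled back onto the
+ * rx list via requeue */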
- retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, rx_stats,
+ retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs,
rxs, &decrypt_error);
if (retval)
goto requeue;
common->rx_bufsize,
DMA_FROM_DEVICE);
- skb_put(skb, rx_stats->rs_datalen);
+ skb_put(skb, rs.rs_datalen);
- ath9k_cmn_rx_skb_postprocess(common, skb, rx_stats,
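+ /* record the hardware decryption outcome on the rx status
+ * before the frame is handed up the stack */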
+ ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
rxs, decrypt_error);
/* We will now give hardware our shiny new allocated skb */
/*
* change the default rx antenna if rx diversity chooses the
* other antenna 3 times in a row.
*/
- if (sc->rx.defant != rx_stats->rs_antenna) {
+ if (sc->rx.defant != rs.rs_antenna) {
if (++sc->rx.rxotherant >= 3)
- ath_setdefantenna(sc, rx_stats->rs_antenna);
+ ath_setdefantenna(sc, rs.rs_antenna);
} else {
sc->rx.rxotherant = 0;
}
struct ath_buf *bf, *lastbf, *bf_held = NULL;
struct list_head bf_head;
struct ath_desc *ds;
- struct ath_tx_status *ts;
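+ /* tx completion status is likewise collected into a stack
+ * variable by ath9k_hw_txprocdesc() instead of being read
+ * from the descriptor-embedded ds_us.tx */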
+ struct ath_tx_status ts;
int txok;
int status;
lastbf = bf->bf_lastbf;
ds = lastbf->bf_desc;
- ts = &ds->ds_us.tx;
- status = ath9k_hw_txprocdesc(ah, ds, ts);
+ memset(&ts, 0, sizeof(ts));
+ status = ath9k_hw_txprocdesc(ah, ds, &ts);
if (status == -EINPROGRESS) {
spin_unlock_bh(&txq->axq_lock);
break;
/*
* We now know the nullfunc frame has been ACKed so we
* can disable RX.
*/
if (bf->bf_isnullfunc &&
- (ts->ts_status & ATH9K_TX_ACKED)) {
+ (ts.ts_status & ATH9K_TX_ACKED)) {
if ((sc->ps_flags & PS_ENABLED))
ath9k_enable_ps(sc);
else
&txq->axq_q, lastbf->list.prev);
txq->axq_depth--;
- txok = !(ts->ts_status & ATH9K_TXERR_MASK);
+ txok = !(ts.ts_status & ATH9K_TXERR_MASK);
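+ /* the frame completed successfully only if none of the
+ * ATH9K_TXERR_MASK error bits are set */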
txq->axq_tx_inprogress = false;
spin_unlock_bh(&txq->axq_lock);
/*
* This frame is sent out as a single frame.
* Use hardware retry status for this frame.
*/
- bf->bf_retries = ts->ts_longretry;
- if (ts->ts_status & ATH9K_TXERR_XRETRY)
+ bf->bf_retries = ts.ts_longretry;
+ if (ts.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(bf, ts, 0, txok, true);
+ ath_tx_rc_status(bf, &ts, 0, txok, true);
}
if (bf_isampdu(bf))
- ath_tx_complete_aggr(sc, txq, bf, &bf_head, ts, txok);
+ ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
else
- ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, txok, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
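+ /* the completion paths hand the frame(s) back to mac80211;
+ * wake the queue afterwards in case it had been stopped */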
ath_wake_mac80211_queue(sc, txq);