int i;
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
- struct mt76_queue *q = dev->q_tx[i].q;
+ struct mt76_queue *q = dev->q_tx[i];
if (!q)
continue;
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
- struct mt76_sw_queue *sq = &dev->q_tx[qid];
- struct mt76_queue *q = sq->q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_queue_entry entry;
bool wake = false;
int last;
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
{
- struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_queue_buf buf;
dma_addr_t addr;
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
- struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_tx_info tx_info = {
.skb = skb,
};
offset = __MT_TXQ_MAX * (phy != &dev->phy);
for (i = 0; i < __MT_TXQ_MAX; i++) {
- q = dev->q_tx[offset + i].q;
+ q = dev->q_tx[offset + i];
if (q && q->queued)
return true;
}
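/* Sketch, not part of the patch: how the doubled q_tx[] array is indexed.
 * The first __MT_TXQ_MAX slots serve the primary phy and the second half
 * the extension phy, which is what the offset computation above encodes.
 * The value of __MT_TXQ_MAX is illustrative so the snippet stands alone.
 */
#define __MT_TXQ_MAX 6

static int txq_slot(int is_ext_phy, int qid)
{
	/* mirrors: offset = __MT_TXQ_MAX * (phy != &dev->phy); */
	return __MT_TXQ_MAX * !!is_ext_phy + qid;
}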
struct page_frag_cache rx_page;
};
-struct mt76_sw_queue {
- struct mt76_queue *q;
-};
-
struct mt76_mcu_ops {
u32 headroom;
u32 tailroom;
};
struct mt76_txq {
- struct mt76_sw_queue *swq;
+ struct mt76_queue *q;
struct mt76_wcid *wcid;
struct sk_buff_head retry_q;
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
struct list_head txwi_cache;
- struct mt76_sw_queue q_tx[2 * __MT_TXQ_MAX];
+ struct mt76_queue *q_tx[2 * __MT_TXQ_MAX];
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
int tx_dma_idx[4];
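/* Sketch, not part of the patch: the data-structure change in isolation.
 * struct mt76_sw_queue had shrunk to a one-member wrapper, so every caller
 * paid an extra ".q" hop; storing struct mt76_queue pointers directly in
 * q_tx[] drops that indirection. Types are stubbed and the array size is
 * illustrative so this compiles on its own.
 */
#define __MT_TXQ_MAX 6

struct mt76_queue {
	int queued, head, tail;
	int hw_idx;
};

/* before */
struct mt76_sw_queue {
	struct mt76_queue *q;
};
struct mt76_dev_old {
	struct mt76_sw_queue q_tx[2 * __MT_TXQ_MAX];
};

/* after */
struct mt76_dev_new {
	struct mt76_queue *q_tx[2 * __MT_TXQ_MAX];
};

static int queued_old(struct mt76_dev_old *dev, int qid)
{
	return dev->q_tx[qid].q->queued;	/* two dereferences */
}

static int queued_new(struct mt76_dev_new *dev, int qid)
{
	return dev->q_tx[qid]->queued;		/* one dereference */
}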
mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
- dev->mt76.q_tx[MT_TXQ_CAB].q->hw_idx) |
+ dev->mt76.q_tx[MT_TXQ_CAB]->hw_idx) |
FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
data.dev = dev;
__skb_queue_head_init(&data.q);
- q = dev->mt76.q_tx[MT_TXQ_BEACON].q;
+ q = dev->mt76.q_tx[MT_TXQ_BEACON];
spin_lock_bh(&q->lock);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
if (dev->mt76.csa_complete)
goto out;
- q = dev->mt76.q_tx[MT_TXQ_CAB].q;
+ q = dev->mt76.q_tx[MT_TXQ_CAB];
do {
nframes = skb_queue_len(&data.q);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
out:
mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
- if (dev->mt76.q_tx[MT_TXQ_BEACON].q->queued >
+ if (dev->mt76.q_tx[MT_TXQ_BEACON]->queued >
hweight8(dev->mt76.beacon_mask))
dev->beacon_check++;
}
#include "../dma.h"
static int
-mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_sw_queue *q,
- int idx, int n_desc)
+mt7603_init_tx_queue(struct mt7603_dev *dev, int qid, int idx, int n_desc)
{
struct mt76_queue *hwq;
int err;
if (err < 0)
return err;
- q->q = hwq;
+ dev->mt76.q_tx[qid] = hwq;
mt7603_irq_enable(dev, MT_INT_TX_DONE(idx));
mt7603_pse_client_reset(dev);
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
- ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[i],
- wmm_queue_map[i],
+ ret = mt7603_init_tx_queue(dev, i, wmm_queue_map[i],
MT_TX_RING_SIZE);
if (ret)
return ret;
}
- ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+ ret = mt7603_init_tx_queue(dev, MT_TXQ_PSD,
MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
if (ret)
return ret;
- ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ ret = mt7603_init_tx_queue(dev, MT_TXQ_MCU,
MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
if (ret)
return ret;
- ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_BEACON],
+ ret = mt7603_init_tx_queue(dev, MT_TXQ_BEACON,
MT_TX_HW_QUEUE_BCN, MT_MCU_RING_SIZE);
if (ret)
return ret;
- ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_CAB],
+ ret = mt7603_init_tx_queue(dev, MT_TXQ_CAB,
MT_TX_HW_QUEUE_BMC, MT_MCU_RING_SIZE);
if (ret)
return ret;
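/* Sketch, not part of the patch: the new init-helper shape shared by the
 * per-chip drivers. Passing the qid instead of a wrapper pointer lets the
 * helper both allocate the ring and publish it in q_tx[]. alloc_ring() is
 * a hypothetical stand-in for the driver's ring allocation; the array size
 * is illustrative.
 */
#include <errno.h>

struct mt76_queue;
struct mt76_dev { struct mt76_queue *q_tx[12]; };

struct mt76_queue *alloc_ring(int hw_idx, int n_desc);	/* stand-in */

static int init_tx_queue(struct mt76_dev *dev, int qid, int hw_idx,
			 int n_desc)
{
	struct mt76_queue *hwq = alloc_ring(hw_idx, n_desc);

	if (!hwq)
		return -ENOMEM;

	dev->q_tx[qid] = hwq;	/* the helper owns the assignment now */
	return 0;
}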
sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
for (i = 0; i < 4; i++) {
- struct mt76_queue *q = dev->mt76.q_tx[i].q;
+ struct mt76_queue *q = dev->mt76.q_tx[i];
u8 qidx = q->hw_idx;
u8 tid = ac_to_tid[i];
u32 txtime = airtime[qidx];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
struct ieee80211_vif *vif = info->control.vif;
- struct mt76_queue *q = dev->mt76.q_tx[qid].q;
+ struct mt76_queue *q = dev->mt76.q_tx[qid];
struct mt7603_vif *mvif;
int wlan_idx;
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
int i;
for (i = 0; i < 4; i++) {
- q = dev->mt76.q_tx[i].q;
+ q = dev->mt76.q_tx[i];
if (!q->queued)
continue;
u16 cw_max = (1 << 10) - 1;
u32 val;
- queue = dev->mt76.q_tx[queue].q->hw_idx;
+ queue = dev->mt76.q_tx[queue]->hw_idx;
if (params->cw_min)
cw_min = params->cw_min;
int i;
for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
- struct mt76_sw_queue *q = &dev->mt76.q_tx[queue_map[i].id];
+ struct mt76_queue *q = dev->mt76.q_tx[queue_map[i].id];
- if (!q->q)
+ if (!q)
continue;
seq_printf(s,
"%s: queued=%d head=%d tail=%d\n",
- queue_map[i].queue, q->q->queued, q->q->head,
- q->q->tail);
+ queue_map[i].queue, q->queued, q->head,
+ q->tail);
}
return 0;
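/* Sketch, not part of the patch: the "is this queue configured?" test.
 * The wrapper entry used to exist unconditionally with a possibly-NULL .q
 * member; now the q_tx[] slot itself is the NULL-or-valid pointer, so the
 * debugfs dump above checks the slot directly. Stubbed types, illustrative
 * array size.
 */
#include <stddef.h>

struct mt76_queue;
struct mt76_dev { struct mt76_queue *q_tx[12]; };

static int queue_is_live(const struct mt76_dev *dev, int qid)
{
	return dev->q_tx[qid] != NULL;	/* was: dev->q_tx[qid].q != NULL */
}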
#include "mac.h"
static int
-mt7615_init_tx_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
- int idx, int n_desc)
+mt7615_init_tx_queue(struct mt7615_dev *dev, int qid, int idx, int n_desc)
{
struct mt76_queue *hwq;
int err;
if (err < 0)
return err;
- q->q = hwq;
+ dev->mt76.q_tx[qid] = hwq;
return 0;
}
int i;
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[i],
- wmm_queue_map[i],
+ ret = mt7615_init_tx_queue(dev, i, wmm_queue_map[i],
MT7615_TX_RING_SIZE / 2);
if (ret)
return ret;
}
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+ ret = mt7615_init_tx_queue(dev, MT_TXQ_PSD,
MT7622_TXQ_MGMT, MT7615_TX_MGMT_RING_SIZE);
if (ret)
return ret;
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ ret = mt7615_init_tx_queue(dev, MT_TXQ_MCU,
MT7622_TXQ_MCU, MT7615_TX_MCU_RING_SIZE);
return ret;
}
static int
mt7615_init_tx_queues(struct mt7615_dev *dev)
{
- struct mt76_sw_queue *q;
int ret, i;
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
+ ret = mt7615_init_tx_queue(dev, MT_TXQ_FWDL,
MT7615_TXQ_FWDL,
MT7615_TX_FWDL_RING_SIZE);
if (ret)
if (!is_mt7615(&dev->mt76))
return mt7622_init_tx_queues_multi(dev);
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[0], 0,
- MT7615_TX_RING_SIZE);
+ ret = mt7615_init_tx_queue(dev, 0, 0, MT7615_TX_RING_SIZE);
if (ret)
return ret;
- for (i = 1; i < MT_TXQ_MCU; i++) {
- q = &dev->mt76.q_tx[i];
- q->q = dev->mt76.q_tx[0].q;
- }
+ for (i = 1; i < MT_TXQ_MCU; i++)
+ dev->mt76.q_tx[i] = dev->mt76.q_tx[0];
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
- MT7615_TXQ_MCU,
+ ret = mt7615_init_tx_queue(dev, MT_TXQ_MCU, MT7615_TXQ_MCU,
MT7615_TX_MCU_RING_SIZE);
return 0;
}
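/* Sketch, not part of the patch: mt7615 backs every data qid with the one
 * hardware ring created for qid 0, so with plain pointers the aliasing is
 * a single assignment per slot; both slots then share the same ring, lock
 * and queued counter. Stubbed types; MT_TXQ_MCU and the array size are
 * illustrative.
 */
#define MT_TXQ_MCU 4

struct mt76_queue;
struct mt76_dev { struct mt76_queue *q_tx[12]; };

static void alias_data_queues(struct mt76_dev *dev)
{
	int i;

	for (i = 1; i < MT_TXQ_MCU; i++)
		dev->q_tx[i] = dev->q_tx[0];
}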
static inline u32 mt7615_tx_mcu_int_mask(struct mt7615_dev *dev)
{
- return MT_INT_TX_DONE(dev->mt76.q_tx[MT_TXQ_MCU].q->hw_idx);
+ return MT_INT_TX_DONE(dev->mt76.q_tx[MT_TXQ_MCU]->hw_idx);
}
void mt7615_dma_reset(struct mt7615_dev *dev);
if (ret)
goto out;
- mt76_queue_kick(dev, mdev->q_tx[MT_TXQ_MCU].q);
+ mt76_queue_kick(dev, mdev->q_tx[MT_TXQ_MCU]);
if (wait_resp)
ret = mt7615_mcu_wait_response(dev, cmd, seq);
static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- bool mcu = q == dev->q_tx[MT_TXQ_MCU].q;
+ bool mcu = q == dev->q_tx[MT_TXQ_MCU];
struct mt76_sdio *sdio = &dev->sdio;
int nframes = 0;
for (i = 0; i < MT_TXQ_MCU_WA; i++) {
int ret;
- ret = mt7663s_tx_run_queue(dev, dev->q_tx[i].q);
+ ret = mt7663s_tx_run_queue(dev, dev->q_tx[i]);
if (ret < 0)
break;
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
- struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD].q;
+ struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD];
struct beacon_bc_data data = {};
struct sk_buff *skb;
int i;
EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);
static int
-mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_sw_queue *q,
- int idx, int n_desc)
+mt76x02_init_tx_queue(struct mt76x02_dev *dev, int qid, int idx, int n_desc)
{
struct mt76_queue *hwq;
int err;
if (err < 0)
return err;
- q->q = hwq;
+ dev->mt76.q_tx[qid] = hwq;
mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));
mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
- mt76_ac_to_hwq(i),
+ ret = mt76x02_init_tx_queue(dev, i, mt76_ac_to_hwq(i),
MT_TX_RING_SIZE);
if (ret)
return ret;
}
- ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+ ret = mt76x02_init_tx_queue(dev, MT_TXQ_PSD,
MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
if (ret)
return ret;
- ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ ret = mt76x02_init_tx_queue(dev, MT_TXQ_MCU,
MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
if (ret)
return ret;
if (dev->mt76.csa_complete)
mt76_csa_finish(&dev->mt76);
else
- mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD].q);
+ mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD]);
}
if (intr & MT_INT_TX_STAT)
int i;
for (i = 0; i < 4; i++) {
- q = dev->mt76.q_tx[i].q;
+ q = dev->mt76.q_tx[i];
if (!q->queued)
continue;
struct mt76_tx_info *tx_info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
- int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);
+ int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid]->hw_idx);
struct mt76x02_txwi *txwi;
bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
enum mt76_qsel qsel;
u8 cw_min = 5, cw_max = 10, qid;
u32 val;
- qid = dev->mt76.q_tx[queue].q->hw_idx;
+ qid = dev->mt76.q_tx[queue]->hw_idx;
if (params->cw_min)
cw_min = fls(params->cw_min);
int i;
for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
- struct mt76_sw_queue *q = &dev->mt76.q_tx[queue_map[i].id];
+ struct mt76_queue *q = dev->mt76.q_tx[queue_map[i].id];
- if (!q->q)
+ if (!q)
continue;
seq_printf(s,
"%s: queued=%d head=%d tail=%d\n",
- queue_map[i].queue, q->q->queued, q->q->head,
- q->q->tail);
+ queue_map[i].queue, q->queued, q->head,
+ q->tail);
}
return 0;
static int
mt7915_init_tx_queues(struct mt7915_dev *dev, int n_desc)
{
- struct mt76_sw_queue *q;
struct mt76_queue *hwq;
int err, i;
if (err < 0)
return err;
- for (i = 0; i < MT_TXQ_MCU; i++) {
- q = &dev->mt76.q_tx[i];
- q->q = hwq;
- }
+ for (i = 0; i < MT_TXQ_MCU; i++)
+ dev->mt76.q_tx[i] = hwq;
return 0;
}
static int
-mt7915_init_mcu_queue(struct mt7915_dev *dev, struct mt76_sw_queue *q,
- int idx, int n_desc)
+mt7915_init_mcu_queue(struct mt7915_dev *dev, int qid, int idx, int n_desc)
{
struct mt76_queue *hwq;
int err;
if (err < 0)
return err;
- q->q = hwq;
+ dev->mt76.q_tx[qid] = hwq;
return 0;
}
return ret;
/* command to WM */
- ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
- MT7915_TXQ_MCU_WM,
+ ret = mt7915_init_mcu_queue(dev, MT_TXQ_MCU, MT7915_TXQ_MCU_WM,
MT7915_TX_MCU_RING_SIZE);
if (ret)
return ret;
/* command to WA */
- ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU_WA],
- MT7915_TXQ_MCU_WA,
+ ret = mt7915_init_mcu_queue(dev, MT_TXQ_MCU_WA, MT7915_TXQ_MCU_WA,
MT7915_TX_MCU_RING_SIZE);
if (ret)
return ret;
/* firmware download */
- ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
- MT7915_TXQ_FWDL,
+ ret = mt7915_init_mcu_queue(dev, MT_TXQ_FWDL, MT7915_TXQ_FWDL,
MT7915_TX_FWDL_RING_SIZE);
if (ret)
return ret;
spin_lock_init(&q->lock);
q->hw_idx = i;
- dev->q_tx[i].q = q;
+ dev->q_tx[i] = q;
q->entry = devm_kcalloc(dev->dev,
MT_NUM_TX_ENTRIES, sizeof(*q->entry),
static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
{
- struct mt76_sw_queue *sq = &dev->q_tx[qid];
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_queue_entry entry;
- struct mt76_queue *q = sq->q;
bool wake;
while (q->queued > 0) {
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
- struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_tx_info tx_info = {
.skb = skb,
};
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
{
- struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_queue *q = dev->q_tx[qid];
int ret = -ENOSPC, len = skb->len;
if (q->queued == q->ndesc)
return;
qid = skb_get_queue_mapping(skb);
- q = dev->q_tx[qid].q;
+ q = dev->q_tx[qid];
spin_lock_bh(&q->lock);
return idx;
wcid = (struct mt76_wcid *)sta->drv_priv;
- q = dev->q_tx[qid].q;
+ q = dev->q_tx[qid];
q->entry[idx].wcid = wcid->idx;
pending = atomic_inc_return(&wcid->non_aql_packets);
if (stop && pending >= MT_MAX_NON_AQL_PKT)
if (ext_phy)
info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
- q = dev->q_tx[qid].q;
+ q = dev->q_tx[qid];
spin_lock_bh(&q->lock);
__mt76_tx_queue_skb(dev, qid, skb, wcid, sta, NULL);
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
struct sk_buff *last_skb = NULL;
- struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
+ struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD];
int i;
spin_lock_bh(&hwq->lock);
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
static int
-mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_sw_queue *sq,
+mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
struct mt76_txq *mtxq)
{
struct mt76_dev *dev = phy->dev;
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
enum mt76_txq_id qid = mt76_txq_get_qid(txq);
struct mt76_wcid *wcid = mtxq->wcid;
- struct mt76_queue *hwq = sq->q;
struct ieee80211_tx_info *info;
struct sk_buff *skb;
int n_frames = 1;
n_frames++;
} while (1);
- dev->queue_ops->kick(dev, hwq);
+ dev->queue_ops->kick(dev, q);
return n_frames;
}
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
struct mt76_dev *dev = phy->dev;
- struct mt76_sw_queue *sq = &dev->q_tx[qid];
- struct mt76_queue *hwq = sq->q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct ieee80211_txq *txq;
struct mt76_txq *mtxq;
struct mt76_wcid *wcid;
int ret = 0;
- spin_lock_bh(&hwq->lock);
+ spin_lock_bh(&q->lock);
while (1) {
if (test_bit(MT76_STATE_PM, &phy->state) ||
test_bit(MT76_RESET, &phy->state)) {
u8 tid = txq->tid;
mtxq->send_bar = false;
- spin_unlock_bh(&hwq->lock);
+ spin_unlock_bh(&q->lock);
ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
- spin_lock_bh(&hwq->lock);
+ spin_lock_bh(&q->lock);
}
- ret += mt76_txq_send_burst(phy, sq, mtxq);
+ ret += mt76_txq_send_burst(phy, q, mtxq);
ieee80211_return_txq(phy->hw, txq,
!skb_queue_empty(&mtxq->retry_q));
}
- spin_unlock_bh(&hwq->lock);
+ spin_unlock_bh(&q->lock);
return ret;
}
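/* Sketch, not part of the patch: the lock discipline visible in
 * mt76_txq_schedule_list() above. The queue lock is held across the
 * scheduling loop and dropped around ieee80211_send_bar(), which feeds a
 * BAR frame back into the tx path and would otherwise recurse onto the
 * same lock, then retaken. A pthread mutex stands in for the kernel
 * spinlock; send_bar() is a stub.
 */
#include <pthread.h>

struct queue {
	pthread_mutex_t lock;
};

static void send_bar(void)
{
	/* ieee80211_send_bar() in the driver */
}

static void schedule_list(struct queue *q, int need_bar)
{
	pthread_mutex_lock(&q->lock);

	if (need_bar) {
		/* never call send_bar() while holding the queue lock */
		pthread_mutex_unlock(&q->lock);
		send_bar();
		pthread_mutex_lock(&q->lock);
	}

	/* ... mt76_txq_send_burst() runs under the lock ... */

	pthread_mutex_unlock(&q->lock);
}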
continue;
mtxq = (struct mt76_txq *)txq->drv_priv;
- hwq = mtxq->swq->q;
+ hwq = mtxq->q;
spin_lock_bh(&hwq->lock);
mtxq->send_bar = mtxq->aggr && send_bar;
skb_queue_head_init(&mtxq->retry_q);
- mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
+ mtxq->q = dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);
{
struct mt76_dev *dev = (struct mt76_dev *)data;
struct mt76_queue_entry entry;
- struct mt76_sw_queue *sq;
struct mt76_queue *q;
bool wake;
int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- sq = &dev->q_tx[i];
- q = sq->q;
+ q = dev->q_tx[i];
while (q->queued > 0) {
if (!q->entry[q->tail].done)
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
- struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_tx_info tx_info = {
.skb = skb,
};
for (i = 0; i <= MT_TXQ_PSD; i++) {
if (i >= IEEE80211_NUM_ACS) {
- dev->q_tx[i].q = dev->q_tx[0].q;
+ dev->q_tx[i] = dev->q_tx[0];
continue;
}
spin_lock_init(&q->lock);
q->hw_idx = mt76u_ac_to_hwq(dev, i);
- dev->q_tx[i].q = q;
+ dev->q_tx[i] = q;
q->entry = devm_kcalloc(dev->dev,
MT_NUM_TX_ENTRIES, sizeof(*q->entry),
struct mt76_queue *q;
int j;
- q = dev->q_tx[i].q;
+ q = dev->q_tx[i];
if (!q)
continue;
dev_err(dev->dev, "timed out waiting for pending tx\n");
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = dev->q_tx[i].q;
+ q = dev->q_tx[i];
if (!q)
continue;
* will fail to submit urb, cleanup those skb's manually.
*/
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = dev->q_tx[i].q;
+ q = dev->q_tx[i];
if (!q)
continue;