int i;
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
- struct mt76_queue *q = &dev->q_tx[i];
+ struct mt76_sw_queue *q = &dev->q_tx[i];
- if (!q->ndesc)
+ if (!q->q)
continue;
seq_printf(s,
"%d: queued=%d head=%d tail=%d swq_queued=%d\n",
- i, q->queued, q->head, q->tail, q->swq_queued);
+ i, q->q->queued, q->q->head, q->q->tail,
+ q->swq_queued);
}
return 0;
int i;
spin_lock_init(&q->lock);
- INIT_LIST_HEAD(&q->swq);
q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
q->ndesc = n_desc;
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
- struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_sw_queue *sq = &dev->q_tx[qid];
+ struct mt76_queue *q = sq->q;
struct mt76_queue_entry entry;
bool wake = false;
int last;
- if (!q->ndesc)
+ if (!q)
return;
spin_lock_bh(&q->lock);
while (q->queued && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
if (entry.schedule)
- q->swq_queued--;
+ dev->q_tx[qid].swq_queued--;
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
}
if (!flush)
- mt76_txq_schedule(dev, q);
+ mt76_txq_schedule(dev, sq);
else
mt76_dma_sync_idx(dev, q);
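Since the hardware ring is now reached through sq->q, the cleanup path guards on the pointer itself rather than on ndesc; a slot that never had a ring attached is simply skipped. A minimal sketch of that guard as a helper, assuming only the structures introduced by this patch (the helper name is hypothetical, not part of the change):

/* Hypothetical guard: a TX slot is usable only once a hardware ring
 * has been attached and sized. */
static inline bool mt76_sw_queue_ready(struct mt76_sw_queue *sq)
{
	return sq->q && sq->q->ndesc;
}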
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
{
- struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_queue *q = dev->q_tx[qid].q;
struct mt76_queue_buf buf;
dma_addr_t addr;
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
- struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_queue *q = dev->q_tx[qid].q;
struct mt76_queue_entry e;
struct mt76_txwi_cache *t;
struct mt76_queue_buf buf[32];
static bool mt76_has_tx_pending(struct mt76_dev *dev)
{
+ struct mt76_queue *q;
int i;
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
- if (dev->q_tx[i].queued)
+ q = dev->q_tx[i].q;
+ if (q && q->queued)
return true;
}
struct mt76_queue_entry *entry;
struct mt76_desc *desc;
- struct list_head swq;
- int swq_queued;
-
u16 first;
u16 head;
u16 tail;
spinlock_t rx_page_lock;
};
+struct mt76_sw_queue {
+ struct mt76_queue *q;
+
+ struct list_head swq;
+ int swq_queued;
+};
+
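The new wrapper keeps the software scheduling state (the swq list and the swq_queued counter) per logical TX queue, while the DMA ring itself moves behind a pointer, which presumably leaves room for several slots to reference a single hardware ring later on. A minimal sketch of how a slot gets wired up, assuming only the structures above; mt76_sw_queue_attach is a hypothetical name, not part of the patch:

/* Illustrative only: bind an already-allocated hardware ring to a
 * software queue slot.  Nothing here enforces a 1:1 mapping, so two
 * slots could in principle point at the same struct mt76_queue. */
static void mt76_sw_queue_attach(struct mt76_sw_queue *sq,
				 struct mt76_queue *hwq)
{
	INIT_LIST_HEAD(&sq->swq);
	sq->swq_queued = 0;
	sq->q = hwq;
}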
struct mt76_mcu_ops {
int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
int len, bool wait_resp);
struct mt76_txq {
struct list_head list;
- struct mt76_queue *hwq;
+ struct mt76_sw_queue *swq;
struct mt76_wcid *wcid;
struct sk_buff_head retry_q;
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
struct list_head txwi_cache;
- struct mt76_queue q_tx[__MT_TXQ_MAX];
+ struct mt76_sw_queue q_tx[__MT_TXQ_MAX];
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
int tx_dma_idx[4];
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
bool send_bar);
-void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
+void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_sw_queue *sq);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
- dev->mt76.q_tx[MT_TXQ_CAB].hw_idx) |
+ dev->mt76.q_tx[MT_TXQ_CAB].q->hw_idx) |
FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
data.dev = dev;
__skb_queue_head_init(&data.q);
- q = &dev->mt76.q_tx[MT_TXQ_BEACON];
+ q = dev->mt76.q_tx[MT_TXQ_BEACON].q;
spin_lock_bh(&q->lock);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
if (dev->mt76.csa_complete)
goto out;
- q = &dev->mt76.q_tx[MT_TXQ_CAB];
+ q = dev->mt76.q_tx[MT_TXQ_CAB].q;
do {
nframes = skb_queue_len(&data.q);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
out:
mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
- if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > hweight8(dev->beacon_mask))
+ if (dev->mt76.q_tx[MT_TXQ_BEACON].q->queued >
+ hweight8(dev->beacon_mask))
dev->beacon_check++;
}
#include "../dma.h"
static int
-mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
+mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_sw_queue *q,
int idx, int n_desc)
{
+ struct mt76_queue *hwq;
int err;
- err = mt76_queue_alloc(dev, q, idx, n_desc, 0,
- MT_TX_RING_BASE);
+ hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+ if (!hwq)
+ return -ENOMEM;
+
+ err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
if (err < 0)
return err;
+ INIT_LIST_HEAD(&q->swq);
+ q->q = hwq;
+
mt7603_irq_enable(dev, MT_INT_TX_DONE(idx));
return 0;
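The callers of mt7603_init_tx_queue are not part of this excerpt; a plausible shape of the DMA init loop, assuming the driver keeps a per-AC queue map and a ring-size constant (both names below are stand-ins, not taken from the patch):

/* Sketch only: wire up one sw queue per WMM AC.  wmm_queue_map[] and
 * MT7603_TX_RING_SIZE are illustrative names for the driver's own
 * queue map and ring size. */
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
	ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[i],
				   wmm_queue_map[i], MT7603_TX_RING_SIZE);
	if (ret)
		return ret;
}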
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
struct ieee80211_vif *vif = info->control.vif;
- struct mt76_queue *q = &dev->mt76.q_tx[qid];
+ struct mt76_queue *q = dev->mt76.q_tx[qid].q;
struct mt7603_vif *mvif;
int wlan_idx;
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
int i;
for (i = 0; i < 4; i++) {
- q = &dev->mt76.q_tx[i];
+ q = dev->mt76.q_tx[i].q;
if (!q->queued)
continue;
u16 cw_max = (1 << 10) - 1;
u32 val;
- queue = dev->mt76.q_tx[queue].hw_idx;
+ queue = dev->mt76.q_tx[queue].q->hw_idx;
if (params->cw_min)
cw_min = params->cw_min;
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
- struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
+ struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD].q;
struct beacon_bc_data data = {};
struct sk_buff *skb;
int i, nframes;
}
static int
-mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
+mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_sw_queue *q,
int idx, int n_desc)
{
+ struct mt76_queue *hwq;
int err;
- err = mt76_queue_alloc(dev, q, idx, n_desc, 0,
- MT_TX_RING_BASE);
+ hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+ if (!hwq)
+ return -ENOMEM;
+
+ err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
if (err < 0)
return err;
+ INIT_LIST_HEAD(&q->swq);
+ q->q = hwq;
+
mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));
return 0;
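mt76x02_init_tx_queue is the same allocate-and-attach sequence as the mt7603 helper above; if the duplication ever bothers anyone, it could presumably be folded into a core helper along these lines. This is hypothetical and assumes the queue_ops->alloc hook takes the same arguments the per-driver mt76_queue_alloc wrappers pass above:

/* Hypothetical shared helper, not part of this patch: allocate a
 * hardware ring and attach it to a software queue slot. */
static int mt76_sw_queue_alloc_hw(struct mt76_dev *dev,
				  struct mt76_sw_queue *sq,
				  int idx, int n_desc, u32 ring_base)
{
	struct mt76_queue *hwq;
	int err;

	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
	if (!hwq)
		return -ENOMEM;

	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
	if (err < 0)
		return err;

	INIT_LIST_HEAD(&sq->swq);
	sq->q = hwq;

	return 0;
}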
if (dev->mt76.csa_complete)
mt76_csa_finish(&dev->mt76);
else
- mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
+ mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD].q);
}
if (intr & MT_INT_TX_STAT) {
int i;
for (i = 0; i < 4; i++) {
- q = &dev->mt76.q_tx[i];
+ q = dev->mt76.q_tx[i].q;
if (!q->queued)
continue;
u32 *tx_info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
- int pid, len = skb->len, ep = q2ep(mdev->q_tx[qid].hw_idx);
+ int pid, len = skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);
struct mt76x02_txwi *txwi;
enum mt76_qsel qsel;
u32 flags;
u8 cw_min = 5, cw_max = 10, qid;
u32 val;
- qid = dev->mt76.q_tx[queue].hw_idx;
+ qid = dev->mt76.q_tx[queue].q->hw_idx;
if (params->cw_min)
cw_min = fls(params->cw_min);
mt76_check_agg_ssn(mtxq, skb);
}
- q = &dev->q_tx[qid];
+ q = dev->q_tx[qid].q;
spin_lock_bh(&q->lock);
dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
{
struct mt76_dev *dev = hw->priv;
struct sk_buff *last_skb = NULL;
- struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
+ struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
int i;
spin_lock_bh(&hwq->lock);
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
static int
-mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
+mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
struct mt76_txq *mtxq, bool *empty)
{
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
enum mt76_txq_id qid = mt76_txq_get_qid(txq);
- struct ieee80211_tx_info *info;
struct mt76_wcid *wcid = mtxq->wcid;
+ struct mt76_queue *hwq = sq->q;
+ struct ieee80211_tx_info *info;
struct sk_buff *skb;
int n_frames = 1, limit;
struct ieee80211_tx_rate tx_rate;
} while (n_frames < limit);
if (!probe) {
- hwq->swq_queued++;
hwq->entry[idx].schedule = true;
+ sq->swq_queued++;
}
dev->queue_ops->kick(dev, hwq);
}
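The schedule flag stays on the hardware ring entry while the counter it pairs with now lives on the software queue; the DMA and USB completion paths in this patch both decrement it when they reap a flagged entry. A condensed, purely illustrative view of that pairing (both helper names are hypothetical):

/* Illustrative pairing of the two halves of swq_queued accounting. */
static void mt76_swq_account_tx(struct mt76_sw_queue *sq, int idx)
{
	sq->q->entry[idx].schedule = true;
	sq->swq_queued++;
}

static void mt76_swq_account_complete(struct mt76_sw_queue *sq, int idx)
{
	if (sq->q->entry[idx].schedule) {
		sq->q->entry[idx].schedule = false;
		sq->swq_queued--;
	}
}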
static int
-mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
+mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_sw_queue *sq)
{
+ struct mt76_queue *hwq = sq->q;
struct mt76_txq *mtxq, *mtxq_last;
int len = 0;
restart:
- mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
- while (!list_empty(&hwq->swq)) {
+ mtxq_last = list_last_entry(&sq->swq, struct mt76_txq, list);
+ while (!list_empty(&sq->swq)) {
bool empty = false;
int cur;
test_bit(MT76_RESET, &dev->state))
return -EBUSY;
- mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
+ mtxq = list_first_entry(&sq->swq, struct mt76_txq, list);
if (mtxq->send_bar && mtxq->aggr) {
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
struct ieee80211_sta *sta = txq->sta;
list_del_init(&mtxq->list);
- cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
+ cur = mt76_txq_send_burst(dev, sq, mtxq, &empty);
if (!empty)
- list_add_tail(&mtxq->list, &hwq->swq);
+ list_add_tail(&mtxq->list, &sq->swq);
if (cur < 0)
return cur;
return len;
}
-void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
+void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_sw_queue *sq)
{
int len;
rcu_read_lock();
do {
- if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
+ if (sq->swq_queued >= 4 || list_empty(&sq->swq))
break;
- len = mt76_txq_schedule_list(dev, hwq);
+ len = mt76_txq_schedule_list(dev, sq);
} while (len > 0);
rcu_read_unlock();
}
int i;
for (i = 0; i <= MT_TXQ_BK; i++) {
- struct mt76_queue *q = &dev->q_tx[i];
+ struct mt76_queue *q = dev->q_tx[i].q;
spin_lock_bh(&q->lock);
- mt76_txq_schedule(dev, q);
+ mt76_txq_schedule(dev, &dev->q_tx[i]);
spin_unlock_bh(&q->lock);
}
}
for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
struct ieee80211_txq *txq = sta->txq[i];
+ struct mt76_queue *hwq;
struct mt76_txq *mtxq;
if (!txq)
continue;
mtxq = (struct mt76_txq *)txq->drv_priv;
+ hwq = mtxq->swq->q;
- spin_lock_bh(&mtxq->hwq->lock);
+ spin_lock_bh(&hwq->lock);
mtxq->send_bar = mtxq->aggr && send_bar;
if (!list_empty(&mtxq->list))
list_del_init(&mtxq->list);
- spin_unlock_bh(&mtxq->hwq->lock);
+ spin_unlock_bh(&hwq->lock);
}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
struct mt76_dev *dev = hw->priv;
- struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
- struct mt76_queue *hwq = mtxq->hwq;
+ struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
+ struct mt76_sw_queue *sq = mtxq->swq;
+ struct mt76_queue *hwq = sq->q;
if (!test_bit(MT76_STATE_RUNNING, &dev->state))
return;
spin_lock_bh(&hwq->lock);
if (list_empty(&mtxq->list))
- list_add_tail(&mtxq->list, &hwq->swq);
- mt76_txq_schedule(dev, hwq);
+ list_add_tail(&mtxq->list, &sq->swq);
+ mt76_txq_schedule(dev, sq);
spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
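Note that the lock taken here is still the hardware ring's lock even though the list being modified now belongs to the software queue, so slots that ever come to share a ring would also share that lock. A hypothetical helper capturing that rule, assuming the structures introduced by this patch:

/* Hypothetical helper: the ring lock (sq->q->lock) still guards the
 * per-slot swq list. */
static void mt76_txq_queue_swq(struct mt76_sw_queue *sq,
			       struct mt76_txq *mtxq)
{
	spin_lock_bh(&sq->q->lock);
	if (list_empty(&mtxq->list))
		list_add_tail(&mtxq->list, &sq->swq);
	spin_unlock_bh(&sq->q->lock);
}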
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
- struct mt76_txq *mtxq;
struct mt76_queue *hwq;
+ struct mt76_txq *mtxq;
struct sk_buff *skb;
if (!txq)
return;
mtxq = (struct mt76_txq *) txq->drv_priv;
- hwq = mtxq->hwq;
+ hwq = mtxq->swq->q;
spin_lock_bh(&hwq->lock);
if (!list_empty(&mtxq->list))
INIT_LIST_HEAD(&mtxq->list);
skb_queue_head_init(&mtxq->retry_q);
- mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
+ mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);
{
struct mt76_dev *dev = (struct mt76_dev *)data;
struct mt76_queue_entry entry;
+ struct mt76_sw_queue *sq;
struct mt76u_buf *buf;
struct mt76_queue *q;
bool wake;
int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = &dev->q_tx[i];
+ sq = &dev->q_tx[i];
+ q = sq->q;
spin_lock_bh(&q->lock);
while (true) {
if (q->entry[q->head].schedule) {
q->entry[q->head].schedule = false;
- q->swq_queued--;
+ sq->swq_queued--;
}
entry = q->entry[q->head];
dev->drv->tx_complete_skb(dev, i, &entry);
spin_lock_bh(&q->lock);
}
- mt76_txq_schedule(dev, q);
+ mt76_txq_schedule(dev, sq);
wake = q->stopped && q->queued < q->ndesc - 8;
if (wake)
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
- struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_queue *q = dev->q_tx[qid].q;
struct mt76u_buf *buf;
u16 idx = q->tail;
int err;
int i, j;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = &dev->q_tx[i];
+ INIT_LIST_HEAD(&dev->q_tx[i].swq);
+
+ q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
spin_lock_init(&q->lock);
- INIT_LIST_HEAD(&q->swq);
q->hw_idx = mt76_ac_to_hwq(i);
+ dev->q_tx[i].q = q;
q->entry = devm_kcalloc(dev->dev,
MT_NUM_TX_ENTRIES, sizeof(*q->entry),
int i, j;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = &dev->q_tx[i];
+ q = dev->q_tx[i].q;
for (j = 0; j < q->ndesc; j++)
usb_free_urb(q->entry[j].ubuf.urb);
}
int i, j;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = &dev->q_tx[i];
+ q = dev->q_tx[i].q;
for (j = 0; j < q->ndesc; j++)
usb_kill_urb(q->entry[j].ubuf.urb);
}
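After this change every consumer reaches the ring through its software queue slot and has to tolerate slots without one. A hypothetical debug helper, not part of the patch, showing the resulting access pattern:

/* Hypothetical debug helper: total frames pending across all TX slots,
 * skipping slots whose ring was never allocated. */
static int mt76_total_tx_queued(struct mt76_dev *dev)
{
	int i, n = 0;

	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
		struct mt76_queue *hwq = dev->q_tx[i].q;

		if (hwq)
			n += hwq->queued;
	}

	return n;
}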