mt76: unify queue tx cleanup code
author     Felix Fietkau <nbd@nbd.name>
           Sun, 23 Aug 2020 12:50:13 +0000 (14:50 +0200)
committer  Felix Fietkau <nbd@nbd.name>
           Thu, 24 Sep 2020 16:10:16 +0000 (18:10 +0200)
Cleanup and preparation for changing tx scheduling behavior: move the duplicated tx completion bookkeeping in the DMA, SDIO and USB paths into a shared mt76_queue_tx_complete() helper.
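
With the helper in place, each bus-specific cleanup loop reduces to roughly
the pattern below (an illustrative sketch distilled from the sdio.c and
usb.c hunks in this patch, not a verbatim copy of either file):

	while (q->queued > 0) {
		if (!q->entry[q->tail].done)
			break;

		/* snapshot the entry, then clear its completion flag */
		entry = q->entry[q->tail];
		q->entry[q->tail].done = false;

		/* skb completion and the tail/queued/swq_queued accounting
		 * now happen in one shared place
		 */
		mt76_queue_tx_complete(dev, q, &entry);
	}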

Signed-off-by: Felix Fietkau <nbd@nbd.name>
drivers/net/wireless/mediatek/mt76/dma.c
drivers/net/wireless/mediatek/mt76/mt76.h
drivers/net/wireless/mediatek/mt76/sdio.c
drivers/net/wireless/mediatek/mt76/tx.c
drivers/net/wireless/mediatek/mt76/usb.c

diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 24ff21e..cab8422 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -165,16 +165,8 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
                last = readl(&q->regs->dma_idx);
 
        while (q->queued > 0 && q->tail != last) {
-               int swq_qid = -1;
-
                mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
-               if (entry.schedule)
-                       swq_qid = entry.qid;
-
-               q->tail = (q->tail + 1) % q->ndesc;
-
-               if (entry.skb)
-                       dev->drv->tx_complete_skb(dev, qid, &entry);
+               mt76_queue_tx_complete(dev, q, &entry);
 
                if (entry.txwi) {
                        if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
@@ -185,13 +177,6 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
                if (!flush && q->tail == last)
                        last = readl(&q->regs->dma_idx);
 
-               spin_lock_bh(&q->lock);
-               if (swq_qid >= 4)
-                       dev->q_tx[__MT_TXQ_MAX + swq_qid - 4].swq_queued--;
-               else if (swq_qid >= 0)
-                       dev->q_tx[swq_qid].swq_queued--;
-               q->queued--;
-               spin_unlock_bh(&q->lock);
        }
 
        if (flush) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 80f4ba9..23e8d63 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -1017,6 +1017,8 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
                           struct napi_struct *napi);
 void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
 void mt76_testmode_tx_pending(struct mt76_dev *dev);
+void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
+                           struct mt76_queue_entry *e);
 
 /* usb */
 static inline bool mt76u_urb_error(struct urb *urb)
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 6c6d4ce..326d9c5 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -133,38 +133,28 @@ mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
        return nframes;
 }
 
-static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
+static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
 {
        struct mt76_sw_queue *sq = &dev->q_tx[qid];
-       u32 n_dequeued = 0, n_sw_dequeued = 0;
        struct mt76_queue_entry entry;
        struct mt76_queue *q = sq->q;
        bool wake;
 
-       while (q->queued > n_dequeued) {
+       while (q->queued > 0) {
                if (!q->entry[q->tail].done)
                        break;
 
-               if (q->entry[q->tail].schedule) {
-                       q->entry[q->tail].schedule = false;
-                       n_sw_dequeued++;
-               }
-
                entry = q->entry[q->tail];
                q->entry[q->tail].done = false;
-               q->tail = (q->tail + 1) % q->ndesc;
-               n_dequeued++;
 
-               if (qid == MT_TXQ_MCU)
+               if (qid == MT_TXQ_MCU) {
                        dev_kfree_skb(entry.skb);
-               else
-                       dev->drv->tx_complete_skb(dev, qid, &entry);
-       }
-
-       spin_lock_bh(&q->lock);
+                       entry.skb = NULL;
+               }
 
-       sq->swq_queued -= n_sw_dequeued;
-       q->queued -= n_dequeued;
+               mt76_queue_tx_complete(dev, q, &entry);
+       }
 
        wake = q->stopped && q->queued < q->ndesc - 8;
        if (wake)
@@ -173,18 +163,13 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
        if (!q->queued)
                wake_up(&dev->tx_wait);
 
-       spin_unlock_bh(&q->lock);
-
        if (qid == MT_TXQ_MCU)
-               goto out;
+               return;
 
        mt76_txq_schedule(&dev->phy, qid);
 
        if (wake)
                ieee80211_wake_queue(dev->hw, qid);
-
-out:
-       return n_dequeued;
 }
 
 static void mt76s_tx_status_data(struct work_struct *work)
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index d8252e2..1a2f216 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -696,3 +696,25 @@ int mt76_skb_adjust_pad(struct sk_buff *skb)
        return 0;
 }
 EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);
+
+void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
+                           struct mt76_queue_entry *e)
+{
+       enum mt76_txq_id qid = e->qid % 4;
+       bool ext_phy = e->qid >= 4;
+
+       if (e->skb)
+               dev->drv->tx_complete_skb(dev, qid, e);
+
+       spin_lock_bh(&q->lock);
+       q->tail = (q->tail + 1) % q->ndesc;
+       q->queued--;
+
+       if (ext_phy)
+               qid += __MT_TXQ_MAX;
+
+       if (e->schedule)
+               dev->q_tx[qid].swq_queued--;
+       spin_unlock_bh(&q->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 36e6cc3..730d173 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -802,33 +802,20 @@ static void mt76u_tx_tasklet(unsigned long data)
        int i;
 
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
-               u32 n_dequeued = 0, n_sw_dequeued = 0;
-
                sq = &dev->q_tx[i];
                q = sq->q;
 
-               while (q->queued > n_dequeued) {
+               while (q->queued > 0) {
                        if (!q->entry[q->tail].done)
                                break;
 
-                       if (q->entry[q->tail].schedule) {
-                               q->entry[q->tail].schedule = false;
-                               n_sw_dequeued++;
-                       }
-
                        entry = q->entry[q->tail];
                        q->entry[q->tail].done = false;
-                       q->tail = (q->tail + 1) % q->ndesc;
-                       n_dequeued++;
 
-                       dev->drv->tx_complete_skb(dev, i, &entry);
+                       mt76_queue_tx_complete(dev, q, &entry);
                }
 
-               spin_lock_bh(&q->lock);
-
-               sq->swq_queued -= n_sw_dequeued;
-               q->queued -= n_dequeued;
-
                wake = q->stopped && q->queued < q->ndesc - 8;
                if (wake)
                        q->stopped = false;
@@ -836,8 +823,6 @@ static void mt76u_tx_tasklet(unsigned long data)
                if (!q->queued)
                        wake_up(&dev->tx_wait);
 
-               spin_unlock_bh(&q->lock);
-
                mt76_txq_schedule(&dev->phy, i);
 
                if (dev->drv->tx_status_data &&
@@ -1068,16 +1053,11 @@ void mt76u_stop_tx(struct mt76_dev *dev)
                        if (!q)
                                continue;
 
-                       /* Assure we are in sync with killed tasklet. */
-                       spin_lock_bh(&q->lock);
-                       while (q->queued) {
-                               entry = q->entry[q->tail];
-                               q->tail = (q->tail + 1) % q->ndesc;
-                               q->queued--;
+                       while (q->queued > 0) {
+                               entry = q->entry[q->tail];
+                               q->entry[q->tail].done = false;
 
-                               dev->drv->tx_complete_skb(dev, i, &entry);
-                       }
-                       spin_unlock_bh(&q->lock);
+                               mt76_queue_tx_complete(dev, q, &entry);
+                       }
                }
        }