ret = mt76s_init(mdev, func, &mt7663s_ops);
if (ret < 0)
- goto err_free;
+ goto error;
ret = mt7663s_hw_init(dev, func);
if (ret)
- goto err_deinit;
+ goto error;
mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
(mt76_rr(dev, MT_HW_REV) & 0xff);
GFP_KERNEL);
if (!mdev->sdio.intr_data) {
ret = -ENOMEM;
- goto err_deinit;
+ goto error;
}
for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) {
GFP_KERNEL);
if (!mdev->sdio.xmit_buf[i]) {
ret = -ENOMEM;
- goto err_deinit;
+ goto error;
}
}
ret = mt76s_alloc_queues(&dev->mt76);
if (ret)
- goto err_deinit;
+ goto error;
ret = mt76_worker_setup(mt76_hw(dev), &mdev->sdio.txrx_worker,
mt7663s_txrx_worker, "sdio-txrx");
if (ret)
- goto err_deinit;
+ goto error;
sched_set_fifo_low(mdev->sdio.txrx_worker.task);
ret = mt7663_usb_sdio_register_device(dev);
if (ret)
- goto err_deinit;
+ goto error;
return 0;
-err_deinit:
+error:
mt76s_deinit(&dev->mt76);
-err_free:
mt76_free_device(&dev->mt76);
return ret;
return err;
mt76_worker_disable(&mdev->mt76.sdio.txrx_worker);
- mt76s_stop_txrx(&mdev->mt76);
+ mt76_worker_disable(&mdev->mt76.sdio.status_worker);
+ mt76_worker_disable(&mdev->mt76.sdio.net_worker);
+
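+ /* stop the tx status work and flush any pending status entries */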
+ cancel_work_sync(&mdev->mt76.sdio.stat_work);
+ clear_bit(MT76_READING_STATS, &mdev->mphy.state);
+
+ mt76_tx_status_check(&mdev->mt76, NULL, true);
return 0;
}
int err;
mt76_worker_enable(&mdev->mt76.sdio.txrx_worker);
+ mt76_worker_enable(&mdev->mt76.sdio.status_worker);
+ mt76_worker_enable(&mdev->mt76.sdio.net_worker);
err = mt7615_mcu_set_drv_ctrl(mdev);
if (err)
return err;
return 0;
}
-void mt76s_stop_txrx(struct mt76_dev *dev)
-{
- struct mt76_sdio *sdio = &dev->sdio;
-
- cancel_work_sync(&sdio->status_work);
- cancel_work_sync(&sdio->net_work);
- cancel_work_sync(&sdio->stat_work);
- clear_bit(MT76_READING_STATS, &dev->phy.state);
-
- mt76_tx_status_check(dev, NULL, true);
-}
-EXPORT_SYMBOL_GPL(mt76s_stop_txrx);
-
int mt76s_alloc_queues(struct mt76_dev *dev)
{
int err;
return nframes;
}
-static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
+static void mt76s_net_worker(struct mt76_worker *w)
+{
+ struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
+ net_worker);
+ struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+ int i, nframes;
+
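+ /* keep polling the rx queues until a full pass processes no frames */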
+ do {
+ nframes = 0;
+
+ local_bh_disable();
+ rcu_read_lock();
+
+ mt76_for_each_q_rx(dev, i)
+ nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);
+
+ rcu_read_unlock();
+ local_bh_enable();
+ } while (nframes > 0);
+}
+
+static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
{
struct mt76_queue *q = dev->q_tx[qid];
struct mt76_queue_entry entry;
+ int nframes = 0;
bool wake;
while (q->queued > 0) {
}
mt76_queue_tx_complete(dev, q, &entry);
+ nframes++;
}
wake = q->stopped && q->queued < q->ndesc - 8;
wake_up(&dev->tx_wait);
if (qid == MT_TXQ_MCU)
- return;
+ goto out;
mt76_txq_schedule(&dev->phy, qid);
if (wake)
ieee80211_wake_queue(dev->hw, qid);
+out:
+ return nframes;
+}
+
+static void mt76s_status_worker(struct mt76_worker *w)
+{
+ struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
+ status_worker);
+ struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+ int i, nframes;
+
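+ /* reap completed tx frames; loop until a pass frees nothing */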
+ do {
+ nframes = 0;
+ for (i = 0; i < MT_TXQ_MCU_WA; i++)
+ nframes += mt76s_process_tx_queue(dev, i);
+
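+ /* schedule a tx status poll unless one is already in flight */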
+ if (dev->drv->tx_status_data &&
+ !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
+ queue_work(dev->wq, &dev->sdio.stat_work);
+ } while (nframes > 0);
}
static void mt76s_tx_status_data(struct work_struct *work)
.tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};
-static void mt76s_tx_work(struct work_struct *work)
-{
- struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
- status_work);
- struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
- int i;
-
- for (i = 0; i < MT_TXQ_MCU_WA; i++)
- mt76s_process_tx_queue(dev, i);
-
- if (dev->drv->tx_status_data &&
- !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
- queue_work(dev->wq, &dev->sdio.stat_work);
-}
-
-static void mt76s_rx_work(struct work_struct *work)
-{
- struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
- net_work);
- struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
- int i;
-
- /* rx processing */
- local_bh_disable();
- rcu_read_lock();
-
- mt76_for_each_q_rx(dev, i)
- mt76s_process_rx_queue(dev, &dev->q_rx[i]);
-
- rcu_read_unlock();
- local_bh_enable();
-}
-
void mt76s_deinit(struct mt76_dev *dev)
{
struct mt76_sdio *sdio = &dev->sdio;
int i;
mt76_worker_teardown(&sdio->txrx_worker);
+ mt76_worker_teardown(&sdio->status_worker);
+ mt76_worker_teardown(&sdio->net_worker);
- mt76s_stop_txrx(dev);
- if (sdio->txrx_wq) {
- destroy_workqueue(sdio->txrx_wq);
- sdio->txrx_wq = NULL;
- }
+ cancel_work_sync(&sdio->stat_work);
+ clear_bit(MT76_READING_STATS, &dev->phy.state);
+
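+ /* flush tx status entries still pending at teardown */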
+ mt76_tx_status_check(dev, NULL, true);
sdio_claim_host(sdio->func);
sdio_release_irq(sdio->func);
const struct mt76_bus_ops *bus_ops)
{
struct mt76_sdio *sdio = &dev->sdio;
+ int err;
- sdio->txrx_wq = alloc_workqueue("mt76s_txrx_wq",
- WQ_UNBOUND | WQ_HIGHPRI,
- WQ_UNBOUND_MAX_ACTIVE);
- if (!sdio->txrx_wq)
- return -ENOMEM;
+ err = mt76_worker_setup(dev->hw, &sdio->status_worker,
+ mt76s_status_worker, "sdio-status");
+ if (err)
+ return err;
+
+ err = mt76_worker_setup(dev->hw, &sdio->net_worker, mt76s_net_worker,
+ "sdio-net");
+ if (err)
+ return err;
+
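+ /* run the status/net workers with low SCHED_FIFO priority */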
+ sched_set_fifo_low(sdio->status_worker.task);
+ sched_set_fifo_low(sdio->net_worker.task);
INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
- INIT_WORK(&sdio->status_work, mt76s_tx_work);
- INIT_WORK(&sdio->net_work, mt76s_rx_work);
mutex_init(&sdio->sched.lock);
dev->queue_ops = &sdio_queue_ops;