return i;
}
+/*
+ * Read the interrupt status block from the device, drain both RX
+ * queues and refill the TX scheduling quota.
+ *
+ * Interrupts are masked for the duration of the WHISR read and
+ * re-enabled on exit, even on read failure.
+ *
+ * Returns the number of RX frames processed, plus one if the TX
+ * quota was refilled, so the caller knows more work may be pending
+ * (0 if the status read failed).
+ */
+static int mt7663s_rx_handler(struct mt76_dev *dev)
+{
+ struct mt76_sdio *sdio = &dev->sdio;
+ struct mt76s_intr *intr = sdio->intr_data;
+ int nframes = 0, ret;
+
+ /* disable interrupt */
+ sdio_claim_host(sdio->func);
+ sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
+ ret = sdio_readsb(sdio->func, intr, MCR_WHISR, sizeof(*intr));
+ sdio_release_host(sdio->func);
+
+ if (ret < 0)
+ goto out;
+
+ trace_dev_irq(dev, intr->isr, 0);
+
+ if (intr->isr & WHIER_RX0_DONE_INT_EN) {
+ ret = mt7663s_rx_run_queue(dev, 0, intr);
+ if (ret > 0) {
+ /* kick the net rx worker to process the queued frames */
+ queue_work(sdio->txrx_wq, &sdio->net_work);
+ nframes += ret;
+ }
+ }
+
+ if (intr->isr & WHIER_RX1_DONE_INT_EN) {
+ ret = mt7663s_rx_run_queue(dev, 1, intr);
+ if (ret > 0) {
+ queue_work(sdio->txrx_wq, &sdio->net_work);
+ nframes += ret;
+ }
+ }
+
+ /* count a quota refill as one unit of work; !! clamps it to 0/1 */
+ nframes += !!mt7663s_refill_sched_quota(dev, intr->tx.wtqcr);
+
+out:
+ /* enable interrupt */
+ sdio_claim_host(sdio->func);
+ sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
+ sdio_release_host(sdio->func);
+
+ return nframes;
+}
+
static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, enum mt76_txq_id qid,
int buf_sz, int *pse_size, int *ple_size)
{
return nframes;
}
-void mt7663s_tx_work(struct work_struct *work)
+void mt7663s_txrx_work(struct work_struct *work)
{
struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
- tx.xmit_work);
+ txrx_work);
struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
int i, nframes = 0;
+ /* tx */
for (i = 0; i < MT_TXQ_MCU_WA; i++) {
int ret;
nframes += ret;
}
- if (nframes)
- queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
-
- queue_work(sdio->txrx_wq, &sdio->tx.status_work);
-}
-
-void mt7663s_rx_work(struct work_struct *work)
-{
- struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
- rx.recv_work);
- struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
- struct mt76s_intr *intr = sdio->intr_data;
- int nframes = 0, ret;
-
- /* disable interrupt */
- sdio_claim_host(sdio->func);
- sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
- ret = sdio_readsb(sdio->func, intr, MCR_WHISR, sizeof(*intr));
- sdio_release_host(sdio->func);
-
- if (ret < 0)
- goto out;
-
- trace_dev_irq(dev, intr->isr, 0);
-
- if (intr->isr & WHIER_RX0_DONE_INT_EN) {
- ret = mt7663s_rx_run_queue(dev, 0, intr);
- if (ret > 0) {
- queue_work(sdio->txrx_wq, &sdio->rx.net_work);
- nframes += ret;
- }
- }
- if (intr->isr & WHIER_RX1_DONE_INT_EN) {
- ret = mt7663s_rx_run_queue(dev, 1, intr);
- if (ret > 0) {
- queue_work(sdio->txrx_wq, &sdio->rx.net_work);
- nframes += ret;
- }
- }
+ /* rx */
+ nframes += mt7663s_rx_handler(dev);
- if (mt7663s_refill_sched_quota(dev, intr->tx.wtqcr))
- queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
+ if (nframes)
+ queue_work(sdio->txrx_wq, &sdio->txrx_work);
- if (nframes) {
- queue_work(sdio->txrx_wq, &sdio->rx.recv_work);
- return;
- }
-out:
- /* enable interrupt */
- sdio_claim_host(sdio->func);
- sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
- sdio_release_host(sdio->func);
+ queue_work(sdio->txrx_wq, &sdio->status_work);
}
void mt7663s_sdio_irq(struct sdio_func *func)
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state))
return;
- queue_work(sdio->txrx_wq, &sdio->rx.recv_work);
+ queue_work(sdio->txrx_wq, &sdio->txrx_work);
}
{
struct mt76_sdio *sdio = &dev->sdio;
- cancel_work_sync(&sdio->tx.xmit_work);
- cancel_work_sync(&sdio->tx.status_work);
- cancel_work_sync(&sdio->rx.recv_work);
- cancel_work_sync(&sdio->rx.net_work);
+ cancel_work_sync(&sdio->txrx_work);
+ cancel_work_sync(&sdio->status_work);
+ cancel_work_sync(&sdio->net_work);
cancel_work_sync(&sdio->stat_work);
clear_bit(MT76_READING_STATS, &dev->phy.state);
{
struct mt76_sdio *sdio = &dev->sdio;
- queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
+ queue_work(sdio->txrx_wq, &sdio->txrx_work);
}
static const struct mt76_queue_ops sdio_queue_ops = {
static void mt76s_tx_work(struct work_struct *work)
{
struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
- tx.status_work);
+ status_work);
struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
int i;
static void mt76s_rx_work(struct work_struct *work)
{
struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
- rx.net_work);
+ net_work);
struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
int i;
return -ENOMEM;
INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
- INIT_WORK(&sdio->tx.status_work, mt76s_tx_work);
- INIT_WORK(&sdio->rx.net_work, mt76s_rx_work);
+ INIT_WORK(&sdio->status_work, mt76s_tx_work);
+ INIT_WORK(&sdio->net_work, mt76s_rx_work);
mutex_init(&sdio->sched.lock);
dev->queue_ops = &sdio_queue_ops;