struct mt76_dev *dev = dev_get_drvdata(s->private);
int i;
- for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
- struct mt76_queue *q = dev->q_tx[i];
+ for (i = 0; i < ARRAY_SIZE(dev->phy.q_tx); i++) {
+ struct mt76_queue *q = dev->phy.q_tx[i];
if (!q)
continue;
mt76_worker_disable(&dev->tx_worker);
netif_napi_del(&dev->tx_napi);
- for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
- mt76_dma_tx_cleanup(dev, dev->q_tx[i], true);
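+ /* clean up data queues for the primary phy and, if present, the second phy */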
+ for (i = 0; i < ARRAY_SIZE(dev->phy.q_tx); i++) {
+ mt76_dma_tx_cleanup(dev, dev->phy.q_tx[i], true);
+ if (dev->phy2)
+ mt76_dma_tx_cleanup(dev, dev->phy2->q_tx[i], true);
+ }
for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);
bool mt76_has_tx_pending(struct mt76_phy *phy)
{
- struct mt76_dev *dev = phy->dev;
struct mt76_queue *q;
- int i, offset;
-
- offset = __MT_TXQ_MAX * (phy != &dev->phy);
+ int i;
for (i = 0; i < __MT_TXQ_MAX; i++) {
- q = dev->q_tx[offset + i];
+ q = phy->q_tx[i];
if (q && q->queued)
return true;
}
unsigned long state;
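+ /* per-phy tx queues, indexed by enum mt76_txq_id */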
+ struct mt76_queue *q_tx[__MT_TXQ_MAX];
+
struct cfg80211_chan_def chandef;
struct ieee80211_channel *main_chan;
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
struct list_head txwi_cache;
- struct mt76_queue *q_tx[2 * __MT_TXQ_MAX];
struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
return PTR_ERR(q);
q->qid = qid;
- phy->dev->q_tx[qid] = q;
+ phy->q_tx[qid] = q;
return 0;
}
void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
-void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
+void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
bool send_bar);
void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
if (!skb)
return;
- mt76_tx_queue_skb(dev, mdev->q_tx[MT_TXQ_BEACON], skb,
+ mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON], skb,
&mvif->sta.wcid, NULL);
spin_lock_bh(&dev->ps_lock);
mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
- mdev->q_tx[MT_TXQ_CAB]->hw_idx) |
+ dev->mphy.q_tx[MT_TXQ_CAB]->hw_idx) |
FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
data.dev = dev;
__skb_queue_head_init(&data.q);
- q = mdev->q_tx[MT_TXQ_BEACON];
+ q = dev->mphy.q_tx[MT_TXQ_BEACON];
spin_lock_bh(&q->lock);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
/* Flush all previous CAB queue packets */
mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
- mt76_queue_tx_cleanup(dev, mdev->q_tx[MT_TXQ_CAB], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false);
mt76_csa_check(mdev);
if (mdev->csa_complete)
goto out;
- q = mdev->q_tx[MT_TXQ_CAB];
+ q = dev->mphy.q_tx[MT_TXQ_CAB];
do {
nframes = skb_queue_len(&data.q);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
struct ieee80211_vif *vif = info->control.vif;
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
- mt76_tx_queue_skb(dev, mdev->q_tx[MT_TXQ_CAB], skb,
- &mvif->sta.wcid, NULL);
+ mt76_tx_queue_skb(dev, q, skb, &mvif->sta.wcid, NULL);
}
mt76_queue_kick(dev, q);
spin_unlock_bh(&q->lock);
((1 << (MT7603_MAX_INTERFACES - 1)) - 1)));
out:
- mt76_queue_tx_cleanup(dev, mdev->q_tx[MT_TXQ_BEACON], false);
- if (mdev->q_tx[MT_TXQ_BEACON]->queued > hweight8(mdev->beacon_mask))
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false);
+ if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > hweight8(mdev->beacon_mask))
dev->beacon_check++;
}
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
for (i = MT_TXQ_PSD; i >= 0; i--)
- mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
if (napi_complete_done(napi, 0))
mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
for (i = MT_TXQ_PSD; i >= 0; i--)
- mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
mt7603_mac_sta_poll(dev);
sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
for (i = 0; i < 4; i++) {
- struct mt76_queue *q = dev->mt76.q_tx[i];
+ struct mt76_queue *q = dev->mphy.q_tx[i];
u8 qidx = q->hw_idx;
u8 tid = ac_to_tid[i];
u32 txtime = airtime[qidx];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
struct ieee80211_vif *vif = info->control.vif;
- struct mt76_queue *q = dev->mt76.q_tx[qid];
+ struct mt76_queue *q = dev->mphy.q_tx[qid];
struct mt7603_vif *mvif;
int wlan_idx;
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
for (i = 0; i < __MT_TXQ_MAX; i++)
- mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], true);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
mt76_for_each_q_rx(&dev->mt76, i) {
mt76_queue_rx_reset(dev, i);
int i;
for (i = 0; i < 4; i++) {
- q = dev->mt76.q_tx[i];
+ q = dev->mphy.q_tx[i];
if (!q->queued)
continue;
while ((skb = __skb_dequeue(list)) != NULL) {
int qid = skb_get_queue_mapping(skb);
- mt76_tx_queue_skb_raw(dev, dev->mt76.q_tx[qid], skb, 0);
+ mt76_tx_queue_skb_raw(dev, dev->mphy.q_tx[qid], skb, 0);
}
}
struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
struct sk_buff_head list;
- mt76_stop_tx_queues(&dev->mt76, sta, true);
+ mt76_stop_tx_queues(&dev->mphy, sta, true);
mt7603_wtbl_set_ps(dev, msta, ps);
if (ps)
return;
u16 cw_max = (1 << 10) - 1;
u32 val;
- queue = dev->mt76.q_tx[queue]->hw_idx;
+ queue = dev->mphy.q_tx[queue]->hw_idx;
if (params->cw_min)
cw_min = params->cw_min;
skb_put(skb, 1);
mt7615_mutex_acquire(dev);
- mt76_tx_queue_skb_raw(dev, dev->mt76.q_tx[0], skb, 0);
+ mt76_tx_queue_skb_raw(dev, dev->mphy.q_tx[0], skb, 0);
mt7615_mutex_release(dev);
return 0;
struct mt76_queue *q;
char *queue;
} queue_map[] = {
- { dev->mt76.q_tx[MT_TXQ_BE], "PDMA0" },
+ { dev->mphy.q_tx[MT_TXQ_BE], "PDMA0" },
{ dev->mt76.q_mcu[MT_MCUQ_WM], "MCUQ" },
{ dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWQ" },
};
return ret;
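+ /* all data qids share the first hw ring */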
for (i = 1; i <= MT_TXQ_PSD; i++)
- dev->mt76.q_tx[i] = dev->mt76.q_tx[0];
+ dev->mphy.q_tx[i] = dev->mphy.q_tx[0];
return mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7615_TXQ_MCU,
MT7615_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
{
struct mt7615_phy *phy = mt7615_ext_phy(dev);
struct mt76_phy *mphy;
- int ret;
+ int i, ret;
if (!is_mt7615(&dev->mt76))
return -EOPNOTSUPP;
mphy->sband_2g.sband.n_channels = 0;
mphy->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
+ /* mt7615 second phy shares the same hw queues with the primary one */
+ for (i = 0; i <= MT_TXQ_PSD; i++)
+ mphy->q_tx[i] = dev->mphy.q_tx[i];
+
ret = mt76_register_phy(mphy);
if (ret)
ieee80211_free_hw(mphy->hw);
struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
u8 i, count;
- mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[MT_TXQ_PSD], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
if (is_mt7615(&dev->mt76)) {
- mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[MT_TXQ_BE], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
} else {
for (i = 0; i < IEEE80211_NUM_ACS; i++)
- mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
}
count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
for (i = 0; i < __MT_TXQ_MAX; i++)
- mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], true);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
mt76_for_each_q_rx(&dev->mt76, i) {
mt76_queue_rx_reset(dev, i);
/* tx */
for (i = 0; i <= MT_TXQ_PSD; i++) {
- ret = mt7663s_tx_run_queue(dev, dev->q_tx[i]);
+ ret = mt7663s_tx_run_queue(dev, dev->phy.q_tx[i]);
if (ret > 0)
nframes += ret;
}
{
struct mt76x02_dev *dev = from_tasklet(dev, t, mt76.pre_tbtt_tasklet);
struct mt76_dev *mdev = &dev->mt76;
- struct mt76_queue *q = mdev->q_tx[MT_TXQ_PSD];
+ struct mt76_queue *q = dev->mphy.q_tx[MT_TXQ_PSD];
struct beacon_bc_data data = {};
struct sk_buff *skb;
int i;
struct ieee80211_vif *vif = info->control.vif;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
- mt76_tx_queue_skb(dev, mdev->q_tx[MT_TXQ_PSD], skb,
- &mvif->group_wcid, NULL);
+ mt76_tx_queue_skb(dev, q, skb, &mvif->group_wcid, NULL);
}
spin_unlock_bh(&q->lock);
}
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
for (i = MT_TXQ_PSD; i >= 0; i--)
- mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
if (napi_complete_done(napi, 0))
mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
for (i = MT_TXQ_PSD; i >= 0; i--)
- mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
mt76_worker_schedule(&dev->mt76.tx_worker);
if (dev->mt76.csa_complete)
mt76_csa_finish(&dev->mt76);
else
- mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD]);
+ mt76_queue_kick(dev, dev->mphy.q_tx[MT_TXQ_PSD]);
}
if (intr & MT_INT_TX_STAT)
int i;
for (i = 0; i < 4; i++) {
- q = dev->mt76.q_tx[i];
+ q = dev->mphy.q_tx[i];
if (!q->queued)
continue;
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
for (i = 0; i < __MT_TXQ_MAX; i++)
- mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], true);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
mt76_for_each_q_rx(&dev->mt76, i) {
mt76_queue_rx_reset(dev, i);
struct mt76_tx_info *tx_info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
- int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid]->hw_idx);
+ int pid, len = tx_info->skb->len, ep = q2ep(dev->mphy.q_tx[qid]->hw_idx);
struct mt76x02_txwi *txwi;
bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
enum mt76_qsel qsel;
u8 cw_min = 5, cw_max = 10, qid;
u32 val;
- qid = dev->mt76.q_tx[queue]->hw_idx;
+ qid = dev->mphy.q_tx[queue]->hw_idx;
if (params->cw_min)
cw_min = fls(params->cw_min);
struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
int idx = msta->wcid.idx;
- mt76_stop_tx_queues(&dev->mt76, sta, true);
+ mt76_stop_tx_queues(&dev->mphy, sta, true);
if (mt76_is_mmio(mdev))
mt76x02_mac_wcid_set_drop(dev, idx, ps);
}
struct mt76_queue *q;
char *queue;
} queue_map[] = {
- { dev->mt76.q_tx[MT_TXQ_BE], "WFDMA0" },
+ { dev->mphy.q_tx[MT_TXQ_BE], "WFDMA0" },
{ dev->mt76.q_mcu[MT_MCUQ_WM], "MCUWM" },
{ dev->mt76.q_mcu[MT_MCUQ_WA], "MCUWA" },
{ dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWQ" },
#include "mac.h"
static int
-mt7915_init_tx_queues(struct mt7915_dev *dev, int idx, int n_desc)
+mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc)
{
int i, err;
- err = mt76_init_tx_queue(&dev->mphy, 0, idx, n_desc, MT_TX_RING_BASE);
+ err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
if (err < 0)
return err;
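+ /* each band has a single hw tx ring; alias every data qid to it */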
for (i = 0; i <= MT_TXQ_PSD; i++)
- dev->mt76.q_tx[i] = dev->mt76.q_tx[0];
+ phy->mt76->q_tx[i] = phy->mt76->q_tx[0];
return 0;
}
mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
/* init tx queue */
- ret = mt7915_init_tx_queues(dev, MT7915_TXQ_BAND0,
+ ret = mt7915_init_tx_queues(&dev->phy, MT7915_TXQ_BAND0,
MT7915_TX_RING_SIZE);
if (ret)
return ret;
u8 i, count;
/* clean DMA queues and unmap buffers first */
- mt76_queue_tx_cleanup(dev, mdev->q_tx[MT_TXQ_PSD], false);
- mt76_queue_tx_cleanup(dev, mdev->q_tx[MT_TXQ_BE], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
/*
* TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
}
static void
-mt7915_dma_reset(struct mt7915_dev *dev)
+mt7915_dma_reset(struct mt7915_phy *phy)
{
+ struct mt7915_dev *dev = phy->dev;
int i;
mt76_clear(dev, MT_WFDMA0_GLO_CFG,
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true);
for (i = 0; i < __MT_TXQ_MAX; i++)
- mt76_queue_tx_cleanup(dev, dev->mt76.q_tx[i], true);
+ mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true);
mt76_for_each_q_rx(&dev->mt76, i) {
mt76_queue_rx_reset(dev, i);
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
- mt7915_dma_reset(dev);
+ mt7915_dma_reset(&dev->phy);
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
return PTR_ERR(q);
q->qid = i;
- dev->q_tx[i] = q;
+ dev->phy.q_tx[i] = q;
}
q = mt76s_alloc_tx_queue(dev);
nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]);
for (i = 0; i <= MT_TXQ_PSD; i++)
- nframes += mt76s_process_tx_queue(dev, dev->q_tx[i]);
+ nframes += mt76s_process_tx_queue(dev,
+ dev->phy.q_tx[i]);
if (dev->drv->tx_status_data &&
!test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
{
struct mt76_testmode_data *td = &dev->test;
struct mt76_wcid *wcid = &dev->global_wcid;
+ struct mt76_phy *phy = &dev->phy;
struct sk_buff *skb = td->tx_skb;
struct mt76_queue *q;
int qid;
return;
qid = skb_get_queue_mapping(skb);
- q = dev->q_tx[qid];
+ q = phy->q_tx[qid];
spin_lock_bh(&q->lock);
bool *stop)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76_queue *q = phy->q_tx[qid];
struct mt76_dev *dev = phy->dev;
- struct mt76_queue *q = dev->q_tx[qid];
bool non_aql;
int pending;
int idx;
if (ext_phy)
info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
- q = dev->q_tx[qid];
+ q = phy->q_tx[qid];
spin_lock_bh(&q->lock);
__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
struct sk_buff *last_skb = NULL;
- struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD];
+ struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
int i;
spin_lock_bh(&hwq->lock);
static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
- struct mt76_dev *dev = phy->dev;
- struct mt76_queue *q = dev->q_tx[qid];
+ struct mt76_queue *q = phy->q_tx[qid];
struct ieee80211_txq *txq;
struct mt76_txq *mtxq;
struct mt76_wcid *wcid;
#endif
}
-void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
+void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
bool send_bar)
{
int i;
if (!txq)
continue;
- hwq = dev->q_tx[mt76_txq_get_qid(txq)];
+ hwq = phy->q_tx[mt76_txq_get_qid(txq)];
mtxq = (struct mt76_txq *)txq->drv_priv;
spin_lock_bh(&hwq->lock);
int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = dev->q_tx[i];
+ q = dev->phy.q_tx[i];
while (q->queued > 0) {
if (!q->entry[q->tail].done)
for (i = 0; i <= MT_TXQ_PSD; i++) {
if (i >= IEEE80211_NUM_ACS) {
- dev->q_tx[i] = dev->q_tx[0];
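+ /* no dedicated hw queue beyond the ACs; reuse the first one */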
+ dev->phy.q_tx[i] = dev->phy.q_tx[0];
continue;
}
q->hw_idx = mt76u_ac_to_hwq(dev, i);
q->qid = i;
- dev->q_tx[i] = q;
+ dev->phy.q_tx[i] = q;
q->entry = devm_kcalloc(dev->dev,
MT_NUM_TX_ENTRIES, sizeof(*q->entry),
struct mt76_queue *q;
int j;
- q = dev->q_tx[i];
+ q = dev->phy.q_tx[i];
if (!q)
continue;
dev_err(dev->dev, "timed out waiting for pending tx\n");
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = dev->q_tx[i];
+ q = dev->phy.q_tx[i];
if (!q)
continue;
* will fail to submit urb, cleanup those skb's manually.
*/
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = dev->q_tx[i];
+ q = dev->phy.q_tx[i];
if (!q)
continue;