void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
+void mt76_tx_worker_run(struct mt76_dev *dev);
void mt76_tx_worker(struct mt76_worker *w);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
dev->phy.dev = dev;
dev->phy.mt76 = &dev->mt76.phy;
dev->mt76.phy.priv = &dev->phy;
+ dev->mt76.tx_worker.fn = mt7615_tx_worker;
INIT_DELAYED_WORK(&dev->pm.ps_work, mt7615_pm_power_save_work);
INIT_WORK(&dev->pm.wake_work, mt7615_pm_wake_work);
+ spin_lock_init(&dev->pm.wake.lock);
mutex_init(&dev->pm.mutex);
init_completion(&dev->pm.wake_cmpl);
spin_lock_init(&dev->pm.txq_lock);
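For reference, the pm.wake state initialized above is assumed to be a small refcount guarded by its own spinlock; a sketch of the expected layout (illustrative, not copied verbatim from mt76_connac.h):

	struct {
		spinlock_t lock;	/* serializes count updates */
		s32 count;		/* outstanding wake references */
	} wake;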
break;
}
msta->n_rates = i;
- if (!test_bit(MT76_STATE_PM, &phy->mt76->state))
+ if (mt76_connac_pm_ref(phy->mt76, &dev->pm)) {
mt7615_mac_set_rates(phy, msta, NULL, msta->rates);
+ mt76_connac_pm_unref(&dev->pm);
+ }
spin_unlock_bh(&dev->mt76.lock);
}
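The conversion above (and the tx paths below) leans on the mt76_connac_pm_ref()/mt76_connac_pm_unref() pair. A minimal sketch of the intended semantics, assuming the wake refcount layout noted earlier: take a reference only while the device is not in MT76_STATE_PM, and stamp last_activity on release so the power-save timer is pushed back (the removed open-coded jiffies updates move in here):

static inline bool
mt76_connac_pm_ref(struct mt76_phy *phy, struct mt76_connac_pm *pm)
{
	bool ret = false;

	spin_lock_bh(&pm->wake.lock);
	if (test_bit(MT76_STATE_PM, &phy->state))
		goto out;	/* device asleep: caller must defer to wake_work */

	pm->wake.count++;	/* hold off fw_pmctrl while the ref is held */
	ret = true;
out:
	spin_unlock_bh(&pm->wake.lock);

	return ret;
}

static inline void
mt76_connac_pm_unref(struct mt76_connac_pm *pm)
{
	spin_lock_bh(&pm->wake.lock);
	pm->wake.count--;
	pm->last_activity = jiffies;	/* push back the ps timeout */
	spin_unlock_bh(&pm->wake.lock);
}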
-static void
-mt7615_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+void mt7615_tx_worker(struct mt76_worker *w)
{
- struct mt7615_dev *dev = mt7615_hw_dev(hw);
- struct mt7615_phy *phy = mt7615_hw_phy(hw);
- struct mt76_phy *mphy = phy->mt76;
-
- if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
- return;
+ struct mt7615_dev *dev = container_of(w, struct mt7615_dev,
+ mt76.tx_worker);
- if (test_bit(MT76_STATE_PM, &mphy->state)) {
+ if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
queue_work(dev->mt76.wq, &dev->pm.wake_work);
return;
}
- dev->pm.last_activity = jiffies;
- mt76_worker_schedule(&dev->mt76.tx_worker);
+ mt76_tx_worker_run(&dev->mt76);
+ mt76_connac_pm_unref(&dev->pm);
}
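When the reference cannot be taken, the worker defers to pm.wake_work. A simplified sketch of that deferred path, assuming the driver wakes the firmware through a helper here called mt7615_mcu_set_drv_ctrl() (name and error handling are assumptions; queue restart elided):

void mt7615_pm_wake_work(struct work_struct *work)
{
	struct mt7615_dev *dev = container_of(work, struct mt7615_dev,
					      pm.wake_work);

	/* leave fw power-save, then rerun the tx worker so frames
	 * queued while asleep get flushed
	 */
	if (!mt7615_mcu_set_drv_ctrl(dev))
		mt76_worker_schedule(&dev->mt76.tx_worker);

	complete_all(&dev->pm.wake_cmpl);
}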
static void mt7615_tx(struct ieee80211_hw *hw,
wcid = &msta->wcid;
}
- if (!test_bit(MT76_STATE_PM, &mphy->state)) {
- dev->pm.last_activity = jiffies;
+ if (mt76_connac_pm_ref(mphy, &dev->pm)) {
mt76_tx(mphy, control->sta, wcid, skb);
+ mt76_connac_pm_unref(&dev->pm);
return;
}
.sta_set_decap_offload = mt7615_sta_set_decap_offload,
.ampdu_action = mt7615_ampdu_action,
.set_rts_threshold = mt7615_set_rts_threshold,
- .wake_tx_queue = mt7615_wake_tx_queue,
+ .wake_tx_queue = mt76_wake_tx_queue,
.sta_rate_tbl_update = mt7615_sta_rate_tbl_update,
.sw_scan_start = mt76_sw_scan,
.sw_scan_complete = mt76_sw_scan_complete,
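mt76_wake_tx_queue() replaces the driver-private handler removed above; with the pm gating moved into the tx worker itself, the generic handler only has to check that the phy is running and kick the worker. A sketch, mirroring the removed mt7615_wake_tx_queue() minus the MT76_STATE_PM branch:

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
		return;

	mt76_worker_schedule(&dev->tx_worker);
}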
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
-void mt76_tx_worker(struct mt76_worker *w)
+void mt76_tx_worker_run(struct mt76_dev *dev)
{
- struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);
-
mt76_txq_schedule_all(&dev->phy);
if (dev->phy2)
mt76_txq_schedule_all(dev->phy2);

#ifdef CONFIG_NL80211_TESTMODE
	if (dev->phy.test.tx_pending)
		mt76_testmode_tx_pending(&dev->phy);
	if (dev->phy2 && dev->phy2->test.tx_pending)
		mt76_testmode_tx_pending(dev->phy2);
#endif
}
+EXPORT_SYMBOL_GPL(mt76_tx_worker_run);
+
+void mt76_tx_worker(struct mt76_worker *w)
+{
+ struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);
+
+ mt76_tx_worker_run(dev);
+}
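The split keeps a single scheduling core: a pm-aware driver installs its own callback (the mt76.tx_worker.fn assignment above) and calls mt76_tx_worker_run() once a wake reference is held, while drivers without runtime pm keep the default mt76_tx_worker wrapper. Hot paths never call the run routine directly; they only schedule the worker:

	/* usage sketch: defer to the worker, whose fn decides if the
	 * device is awake enough to actually run the scheduler
	 */
	mt76_worker_schedule(&dev->mt76.tx_worker);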
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
bool send_bar)