mt76_put_txwi(mdev, txwi);
}
- mt7921_mac_sta_poll(dev);
-
if (wake) {
spin_lock_bh(&dev->token_lock);
mt7921_set_tx_blocked(dev, false);
spin_unlock_bh(&dev->token_lock);
}
- mt76_worker_schedule(&dev->mt76.tx_worker);
-
napi_consume_skb(skb, 1);
list_for_each_entry_safe(skb, tmp, &free_list, list) {
skb_list_del_init(skb);
napi_consume_skb(skb, 1);
}
+
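+ /* defer sta polling and tx scheduling while the device is asleep */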
+ if (test_bit(MT76_STATE_PM, &dev->phy.mt76->state))
+ return;
+
+ mt7921_mac_sta_poll(dev);
+
+ mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
+
+ mt76_worker_schedule(&dev->mt76.tx_worker);
}
void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
+ return;
+
mt7921_phy_update_channel(&mdev->phy, 0);
/* reset obss airtime */
mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
+
+ mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
}
static bool
napi_disable(&dev->mt76.napi[2]);
napi_disable(&dev->mt76.tx_napi);
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
mt7921_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
MT7921_WATCHDOG_TIME);
mac_work.work);
phy = mphy->priv;
- mutex_lock(&mphy->dev->mutex);
+ if (test_bit(MT76_STATE_PM, &mphy->state))
+ goto out;
+
+ mt7921_mutex_acquire(phy->dev);
mt76_update_survey(mphy->dev);
if (++mphy->mac_work_count == 5) {
mt7921_mac_sta_stats_work(phy);
}
- mutex_unlock(&mphy->dev->mutex);
+ mt7921_mutex_release(phy->dev);
- ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+out:
+ ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
MT7921_WATCHDOG_TIME);
}
+
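+ /* wake worker: switch the chip back to driver-own state and flush frames buffered while it was asleep */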
+void mt7921_pm_wake_work(struct work_struct *work)
+{
+ struct mt7921_dev *dev;
+ struct mt76_phy *mphy;
+
+ dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
+ pm.wake_work);
+ mphy = dev->phy.mt76;
+
+ if (!mt7921_mcu_drv_pmctrl(dev))
+ mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
+ else
+ dev_err(mphy->dev->dev, "failed to wake device\n");
+
+ ieee80211_wake_queues(mphy->hw);
+ complete_all(&dev->pm.wake_cmpl);
+}
+
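+ /* hand the chip over to the firmware (low-power state) once the idle timeout has expired */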
+void mt7921_pm_power_save_work(struct work_struct *work)
+{
+ struct mt7921_dev *dev;
+ unsigned long delta;
+
+ dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
+ pm.ps_work.work);
+
+ delta = dev->pm.idle_timeout;
+ if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
+ delta = dev->pm.last_activity + delta - jiffies;
+ goto out;
+ }
+
+ if (!mt7921_mcu_fw_pmctrl(dev))
+ return;
+out:
+ queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
+}
+
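+ /* toggle fw beacon filtering for the vif; only supported with runtime PM enabled */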
+int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy,
+ struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7921_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ int err;
+
+ if (!dev->pm.enable)
+ return -EOPNOTSUPP;
+
+ err = mt7921_mcu_set_bss_pm(dev, vif, enable);
+ if (err)
+ return err;
+
+ if (enable) {
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ mt76_set(dev, MT_WF_RFCR(ext_phy),
+ MT_WF_RFCR_DROP_OTHER_BEACON);
+ } else {
+ vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+ mt76_clear(dev, MT_WF_RFCR(ext_phy),
+ MT_WF_RFCR_DROP_OTHER_BEACON);
+ }
+
+ return 0;
+}
struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt7921_phy *phy = mt7921_hw_phy(hw);
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, true, false);
mt76_connac_mcu_set_channel_domain(phy->mt76);
ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
MT7921_WATCHDOG_TIME);
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
return 0;
}
cancel_delayed_work_sync(&phy->mt76->mac_work);
- mutex_lock(&dev->mt76.mutex);
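+ /* stop the runtime-PM workers and free frames still buffered for wakeup */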
+ cancel_delayed_work_sync(&dev->pm.ps_work);
+ cancel_work_sync(&dev->pm.wake_work);
+ mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
+
+ mt7921_mutex_acquire(dev);
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, false, false);
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
}
static inline int get_free_idx(u32 mask, u8 start, u8 end)
struct mt76_txq *mtxq;
int idx, ret = 0;
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
if (vif->type == NL80211_IFTYPE_MONITOR &&
is_zero_ether_addr(vif->addr))
if (ret)
goto out;
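+ /* with runtime PM enabled, set up fw beacon filtering for the new vif */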
+ if (dev->pm.enable) {
+ ret = mt7921_mcu_set_bss_pm(dev, vif, true);
+ if (ret)
+ goto out;
+
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
+ }
+
dev->mt76.vif_mask |= BIT(mvif->mt76.idx);
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
out:
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
return ret;
}
if (vif == phy->monitor_vif)
phy->monitor_vif = NULL;
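+ /* free frames queued for this vif and disable fw beacon filtering before teardown */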
+ mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
+
+ if (dev->pm.enable) {
+ mt7921_mcu_set_bss_pm(dev, vif, false);
+ mt76_clear(dev, MT_WF_RFCR(0),
+ MT_WF_RFCR_DROP_OTHER_BEACON);
+ }
+
mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
spin_lock_bh(&dev->sta_poll_lock);
if (!list_empty(&msta->poll_list))
cancel_delayed_work_sync(&phy->mt76->mac_work);
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
set_bit(MT76_RESET, &phy->mt76->state);
mt76_set_channel(phy->mt76);
out:
clear_bit(MT76_RESET, &phy->mt76->state);
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
mt76_txq_schedule_all(phy->mt76);
ieee80211_wake_queues(hw);
}
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
}
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
return 0;
}
phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
} while (0)
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
MT_WF_RFCR_DROP_OTHER_BEACON |
else
mt76_set(dev, MT_WF_RFCR1(band), ctl_flags);
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
}
static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
struct mt7921_phy *phy = mt7921_hw_phy(hw);
struct mt7921_dev *dev = mt7921_hw_dev(hw);
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
if (changed & BSS_CHANGED_ERP_SLOT) {
int slottime = info->use_short_slot ? 9 : 20;
if (changed & BSS_CHANGED_PS)
mt7921_mcu_uni_bss_ps(dev, vif);
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
}
int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
msta->stats.jiffies = jiffies;
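+ /* make sure the chip is awake before sending the station MCU commands */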
+ ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
+ if (ret)
+ return ret;
+
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
true);
if (ret)
return ret;
+ mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
+
return 0;
}
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
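+ /* free frames queued for this station and wake the chip for the teardown commands */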
+ mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
+ mt76_connac_pm_wake(&dev->mphy, &dev->pm);
+
mt76_connac_mcu_add_sta_cmd(&dev->mphy, vif, sta, &msta->wcid, false,
MCU_UNI_CMD_STA_REC_UPDATE);
+
mt7921_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
if (!list_empty(&msta->stats_list))
list_del_init(&msta->stats_list);
spin_unlock_bh(&dev->sta_poll_lock);
+
+ mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
+}
+
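+ /* PM-aware wake_tx_queue: defer to the wake worker if the chip is asleep */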
+static void
+mt7921_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ struct mt76_phy *mphy = phy->mt76;
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
+ return;
+
+ if (test_bit(MT76_STATE_PM, &mphy->state)) {
+ queue_work(dev->mt76.wq, &dev->pm.wake_work);
+ return;
+ }
+
+ dev->pm.last_activity = jiffies;
+ mt76_worker_schedule(&dev->mt76.tx_worker);
}
static void mt7921_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ int qid;
if (control->sta) {
struct mt7921_sta *sta;
wcid = &mvif->sta.wcid;
}
- mt76_tx(mphy, control->sta, wcid, skb);
+ if (!test_bit(MT76_STATE_PM, &mphy->state)) {
+ dev->pm.last_activity = jiffies;
+ mt76_tx(mphy, control->sta, wcid, skb);
+ return;
+ }
+
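+ /* chip is asleep: remap queues >= MT_TXQ_PSD to BE and buffer the frame until it wakes */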
+ qid = skb_get_queue_mapping(skb);
+ if (qid >= MT_TXQ_PSD) {
+ qid = IEEE80211_AC_BE;
+ skb_set_queue_mapping(skb, qid);
+ }
+
+ mt76_connac_pm_queue_skb(hw, &dev->pm, wcid, skb);
}
static int mt7921_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, 0);
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
return 0;
}
mtxq = (struct mt76_txq *)txq->drv_priv;
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
switch (action) {
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
return ret;
}
} tsf;
u16 n;
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
n = omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : omac_idx;
/* TSF software read */
tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band));
tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band));
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
return tsf.t64;
}
} tsf = { .t64 = timestamp, };
u16 n;
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
n = omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : omac_idx;
mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
/* TSF software overwrite */
mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_WRITE);
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
}
static void
struct mt7921_phy *phy = mt7921_hw_phy(hw);
struct mt7921_dev *dev = phy->dev;
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
phy->coverage_class = max_t(s16, coverage_class, 0);
mt7921_mac_set_timing(phy);
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
}
void mt7921_scan_work(struct work_struct *work)
struct mt76_phy *mphy = hw->priv;
int err;
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
err = mt76_connac_mcu_hw_scan(mphy, vif, req);
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
return err;
}
struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt76_phy *mphy = hw->priv;
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
mt76_connac_mcu_cancel_hw_scan(mphy, vif);
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
}
static int
struct mt76_phy *mphy = hw->priv;
int err;
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
err = mt76_connac_mcu_sched_scan_req(mphy, vif, req);
if (err < 0)
err = mt76_connac_mcu_sched_scan_enable(mphy, vif, true);
out:
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
return err;
}
struct mt76_phy *mphy = hw->priv;
int err;
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
err = mt76_connac_mcu_sched_scan_enable(mphy, vif, false);
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
return err;
}
if ((BIT(hweight8(tx_ant)) - 1) != tx_ant)
tx_ant = BIT(ffs(tx_ant) - 1) - 1;
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
phy->mt76->antenna_mask = tx_ant;
phy->mt76->chainmask = tx_ant;
mt76_set_stream_caps(phy->mt76, true);
mt7921_set_stream_he_caps(phy);
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
return 0;
}
cancel_delayed_work_sync(&phy->scan_work);
cancel_delayed_work_sync(&phy->mt76->mac_work);
- mutex_lock(&dev->mt76.mutex);
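+ /* stop the power-save worker and flush buffered frames before suspending */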
+ cancel_delayed_work_sync(&dev->pm.ps_work);
+ mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
+
+ mt7921_mutex_acquire(dev);
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
return err;
}
struct mt7921_phy *phy = mt7921_hw_phy(hw);
int err;
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
if (err < 0)
ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
MT7921_WATCHDOG_TIME);
out:
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
return err;
}
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
- mutex_lock(&dev->mt76.mutex);
+ mt7921_mutex_acquire(dev);
mt76_connac_mcu_update_gtk_rekey(hw, vif, data);
- mutex_unlock(&dev->mt76.mutex);
+ mt7921_mutex_release(dev);
}
#endif /* CONFIG_PM */
.set_key = mt7921_set_key,
.ampdu_action = mt7921_ampdu_action,
.set_rts_threshold = mt7921_set_rts_threshold,
- .wake_tx_queue = mt76_wake_tx_queue,
+ .wake_tx_queue = mt7921_wake_tx_queue,
.release_buffered_frames = mt76_release_buffered_frames,
.get_txpower = mt76_get_txpower,
.get_stats = mt7921_get_stats,