	return ret;
}
-static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
-			     struct ieee80211_sta *sta,
-			     struct sk_buff *skb)
-{
-	struct iwl_mvm_sta *mvmsta;
-	bool defer = false;
-
-	/*
-	 * double check the IN_D0I3 flag both before and after
-	 * taking the spinlock, in order to prevent taking
-	 * the spinlock when not needed.
-	 */
-	if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
-		return false;
-
-	spin_lock(&mvm->d0i3_tx_lock);
-	/*
-	 * testing the flag again ensures the skb dequeue
-	 * loop (on d0i3 exit) hasn't run yet.
-	 */
-	if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
-		goto out;
-
-	mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	if (mvmsta->sta_id == IWL_MVM_INVALID_STA ||
-	    mvmsta->sta_id != mvm->d0i3_ap_sta_id)
-		goto out;
-
-	__skb_queue_tail(&mvm->d0i3_tx, skb);
-
-	defer = true;
-out:
-	spin_unlock(&mvm->d0i3_tx_lock);
-	return defer;
-}
-
static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
			   struct ieee80211_tx_control *control,
			   struct sk_buff *skb)
	}
	if (sta) {
-		if (iwl_mvm_defer_tx(mvm, sta, skb))
-			return;
		if (iwl_mvm_tx_skb(mvm, skb, sta))
			goto drop;
		return;
		 * would do.
		 */
		clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
-#ifdef CONFIG_PM
-		iwl_mvm_d0i3_enable_tx(mvm, NULL);
-#endif
	}
	return ret;
	mutex_lock(&mvm->mutex);
	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
-#ifdef CONFIG_PM
-	iwl_mvm_d0i3_enable_tx(mvm, NULL);
-#endif
+
	ret = iwl_mvm_update_quotas(mvm, true, NULL);
	if (ret)
		IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
	u8 d0i3_ap_sta_id;
	bool d0i3_offloading;
	struct work_struct d0i3_exit_work;
-	struct sk_buff_head d0i3_tx;
	/* protect d0i3_suspend_flags */
	struct mutex d0i3_suspend_mutex;
	unsigned long d0i3_suspend_flags;
-	/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
-	spinlock_t d0i3_tx_lock;
	wait_queue_head_t d0i3_exit_waitq;
	wait_queue_head_t rx_sync_waitq;
u32 cmd_flags);
#ifdef CONFIG_PM
-void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode);
int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode);
int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
	INIT_LIST_HEAD(&mvm->add_stream_txqs);
-	spin_lock_init(&mvm->d0i3_tx_lock);
-	skb_queue_head_init(&mvm->d0i3_tx);
	init_waitqueue_head(&mvm->d0i3_exit_waitq);
	init_waitqueue_head(&mvm->rx_sync_waitq);
	iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status);
}
-void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
-{
-	struct ieee80211_sta *sta = NULL;
-	struct iwl_mvm_sta *mvm_ap_sta;
-	int i;
-	bool wake_queues = false;
-
-	lockdep_assert_held(&mvm->mutex);
-
-	spin_lock_bh(&mvm->d0i3_tx_lock);
-
-	if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
-		goto out;
-
-	IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
-
-	/* get the sta in order to update seq numbers and re-enqueue skbs */
-	sta = rcu_dereference_protected(
-			mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
-			lockdep_is_held(&mvm->mutex));
-
-	if (IS_ERR_OR_NULL(sta)) {
-		sta = NULL;
-		goto out;
-	}
-
-	if (mvm->d0i3_offloading && qos_seq) {
-		/* update qos seq numbers if offloading was enabled */
-		mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
-		for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
-			u16 seq = le16_to_cpu(qos_seq[i]);
-			/* firmware stores last-used one, we store next one */
-			seq += 0x10;
-			mvm_ap_sta->tid_data[i].seq_number = seq;
-		}
-	}
-out:
-	/* re-enqueue (or drop) all packets */
-	while (!skb_queue_empty(&mvm->d0i3_tx)) {
-		struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);
-
-		if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
-			ieee80211_free_txskb(mvm->hw, skb);
-
-		/* if the skb_queue is not empty, we need to wake queues */
-		wake_queues = true;
-	}
-	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
-	wake_up(&mvm->d0i3_exit_waitq);
-	mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
-	if (wake_queues)
-		ieee80211_wake_queues(mvm->hw);
-
-	spin_unlock_bh(&mvm->d0i3_tx_lock);
-}
-
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
	struct iwl_wowlan_status *status;
	u32 wakeup_reasons = 0;
-	__le16 *qos_seq = NULL;
	mutex_lock(&mvm->mutex);
	}
	wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
-	qos_seq = status->qos_seq_ctr;
	IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
					    iwl_mvm_d0i3_exit_work_iter,
					    &iter_data);
out:
-	iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
-
	IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
		       wakeup_reasons);
-	/* qos_seq might point inside resp_pkt, so free it only now */
	kfree(status);
	/* the FW might have updated the regdomain */