wifi: iwlwifi: mvm: protect TXQ list manipulation
authorJohannes Berg <johannes.berg@intel.com>
Fri, 17 Mar 2023 09:53:25 +0000 (10:53 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 20 Apr 2023 10:35:11 +0000 (12:35 +0200)
[ Upstream commit 923bf981eb6ecc027227716e30701bdcc1845fbf ]

Some recent upstream debugging uncovered the fact that the
TXQ list manipulation in iwlwifi is racy.

Introduce a new state bit that marks the TXQ as completely
ready for use without locking; if that bit is not yet set,
acquire the lock and re-check everything correctly.
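
For illustration only, the following is a minimal standalone sketch (plain
C with pthreads/stdatomic, not the driver code itself) of the pattern this
change implements: a lock-free fast path gated on a "ready" bit, plus a
locked slow path that re-checks the bit before deferring the frame to the
allocation worker. All names in the sketch (txq_demo, txq_tx,
txq_mark_ready, the pthread mutex standing in for add_stream_lock) are
illustrative stand-ins, not the driver's actual symbols.

/*
 * Sketch of the "ready bit + locked recheck" pattern. The fast path reads
 * an atomic flag without locking; the slow path takes the lock, re-checks
 * the flag, and only then queues the deferred allocation work.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct txq_demo {
	atomic_bool ready;     /* stands in for IWL_MVM_TXQ_STATE_READY */
	bool queued_for_alloc; /* stands in for !list_empty(&mvmtxq->list) */
};

static pthread_mutex_t add_stream_lock = PTHREAD_MUTEX_INITIALIZER;

/* TX path: if the queue is fully allocated, transmit without locking. */
static void txq_tx(struct txq_demo *q)
{
	if (atomic_load(&q->ready)) {
		printf("xmit directly\n");
		return;
	}

	/* Slow path: take the lock and re-check before deferring the frame. */
	pthread_mutex_lock(&add_stream_lock);
	if (!q->queued_for_alloc && !atomic_load(&q->ready)) {
		q->queued_for_alloc = true;
		printf("defer: schedule allocation work\n");
	}
	pthread_mutex_unlock(&add_stream_lock);
}

/* Worker side: set the ready bit first, then clear the pending state
 * under the lock, mirroring the ordering in the patch below.
 */
static void txq_mark_ready(struct txq_demo *q)
{
	atomic_store(&q->ready, true);
	pthread_mutex_lock(&add_stream_lock);
	q->queued_for_alloc = false;
	pthread_mutex_unlock(&add_stream_lock);
}

int main(void)
{
	struct txq_demo q = { .ready = false, .queued_for_alloc = false };

	txq_tx(&q);         /* defers and schedules allocation */
	txq_mark_ready(&q); /* worker finished allocating the queue */
	txq_tx(&q);         /* now hits the lock-free fast path */
	return 0;
}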

Reviewed-by: Benjamin Berg <benjamin.berg@intel.com>
Tested-by: Jose Ignacio Tornos Martinez <jtornosm@redhat.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c

index 23e1413ef47196a524e5a58e05f54d5a4d8c5664..a841268e0709ff08daa06740eb5d62191b5f501d 100644 (file)
@@ -757,42 +757,25 @@ static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
 
-       /*
-        * Please note that racing is handled very carefully here:
-        * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
-        * deleted afterwards.
-        * This means that if:
-        * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
-        *      queue is allocated and we can TX.
-        * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
-        *      a race, should defer the frame.
-        * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
-        *      need to allocate the queue and defer the frame.
-        * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
-        *      queue is already scheduled for allocation, no need to allocate,
-        *      should defer the frame.
-        */
-
-       /* If the queue is allocated TX and return. */
-       if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
-               /*
-                * Check that list is empty to avoid a race where txq_id is
-                * already updated, but the queue allocation work wasn't
-                * finished
-                */
-               if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
-                       return;
-
+       if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+           !txq->sta) {
                iwl_mvm_mac_itxq_xmit(hw, txq);
                return;
        }
 
-       /* The list is being deleted only after the queue is fully allocated. */
-       if (!list_empty(&mvmtxq->list))
-               return;
+       /* iwl_mvm_mac_itxq_xmit() will later be called by the worker
+        * to handle any packets we leave on the txq now
+        */
 
-       list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
-       schedule_work(&mvm->add_stream_wk);
+       spin_lock_bh(&mvm->add_stream_lock);
+       /* The list is being deleted only after the queue is fully allocated. */
+       if (list_empty(&mvmtxq->list) &&
+           /* recheck under lock */
+           !test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) {
+               list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
+               schedule_work(&mvm->add_stream_wk);
+       }
+       spin_unlock_bh(&mvm->add_stream_lock);
 }
 
 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)          \
index b5089349ebb7ad8eec5737256492e004e2e15b9f..f5c921c41be56a0e468fac43f30210e3bbe573af 100644 (file)
@@ -731,6 +731,7 @@ struct iwl_mvm_txq {
        atomic_t tx_request;
 #define IWL_MVM_TXQ_STATE_STOP_FULL    0
 #define IWL_MVM_TXQ_STATE_STOP_REDIRECT        1
+#define IWL_MVM_TXQ_STATE_READY                2
        unsigned long state;
 };
 
@@ -829,6 +830,7 @@ struct iwl_mvm {
                struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
        };
        struct work_struct add_stream_wk; /* To add streams to queues */
+       spinlock_t add_stream_lock;
 
        const char *nvm_file_name;
        struct iwl_nvm_data *nvm_data;
index 79e151512fe732da9438ed1ba933dda3064dba71..994f597a7102a80a856766d1a8c0b6e91116de5e 100644 (file)
@@ -1184,6 +1184,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
        INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
        INIT_LIST_HEAD(&mvm->add_stream_txqs);
+       spin_lock_init(&mvm->add_stream_lock);
 
        init_waitqueue_head(&mvm->rx_sync_waitq);
 
index 41b1b8b6c1e1d5ff67b9e65d7cc04b24c189665b..013aca70c3d3b60111f67989fba52b6813b4f9ca 100644 (file)
@@ -383,8 +383,11 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                struct iwl_mvm_txq *mvmtxq =
                        iwl_mvm_txq_from_tid(sta, tid);
 
-               mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+               spin_lock_bh(&mvm->add_stream_lock);
                list_del_init(&mvmtxq->list);
+               clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+               mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+               spin_unlock_bh(&mvm->add_stream_lock);
        }
 
        /* Regardless if this is a reserved TXQ for a STA - mark it as false */
@@ -478,8 +481,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
                        disable_agg_tids |= BIT(tid);
                mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
 
-               mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+               spin_lock_bh(&mvm->add_stream_lock);
                list_del_init(&mvmtxq->list);
+               clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+               mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+               spin_unlock_bh(&mvm->add_stream_lock);
        }
 
        mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
@@ -1443,12 +1449,22 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
                 * a queue in the function itself.
                 */
                if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
+                       spin_lock_bh(&mvm->add_stream_lock);
                        list_del_init(&mvmtxq->list);
+                       spin_unlock_bh(&mvm->add_stream_lock);
                        continue;
                }
 
-               list_del_init(&mvmtxq->list);
+               /* now we're ready, any remaining races/concurrency will be
+                * handled in iwl_mvm_mac_itxq_xmit()
+                */
+               set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+
                local_bh_disable();
+               spin_lock(&mvm->add_stream_lock);
+               list_del_init(&mvmtxq->list);
+               spin_unlock(&mvm->add_stream_lock);
+
                iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
                local_bh_enable();
        }
@@ -1862,8 +1878,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
                struct iwl_mvm_txq *mvmtxq =
                        iwl_mvm_txq_from_mac80211(sta->txq[i]);
 
+               spin_lock_bh(&mvm->add_stream_lock);
                mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
                list_del_init(&mvmtxq->list);
+               clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+               spin_unlock_bh(&mvm->add_stream_lock);
        }
 }