Work queues cannot be allocated while a mutex is held, because the
mutex may already be in use, which would force the allocation to
sleep. Doing so generates the following splat with 4.13+:
[ 19.513298] ======================================================
[ 19.513429] WARNING: possible circular locking dependency detected
[ 19.513557] 4.13.0-rc5+ #6 Not tainted
[ 19.513638] ------------------------------------------------------
[ 19.513767] cpuhp/0/12 is trying to acquire lock:
[ 19.513867] (&tz->lock){+.+.+.}, at: [<ffffffff924afebb>] thermal_zone_get_temp+0x5b/0xb0
[ 19.514047]
[ 19.514047] but task is already holding lock:
[ 19.514166] (cpuhp_state){+.+.+.}, at: [<ffffffff91cc4baa>] cpuhp_thread_fun+0x3a/0x210
[ 19.514338]
[ 19.514338] which lock already depends on the new lock.
This lock dependency already existed in previous kernel versions, but
it was not detected until commit 49dfe2a67797 ("cpuhotplug: Link lock
stacks for hotplug callbacks") was introduced.
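
The underlying anti-pattern, reduced to a minimal sketch (the demo_*
names are illustrative and not from the driver):

#include <linux/mutex.h>
#include <linux/workqueue.h>

/* Hypothetical driver state, for illustration only. */
struct demo_state {
	struct mutex mutex;		/* also taken in the hotplug path */
	struct workqueue_struct *wq;
};

/* BAD: alloc_workqueue() may sleep, and calling it under a mutex
 * that participates in the hotplug lock chain creates the circular
 * dependency lockdep reports above.
 */
static int demo_start(struct demo_state *s)
{
	int ret = 0;

	mutex_lock(&s->mutex);
	if (!s->wq) {
		s->wq = alloc_workqueue("demo", WQ_HIGHPRI | WQ_UNBOUND, 1);
		if (!s->wq)
			ret = -ENOMEM;
	}
	mutex_unlock(&s->mutex);
	return ret;
}

/* GOOD: allocate once at device-allocation time, before any of the
 * locks involved in the splat can be held, and tear down in the
 * matching free path.
 */
static int demo_alloc(struct demo_state *s)
{
	s->wq = alloc_workqueue("demo", WQ_HIGHPRI | WQ_UNBOUND, 1);
	return s->wq ? 0 : -ENOMEM;
}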
Reported-by: David Weinehall <david.weinehall@intel.com>
Reported-by: Jiri Kosina <jikos@kernel.org>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
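
Pieced together from the hunks below (simplified, surrounding code
elided), the net effect is that the workqueue is now created once in
iwl_trans_pcie_alloc() and destroyed in iwl_trans_pcie_free(), instead
of being lazily allocated in the rx init path:

	/* iwl_trans_pcie_alloc(): set up once, with no locks held */
	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
						   WQ_HIGHPRI | WQ_UNBOUND, 1);
	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);

	/* iwl_trans_pcie_free(): tear down once, mirroring the alloc */
	if (trans_pcie->rba.alloc_wq) {
		destroy_workqueue(trans_pcie->rba.alloc_wq);
		trans_pcie->rba.alloc_wq = NULL;
	}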
 void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
+void iwl_pcie_rx_allocator_work(struct work_struct *data);
+
 /* common functions that are used by gen2 transport */
 void iwl_pcie_apm_config(struct iwl_trans *trans);
 int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
 	rxq->free_count += RX_CLAIM_REQ_ALLOC;
 }
 
-static void iwl_pcie_rx_allocator_work(struct work_struct *data)
+void iwl_pcie_rx_allocator_work(struct work_struct *data)
 {
 	struct iwl_rb_allocator *rba_p =
 		container_of(data, struct iwl_rb_allocator, rx_alloc);
 		return err;
 	}
 
 	def_rxq = trans_pcie->rxq;
-	if (!rba->alloc_wq)
-		rba->alloc_wq = alloc_workqueue("rb_allocator",
-						WQ_HIGHPRI | WQ_UNBOUND, 1);
-	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
 
 	spin_lock(&rba->lock);
 	atomic_set(&rba->req_pending, 0);
 }
 	cancel_work_sync(&rba->rx_alloc);
-	if (rba->alloc_wq) {
-		destroy_workqueue(rba->alloc_wq);
-		rba->alloc_wq = NULL;
-	}
 	iwl_pcie_free_rbs_pool(trans);
 	iwl_pcie_tx_free(trans);
 	iwl_pcie_rx_free(trans);
 
+	if (trans_pcie->rba.alloc_wq) {
+		destroy_workqueue(trans_pcie->rba.alloc_wq);
+		trans_pcie->rba.alloc_wq = NULL;
+	}
+
 	if (trans_pcie->msix_enabled) {
 		for (i = 0; i < trans_pcie->alloc_vecs; i++) {
 			irq_set_affinity_hint(
 		trans_pcie->inta_mask = CSR_INI_SET_MASK;
 	}
 
+	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
+						   WQ_HIGHPRI | WQ_UNBOUND, 1);
+	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
+
 #ifdef CONFIG_IWLWIFI_PCIE_RTPM
 	trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
 #else