memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
- ieee80211_rx(priv->hw, skb);
+ ieee80211_rx_ni(priv->hw, skb);
}
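Note: the _ni ("no interrupt") variants exist precisely for process context with BHs enabled, which is what a threaded IRQ handler provides. Conceptually, mac80211 just brackets the regular call in a BH-disabled section; a rough sketch of the idea, not the verbatim mac80211 source:

static inline void ieee80211_rx_ni(struct ieee80211_hw *hw,
				   struct sk_buff *skb)
{
	/* mac80211's RX path must run with BHs disabled */
	local_bh_disable();
	ieee80211_rx(hw, skb);
	local_bh_enable();
}

ieee80211_tx_status_ni() follows the same pattern around ieee80211_tx_status().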
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
sta_id);
- spin_lock(&priv->sta_lock);
+ spin_lock_bh(&priv->sta_lock);
switch (add_sta_resp->status) {
case ADD_STA_SUCCESS_MSK:
priv->stations[sta_id].sta.mode ==
STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
addsta->sta.addr);
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
return ret;
}
sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
IWLAGN_TX_RES_RA_POS;
- spin_lock(&priv->sta_lock);
+ spin_lock_bh(&priv->sta_lock);
if (is_agg)
iwl_rx_reply_tx_agg(priv, tx_resp);
le16_to_cpu(tx_resp->seq_ctl));
iwl_check_abort_status(priv, tx_resp->frame_count, status);
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
while (!skb_queue_empty(&skbs)) {
skb = __skb_dequeue(&skbs);
- ieee80211_tx_status(priv->hw, skb);
+ ieee80211_tx_status_ni(priv->hw, skb);
}
if (is_offchannel_skb)
tid = ba_resp->tid;
agg = &priv->tid_data[sta_id][tid].agg;
- spin_lock(&priv->sta_lock);
+ spin_lock_bh(&priv->sta_lock);
if (unlikely(!agg->wait_for_ba)) {
if (unlikely(ba_resp->bitmap))
IWL_ERR(priv, "Received BA when not expected\n");
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
IWL_DEBUG_TX_QUEUES(priv,
"Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
scd_flow, sta_id, tid, agg->txq_id);
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
}
}
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
while (!skb_queue_empty(&reclaimed_skbs)) {
skb = __skb_dequeue(&reclaimed_skbs);
- ieee80211_tx_status(priv->hw, skb);
+ ieee80211_tx_status_ni(priv->hw, skb);
}
return 0;
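The move away from plain spin_lock() matters here: in the tasklet, BHs were implicitly disabled, so spin_lock() on sta_lock was safe. In the IRQ thread (ordinary process context) BHs stay enabled, so any softirq path taking the same lock could preempt the holder on the same CPU and deadlock; spin_lock_bh() closes that window. A minimal illustration with hypothetical names, not driver code:

static DEFINE_SPINLOCK(demo_lock);

/* runs in softirq (BH) context, e.g. a tasklet or timer */
static void demo_tasklet_fn(unsigned long data)
{
	spin_lock(&demo_lock);		/* BHs are already off here */
	/* ... */
	spin_unlock(&demo_lock);
}

/* runs in the threaded IRQ handler (process context, BHs on) */
static void demo_thread_work(void)
{
	/*
	 * plain spin_lock() could deadlock against demo_tasklet_fn()
	 * on this CPU; _bh keeps softirqs off for the critical section
	 */
	spin_lock_bh(&demo_lock);
	/* ... */
	spin_unlock_bh(&demo_lock);
}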
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
* HCMD this Rx responds to.
- * Must be atomic and called with BH disabled.
+ * This callback may sleep; it is called from a threaded IRQ handler.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
* Must be atomic and called with BH disabled.
* @hw_rf_kill: notifies of a change in the HW rf kill switch. True means that
- * the radio is killed. Must be atomic.
+ * the radio is killed. May sleep.
* @free_skb: allows the transport layer to free skbs that haven't been
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
* Must be atomic and called with BH disabled.
* @nic_config: configure NIC, called before firmware is started.
* May sleep
- * @wimax_active: invoked when WiMax becomes active. Must be atomic and called
- * with BH disabled.
+ * @wimax_active: invoked when WiMax becomes active. May sleep
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
+ might_sleep();
return op_mode->ops->rx(op_mode, rxb, cmd);
}
static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
bool state)
{
+ might_sleep();
op_mode->ops->hw_rf_kill(op_mode, state);
}
static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
{
+ might_sleep();
op_mode->ops->wimax_active(op_mode);
}
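might_sleep() documents the relaxed contract at the call site: it is a no-op in normal builds and complains (under CONFIG_DEBUG_ATOMIC_SLEEP) if the wrapper is ever reached from atomic context. Since these callbacks now run from the IRQ thread, an op_mode implementation is free to block. A hypothetical sketch, where example_priv and its helper are made up and only the callback signature comes from the ops above:

static void example_op_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
{
	struct example_priv *priv = example_priv_from_op_mode(op_mode);

	/* a sleeping lock is fine now that we run in thread context */
	mutex_lock(&priv->mutex);
	priv->rfkill_asserted = state;
	mutex_unlock(&priv->mutex);
}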
#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
+#include <linux/lockdep.h>
#include "iwl-debug.h"
#include "iwl-config.h"
struct dentry *dbgfs_dir;
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map sync_cmd_lockdep_map;
+#endif
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
}
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
+ struct iwl_host_cmd *cmd)
{
+ int ret;
+
WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"%s bad state = %d", __func__, trans->state);
- return trans->ops->send_cmd(trans, cmd);
+ if (!(cmd->flags & CMD_ASYNC))
+ lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
+
+ ret = trans->ops->send_cmd(trans, cmd);
+
+ if (!(cmd->flags & CMD_ASYNC))
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+
+ return ret;
}
static inline struct iwl_device_cmd *
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
+static inline void trans_lockdep_init(struct iwl_trans *trans)
+{
+#ifdef CONFIG_LOCKDEP
+ static struct lock_class_key __key;
+
+ lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
+ &__key, 0);
+#endif
+}
+
#endif /* __iwl_trans_h__ */
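The fake lock encodes the rule "no synchronous host command may be issued from the RX path": the IRQ thread holds sync_cmd_lockdep_map for the whole of iwl_pcie_irq_handler() (see the lock_map_acquire() further down), while every synchronous command takes it for read in iwl_trans_send_cmd(). If an RX handler ever sends a sync command, lockdep reports the recursive acquisition instead of the driver silently deadlocking while waiting for a response the blocked thread would have delivered. The annotation pattern in isolation, on a hypothetical object and built with CONFIG_LOCKDEP (mirror the driver's #ifdef otherwise):

#include <linux/lockdep.h>

struct demo_dev {
	struct lockdep_map sync_map;	/* fake lock, only lockdep sees it */
};

static void demo_init(struct demo_dev *d)
{
	static struct lock_class_key key;

	lockdep_init_map(&d->sync_map, "demo_sync_map", &key, 0);
}

static void demo_irq_thread(struct demo_dev *d)
{
	lock_map_acquire(&d->sync_map);		/* "writer": the RX thread */
	/* ... dispatch RX notifications to the op_mode ... */
	lock_map_release(&d->sync_map);
}

static void demo_send_sync_cmd(struct demo_dev *d)
{
	lock_map_acquire_read(&d->sync_map);	/* "reader": each sync command */
	/* ... block until the firmware responds ... */
	lock_map_release(&d->sync_map);
}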
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
- ieee80211_rx(mvm->hw, skb);
+ ieee80211_rx_ni(mvm->hw, skb);
}
/*
seq_ctl = le16_to_cpu(hdr->seq_ctrl);
}
- ieee80211_tx_status(mvm->hw, skb);
+ ieee80211_tx_status_ni(mvm->hw, skb);
}
if (txq_id >= IWL_FIRST_AMPDU_QUEUE) {
struct iwl_mvm_tid_data *tid_data =
&mvmsta->tid_data[tid];
- spin_lock(&mvmsta->lock);
+ spin_lock_bh(&mvmsta->lock);
tid_data->next_reclaimed = next_reclaimed;
IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n",
next_reclaimed);
iwl_mvm_check_ratid_empty(mvm, sta, tid);
- spin_unlock(&mvmsta->lock);
+ spin_unlock_bh(&mvmsta->lock);
}
#ifdef CONFIG_PM_SLEEP
return 0;
}
- spin_lock(&mvmsta->lock);
+ spin_lock_bh(&mvmsta->lock);
__skb_queue_head_init(&reclaimed_skbs);
}
}
- spin_unlock(&mvmsta->lock);
+ spin_unlock_bh(&mvmsta->lock);
rcu_read_unlock();
while (!skb_queue_empty(&reclaimed_skbs)) {
skb = __skb_dequeue(&reclaimed_skbs);
- ieee80211_tx_status(mvm->hw, skb);
+ ieee80211_tx_status_ni(mvm->hw, skb);
}
return 0;
int ict_index;
u32 inta;
bool use_ict;
- struct tasklet_struct irq_tasklet;
struct isr_statistics isr_stats;
spinlock_t irq_lock;
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
-void iwl_pcie_tasklet(struct iwl_trans *trans);
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
* 'processed' and 'read' driver indexes as well)
* + A received packet is processed and handed to the kernel network stack,
* detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- * were enough free buffers and RX_STALLED is set it is cleared.
+ * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
+ * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
+ * If there were enough free buffers and RX_STALLED is set it is cleared.
*
*
* Driver sequence:
/*
* If the device isn't enabled - no need to try to add buffers...
* This can happen when we stop the device and still have an interrupt
- * pending. We stop the APM before we sync the interrupts / tasklets
- * because we have to (see comment there). On the other hand, since
- * the APM is stopped, we cannot access the HW (in particular not prph).
+ * pending. We stop the APM before we sync the interrupts because we
+ * have to (see comment there). On the other hand, since the APM is
+ * stopped, we cannot access the HW (in particular not prph).
* So don't try to restock if the APM has been already stopped.
*/
if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
wake_up(&trans_pcie->wait_command_queue);
+ local_bh_disable();
iwl_op_mode_nic_error(trans->op_mode);
+ local_bh_enable();
}
-void iwl_pcie_tasklet(struct iwl_trans *trans)
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
+ struct iwl_trans *trans = dev_id;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
u32 inta_mask;
#endif
+ lock_map_acquire(&trans->sync_cmd_lockdep_map);
+
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
/* Ack/clear/reset pending uCode interrupts.
handled |= CSR_INT_BIT_HW_ERR;
- return;
+ goto out;
}
#ifdef CONFIG_IWLWIFI_DEBUG
/* Re-enable RF_KILL if it occurred */
else if (handled & CSR_INT_BIT_RF_KILL)
iwl_enable_rfkill_int(trans);
+
+out:
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+ return IRQ_HANDLED;
}
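For context, the threaded-IRQ split works as follows: the hard-IRQ handler does only the minimum atomic work (check the line is ours, ack/mask) and returns IRQ_WAKE_THREAD; the core then runs the thread function in a dedicated kthread, where sleeping is allowed, and it returns IRQ_HANDLED when done. A bare-bones sketch with hypothetical names:

static irqreturn_t demo_hard_irq(int irq, void *dev_id)
{
	struct demo_dev *d = dev_id;

	if (!demo_irq_is_ours(d))	/* shared line, not our interrupt */
		return IRQ_NONE;

	demo_mask_interrupts(d);	/* quick, atomic work only */
	return IRQ_WAKE_THREAD;		/* defer the rest to the thread */
}

static irqreturn_t demo_irq_thread_fn(int irq, void *dev_id)
{
	struct demo_dev *d = dev_id;

	demo_process_rx(d);		/* heavy lifting; may sleep */
	demo_unmask_interrupts(d);
	return IRQ_HANDLED;
}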
/******************************************************************************
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
+ * If we have something to service, the irq thread will re-enable ints.
* If we *don't* have something, we'll re-enable before leaving here. */
inta_mask = iwl_read32(trans, CSR_INT_MASK);
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
#endif
trans_pcie->inta |= inta;
- /* iwl_pcie_tasklet() will service interrupts and re-enable them */
+ /* the thread will service interrupts and re-enable them */
if (likely(inta))
- tasklet_schedule(&trans_pcie->irq_tasklet);
+ return IRQ_WAKE_THREAD;
else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta)
iwl_enable_interrupts(trans);
trans_pcie->inta |= inta;
- /* iwl_pcie_tasklet() will service interrupts and re-enable them */
+ /* the thread will service interrupts and re-enable them */
- if (likely(inta))
- tasklet_schedule(&trans_pcie->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
+ if (likely(inta)) {
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ return IRQ_WAKE_THREAD;
+ } else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta) {
- /* Allow interrupt if was disabled by this handler and
- * no tasklet was schedules, We should not enable interrupt,
+ /* Allow the interrupt if it was disabled by this handler and
+ * no thread was woken. We should not enable the interrupt,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
synchronize_irq(trans_pcie->pci_dev->irq);
- tasklet_kill(&trans_pcie->irq_tasklet);
iwl_pcie_tx_free(trans);
iwl_pcie_rx_free(trans);
trans->ops = &trans_ops_pcie;
trans->cfg = cfg;
+ trans_lockdep_init(trans);
trans_pcie->trans = trans;
spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock);
trans_pcie->inta_mask = CSR_INI_SET_MASK;
- tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
- iwl_pcie_tasklet, (unsigned long)trans);
-
if (iwl_pcie_alloc_ict(trans))
goto out_free_cmd_pool;
- err = request_irq(pdev->irq, iwl_pcie_isr_ict,
- IRQF_SHARED, DRV_NAME, trans);
- if (err) {
+ if (request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
+ iwl_pcie_irq_handler,
+ IRQF_SHARED, DRV_NAME, trans)) {
IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
goto out_free_ict;
}
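The old request_irq() maps directly onto request_threaded_irq(): the existing ISR stays as the primary (hard-IRQ) handler and the former tasklet body becomes the thread function. Teardown is unchanged; free_irq() with the same dev_id releases both halves and waits for a running thread handler to finish. A registration sketch, where only the iwl_pcie_* names come from the patch and the rest is illustrative:

	int err;

	err = request_threaded_irq(pdev->irq,
				   iwl_pcie_isr_ict,	/* primary: ack + IRQ_WAKE_THREAD */
				   iwl_pcie_irq_handler,	/* thread fn: may sleep */
				   IRQF_SHARED, DRV_NAME, trans);
	if (err)
		goto out_free_ict;

	/* ... and on teardown, the same dev_id frees both handlers ... */
	free_irq(pdev->irq, trans);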
if (WARN_ON(txq_id == trans_pcie->cmd_queue))
return;
- spin_lock(&txq->lock);
+ spin_lock_bh(&txq->lock);
if (txq->q.read_ptr == tfd_num)
goto out;
if (iwl_queue_space(&txq->q) > txq->q.low_mark)
iwl_wake_queue(trans, txq);
out:
- spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->lock);
}
/*
return;
}
- spin_lock(&txq->lock);
+ spin_lock_bh(&txq->lock);
cmd_index = get_cmd_index(&txq->q, index);
cmd = txq->entries[cmd_index].cmd;
meta->flags = 0;
- spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->lock);
}
#define HOST_COMPLETE_TIMEOUT (2 * HZ)