* @mbx_addr_1_step: step address data 1
* @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
* only valid for discrete (not integrated) NICs
+ * @invalid_tx_cmd: DMA buffer holding an invalid TX command; unused and
+ *	freed TFDs are pointed at it so they never reference real data
*/
struct iwl_trans {
bool csme_own;
u8 pcie_link_speed;
+ struct iwl_dma_ptr invalid_tx_cmd;
+
/* pointer to trans specific struct */
	/* Ensure that this pointer is always aligned to sizeof(void *) */
char trans_specific[] __aligned(sizeof(void *));
memset(desc_dram, 0, sizeof(*desc_dram));
}

+static void iwl_pcie_free_invalid_tx_cmd(struct iwl_trans *trans)
+{
+ iwl_pcie_free_dma_ptr(trans, &trans->invalid_tx_cmd);
+}
+
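+/*
+ * Allocate a small DMA buffer holding a deliberately invalid TX command
+ * (INVALID_WR_PTR_CMD in DEBUG_GROUP). TFDs that are not in use are
+ * pointed at this buffer, so if the hardware ever processes such a TFD
+ * it reads a recognizably invalid command instead of stale data.
+ */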
+static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans)
+{
+ struct iwl_cmd_header_wide bad_cmd = {
+ .cmd = INVALID_WR_PTR_CMD,
+ .group_id = DEBUG_GROUP,
+ .sequence = cpu_to_le16(0xffff),
+ .length = cpu_to_le16(0),
+ .version = 0,
+ };
+ int ret;
+
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans->invalid_tx_cmd,
+ sizeof(bad_cmd));
+ if (ret)
+ return ret;
+ memcpy(trans->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd));
+ return 0;
+}
+
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
iwl_pcie_free_ict(trans);
	}

+ iwl_pcie_free_invalid_tx_cmd(trans);
+
iwl_pcie_free_fw_monitor(trans);
	iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data,
					      trans->dev);
	init_waitqueue_head(&trans_pcie->sx_waitq);

+ ret = iwl_pcie_alloc_invalid_tx_cmd(trans);
+ if (ret)
+		goto out_no_pci;
+
if (trans_pcie->msix_enabled) {
ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
#include "fw/api/commands.h"
#include "fw/api/tx.h"
#include "fw/api/datapath.h"
+#include "fw/api/debug.h"
#include "queue/tx.h"
#include "iwl-fh.h"
#include "iwl-scd.h"
return idx;
}
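
+/*
+ * Make a gen2 TFD carry no real data: clear its TB count, then add a
+ * single TB pointing at the shared invalid TX command buffer.
+ */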
+static void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd)
+{
+ tfd->num_tbs = 0;
+
+ iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
+ trans->invalid_tx_cmd.size);
+}
+
void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
struct iwl_tfh_tfd *tfd)
{
DMA_TO_DEVICE);
}
- tfd->num_tbs = 0;
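+	/* instead of only clearing num_tbs, re-point the unmapped TFD
+	 * at the invalid TX command buffer
+	 */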
+ iwl_txq_set_tfd_invalid_gen2(trans, tfd);
}
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
iwl_force_nmi(trans);
}
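
+/*
+ * Gen1 counterpart of iwl_txq_set_tfd_invalid_gen2(): clear the TB
+ * count, then point TB 0 at the shared invalid TX command buffer.
+ */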
+static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
+ struct iwl_tfd *tfd)
+{
+ tfd->num_tbs = 0;
+
+ iwl_pcie_gen1_tfd_set_tb(trans, tfd, 0, trans->invalid_tx_cmd.dma,
+ trans->invalid_tx_cmd.size);
+}
+
int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
bool cmd_queue)
{
- size_t tfd_sz = trans->txqs.tfd.size *
- trans->trans_cfg->base_params->max_tfd_queue_size;
+ size_t num_entries = trans->trans_cfg->gen2 ?
+ slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
+ size_t tfd_sz;
size_t tb0_buf_sz;
int i;
if (WARN_ON(txq->entries || txq->tfds))
return -EINVAL;
- if (trans->trans_cfg->gen2)
- tfd_sz = trans->txqs.tfd.size * slots_num;
+ tfd_sz = trans->txqs.tfd.size * num_entries;
timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
txq->trans = trans;
if (!txq->first_tb_bufs)
goto err_free_tfds;
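
+	/* Point every allocated TFD at the invalid TX command, so that
+	 * entries the driver never fills still reference harmless data.
+	 */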
+ for (i = 0; i < num_entries; i++) {
+ void *tfd = iwl_txq_get_tfd(trans, txq, i);
+
+ if (trans->trans_cfg->gen2)
+ iwl_txq_set_tfd_invalid_gen2(trans, tfd);
+ else
+ iwl_txq_set_tfd_invalid_gen1(trans, tfd);
+ }
+
return 0;
err_free_tfds:
dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
meta->tbs = 0;
- tfd->num_tbs = 0;
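+	/* as in the gen2 path, leave the unmapped TFD pointing at the
+	 * invalid TX command buffer rather than merely zeroing num_tbs
+	 */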
+ iwl_txq_set_tfd_invalid_gen1(trans, tfd);
}
#define IWL_TX_CRC_SIZE 4