/**
* enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
*
- * IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
+ * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
* ring. The transport layer doesn't map the command's buffer to DMA, but
* rather copies it to a previously allocated DMA buffer. This flag tells
* the transport layer not to copy the command, but to map the existing
* buffer. This can save a memcpy and is worthwhile for very big commands.
+ * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
+ * chunk internally and free it again after the command completes. This
+ * can (currently) be used only once per command.
*/
enum iwl_hcmd_dataflag {
IWL_HCMD_DFL_NOCOPY = BIT(0),
+ IWL_HCMD_DFL_DUP = BIT(1),
};
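For context, a caller requests duplication by setting the new flag on a chunk. A minimal usage sketch, assuming the usual struct iwl_host_cmd fields (id, len, data, dataflags); the scan command and variable names here are illustrative, not taken from this patch:

	struct iwl_host_cmd hcmd = {
		.id = REPLY_SCAN_CMD,		/* illustrative command id */
		.len = { sizeof(scan_cmd), },
		.data = { &scan_cmd, },
		/* scan_cmd is a short-lived buffer: NOCOPY would require
		 * keeping it alive until the command completes, so ask the
		 * transport to kmemdup() it and free the copy itself.
		 */
		.dataflags = { IWL_HCMD_DFL_DUP, },
	};

	ret = iwl_trans_send_cmd(trans, &hcmd);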
struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
+ void *dup_buf = NULL;
dma_addr_t phys_addr;
- u32 idx;
+ int idx;
u16 copy_size, cmd_size;
bool had_nocopy = false;
int i;
continue;
if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
had_nocopy = true;
+ if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+ } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
+ /*
+ * This is also a chunk that isn't copied
+ * to the static buffer so set had_nocopy.
+ */
+ had_nocopy = true;
+
+ /* only allowed once */
+ if (WARN_ON(dup_buf)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+
+ dup_buf = kmemdup(cmd->data[i], cmd->len[i],
+ GFP_ATOMIC);
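+ /* the queue lock is not held yet and dup_buf is the only
+  * allocation so far, so a direct return is safe here
+  */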
+ if (!dup_buf)
+ return -ENOMEM;
} else {
/* NOCOPY must not be followed by normal! */
- if (WARN_ON(had_nocopy))
- return -EINVAL;
+ if (WARN_ON(had_nocopy)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
copy_size += cmd->len[i];
}
cmd_size += cmd->len[i];
if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
"Command %s (%#x) is too large (%d bytes)\n",
trans_pcie_get_cmd_string(trans_pcie, cmd->id),
- cmd->id, copy_size))
- return -EINVAL;
+ cmd->id, copy_size)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
spin_lock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
iwl_op_mode_cmd_queue_full(trans->op_mode);
- return -ENOSPC;
+ idx = -ENOSPC;
+ goto free_dup_buf;
}
idx = get_cmd_index(q, q->write_ptr);
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
if (!cmd->len[i])
continue;
- if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
+ if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP))
break;
memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
cmd_pos += cmd->len[i];
iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+ const void *data = cmd->data[i];
+
if (!cmd->len[i])
continue;
- if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
+ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP)))
continue;
- phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
+ if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
+ data = dup_buf;
+ phys_addr = dma_map_single(trans->dev, (void *)data,
cmd->len[i], DMA_BIDIRECTIONAL);
if (dma_mapping_error(trans->dev, phys_addr)) {
iwl_unmap_tfd(trans, out_meta,
}
out_meta->flags = cmd->flags;
+ if (WARN_ON_ONCE(txq->entries[idx].free_buf))
+ kfree(txq->entries[idx].free_buf);
+ txq->entries[idx].free_buf = dup_buf;
txq->need_update = 1;
out:
spin_unlock_bh(&txq->lock);
+ free_dup_buf:
+ if (idx < 0)
+ kfree(dup_buf);
return idx;
}
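Not shown in this excerpt, but implied by the free_buf bookkeeping above: the completion path has to release the duplicated chunk. A minimal sketch of that cleanup, assuming the command-complete handler has already derived cmd_index for the finished command:

	/* Release the kmemdup()'d chunk and clear the slot so the
	 * WARN_ON_ONCE() in the enqueue path does not trip when this
	 * entry is reused.
	 */
	kfree(txq->entries[cmd_index].free_buf);
	txq->entries[cmd_index].free_buf = NULL;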