commit f4feb8ac authored by Johannes Berg

iwlwifi: support host command with copied data

In addition to the NOCOPY flag, add a DUP flag that
tells the transport to kmemdup() the buffer and free
it after the command completes.

Currently this is only supported for a single buffer
in a given command, but that could be extended if it
should be needed.
Reviewed-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent 86052a77
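
A minimal, hypothetical caller-side sketch of how the new flag might be used follows. The command ID REPLY_EXAMPLE and the payload are invented for illustration; struct iwl_host_cmd, its data/len/dataflags arrays, CMD_ASYNC and iwl_trans_send_cmd() are the existing transport API this patch extends.

```c
/* A minimal sketch, assuming a hypothetical REPLY_EXAMPLE command. */
#include <linux/types.h>
#include "iwl-trans.h"	/* struct iwl_host_cmd, IWL_HCMD_DFL_DUP, iwl_trans_send_cmd() */

#define REPLY_EXAMPLE	0xf0	/* hypothetical command ID, for illustration only */

/*
 * Send a command whose payload lives in a short-lived buffer.  The chunk
 * is marked IWL_HCMD_DFL_DUP, so the transport kmemdup()s it into its own
 * buffer and kfree()s that copy when the command completes; the caller
 * may therefore free "payload" as soon as this function returns, even
 * though the command is sent asynchronously.
 */
static int example_send_dup_cmd(struct iwl_trans *trans,
				const void *payload, u16 payload_len)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_EXAMPLE,
		.flags = CMD_ASYNC,
		.data = { payload, },
		.len = { payload_len, },
		.dataflags = { IWL_HCMD_DFL_DUP, },
	};

	return iwl_trans_send_cmd(trans, &cmd);
}
```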
@@ -221,14 +221,18 @@ struct iwl_device_cmd {
/**
* struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
*
* IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
* @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
* ring. The transport layer doesn't map the command's buffer to DMA, but
rather copies it to a previously allocated DMA buffer. This flag tells
* the transport layer not to copy the command, but to map the existing
buffer. This can save a memcpy and is worthwhile for very big commands.
* @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
* chunk internally and free it again after the command completes. This
* can (currently) be used only once per command.
*/
enum iwl_hcmd_dataflag {
IWL_HCMD_DFL_NOCOPY = BIT(0),
IWL_HCMD_DFL_DUP = BIT(1),
};
/**
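To make the ownership rules described above concrete, here is a hedged sketch contrasting the flags. The command ID, header structure and table buffer are invented; struct iwl_host_cmd, the dataflags and iwl_trans_send_cmd() are the real API. A chunk without flags is copied into the pre-allocated command buffer, a NOCOPY chunk is mapped in place and must stay valid until the command completes, and a DUP chunk is duplicated by the transport so the caller may free it right away.

```c
/* A minimal sketch, assuming a hypothetical table-programming command. */
#include <linux/types.h>
#include "iwl-trans.h"	/* struct iwl_host_cmd, dataflags, iwl_trans_send_cmd() */

#define REPLY_EXAMPLE_TABLE	0xf1	/* hypothetical command ID, for illustration only */

/* hypothetical fixed part of the command, for illustration only */
struct example_table_hdr {
	__le32 flags;
	__le16 num_entries;
	__le16 reserved;
} __packed;

/*
 * Chunk 0 (the small header) uses the default behaviour and is copied
 * into the command ring; chunk 1 (the large table) is DMA-mapped in
 * place via IWL_HCMD_DFL_NOCOPY, so "table" must stay valid until the
 * command completes.  If the table were about to be freed, marking it
 * IWL_HCMD_DFL_DUP instead would make the transport keep its own copy;
 * combining DUP with NOCOPY on one chunk, or using DUP on more than one
 * chunk, is rejected (with a WARN_ON) by iwl_enqueue_hcmd() below.
 */
static int example_send_table_cmd(struct iwl_trans *trans,
				  const struct example_table_hdr *hdr,
				  const void *table, u16 table_len)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_EXAMPLE_TABLE,
		.flags = CMD_SYNC,
		.data = { hdr, table },
		.len = { sizeof(*hdr), table_len },
		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY },
	};

	return iwl_trans_send_cmd(trans, &cmd);
}
```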
@@ -186,6 +186,8 @@ struct iwl_pcie_tx_queue_entry {
struct iwl_device_cmd *cmd;
struct iwl_device_cmd *copy_cmd;
struct sk_buff *skb;
/* buffer to free after command completes */
const void *free_buf;
struct iwl_cmd_meta meta;
};
@@ -452,6 +452,9 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
/* The original command isn't needed any more */
kfree(txq->entries[cmd_index].copy_cmd);
txq->entries[cmd_index].copy_cmd = NULL;
/* nor is the duplicated part of the command */
kfree(txq->entries[cmd_index].free_buf);
txq->entries[cmd_index].free_buf = NULL;
}
/*
@@ -496,6 +496,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
for (i = 0; i < txq->q.n_window; i++) {
kfree(txq->entries[i].cmd);
kfree(txq->entries[i].copy_cmd);
kfree(txq->entries[i].free_buf);
}
/* De-alloc circular buffer of TFDs */
@@ -517,8 +517,9 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
void *dup_buf = NULL;
dma_addr_t phys_addr;
u32 idx;
int idx;
u16 copy_size, cmd_size;
bool had_nocopy = false;
int i;
@@ -535,10 +536,33 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
continue;
if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
had_nocopy = true;
if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
idx = -EINVAL;
goto free_dup_buf;
}
} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
/*
* This is also a chunk that isn't copied
* to the static buffer so set had_nocopy.
*/
had_nocopy = true;
/* only allowed once */
if (WARN_ON(dup_buf)) {
idx = -EINVAL;
goto free_dup_buf;
}
dup_buf = kmemdup(cmd->data[i], cmd->len[i],
GFP_ATOMIC);
if (!dup_buf)
return -ENOMEM;
} else {
/* NOCOPY must not be followed by normal! */
if (WARN_ON(had_nocopy))
return -EINVAL;
if (WARN_ON(had_nocopy)) {
idx = -EINVAL;
goto free_dup_buf;
}
copy_size += cmd->len[i];
}
cmd_size += cmd->len[i];
@@ -553,8 +577,10 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
"Command %s (%#x) is too large (%d bytes)\n",
trans_pcie_get_cmd_string(trans_pcie, cmd->id),
cmd->id, copy_size))
return -EINVAL;
cmd->id, copy_size)) {
idx = -EINVAL;
goto free_dup_buf;
}
spin_lock_bh(&txq->lock);
@@ -563,7 +589,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
IWL_ERR(trans, "No space in command queue\n");
iwl_op_mode_cmd_queue_full(trans->op_mode);
return -ENOSPC;
idx = -ENOSPC;
goto free_dup_buf;
}
idx = get_cmd_index(q, q->write_ptr);
@@ -587,7 +614,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
if (!cmd->len[i])
continue;
if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
IWL_HCMD_DFL_DUP))
break;
memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
cmd_pos += cmd->len[i];
@@ -629,11 +657,16 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
const void *data = cmd->data[i];
if (!cmd->len[i])
continue;
if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
IWL_HCMD_DFL_DUP)))
continue;
phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
data = dup_buf;
phys_addr = dma_map_single(trans->dev, (void *)data,
cmd->len[i], DMA_BIDIRECTIONAL);
if (dma_mapping_error(trans->dev, phys_addr)) {
iwl_unmap_tfd(trans, out_meta,
@@ -648,6 +681,9 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
}
out_meta->flags = cmd->flags;
if (WARN_ON_ONCE(txq->entries[idx].free_buf))
kfree(txq->entries[idx].free_buf);
txq->entries[idx].free_buf = dup_buf;
txq->need_update = 1;
@@ -664,6 +700,9 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
out:
spin_unlock_bh(&txq->lock);
free_dup_buf:
if (idx < 0)
kfree(dup_buf);
return idx;
}