Commit da99c4b6 authored by Gregory Greenman, committed by John W. Linville

iwlwifi: memory allocation optimization

This patch optimizes memory allocation. The cmd member of
iwl_tx_queue was previously allocated as one contiguous block
of memory. This patch allocates a separate memory chunk for each
command and maps/unmaps these chunks at run time.
Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Parent 4c43e0d0
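In outline, the patch trades one coherent DMA allocation for per-slot chunks plus streaming mappings. Below is a condensed sketch of the before/after pattern, using the names that appear in the diff that follows (error handling and the oversized scan-command slot are elided):

	/* Before: every command slot lived in one coherent DMA block,
	 * allocated up front and pinned for the life of the queue. */
	len = sizeof(struct iwl_cmd) * slots_num;
	txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);

	/* After: each slot is its own kmalloc'd chunk in DMA-able memory... */
	for (i = 0; i < slots_num; i++)
		txq->cmd[i] = kmalloc(sizeof(struct iwl_cmd),
				      GFP_KERNEL | GFP_DMA);

	/* ...streaming-mapped only while the hardware owns it:
	 * map when the command is handed to the device, */
	phys_addr = pci_map_single(priv->pci_dev, txq->cmd[idx],
				   sizeof(struct iwl_cmd), PCI_DMA_TODEVICE);

	/* unmap when the command is reclaimed on completion. */
	pci_unmap_single(priv->pci_dev, phys_addr,
			 sizeof(struct iwl_cmd), PCI_DMA_TODEVICE);

The coherent block had to be sized for the worst case and stayed mapped permanently; with per-slot chunks, only in-flight commands occupy DMA mappings.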
@@ -939,8 +939,8 @@ static void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
 	len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
 
 	if (txq_id != IWL_CMD_QUEUE_NUM) {
-		sta = txq->cmd[txq->q.write_ptr].cmd.tx.sta_id;
-		sec_ctl = txq->cmd[txq->q.write_ptr].cmd.tx.sec_ctl;
+		sta = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
+		sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
 
 		switch (sec_ctl & TX_CMD_SEC_MSK) {
 		case TX_CMD_SEC_CCM:
@@ -979,7 +979,7 @@ static void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
 	u8 sta = 0;
 
 	if (txq_id != IWL_CMD_QUEUE_NUM)
-		sta = txq->cmd[txq->q.read_ptr].cmd.tx.sta_id;
+		sta = txq->cmd[txq->q.read_ptr]->cmd.tx.sta_id;
 
 	shared_data->queues_byte_cnt_tbls[txq_id].tfd_offset[txq->q.read_ptr].
 		val = cpu_to_le16(1 | (sta << 12));
...
@@ -135,8 +135,7 @@ struct iwl_tx_info {
 struct iwl_tx_queue {
 	struct iwl_queue q;
 	struct iwl_tfd_frame *bd;
-	struct iwl_cmd *cmd;
-	dma_addr_t dma_addr_cmd;
+	struct iwl_cmd *cmd[TFD_TX_CMD_SLOTS];
 	struct iwl_tx_info *txb;
 	int need_update;
 	int sched_retry;
...
@@ -228,7 +228,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 		 * TX cmd queue. Otherwise in case the cmd comes
 		 * in later, it will possibly set an invalid
 		 * address (cmd->meta.source). */
-		qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
+		qcmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
 		qcmd->meta.flags &= ~CMD_WANT_SKB;
 	}
 fail:
...
@@ -208,11 +208,12 @@ EXPORT_SYMBOL(iwl_txq_update_write_ptr);
  * Free all buffers.
  * 0-fill, but do not free "txq" descriptor structure.
  */
-static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 {
+	struct iwl_tx_queue *txq = &priv->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
 	struct pci_dev *dev = priv->pci_dev;
-	int len;
+	int i, slots_num, len;
 
 	if (q->n_bd == 0)
 		return;
@@ -227,7 +228,12 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 		len += IWL_MAX_SCAN_SIZE;
 
 	/* De-alloc array of command/tx buffers */
-	pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
+	slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
+			TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+	for (i = 0; i < slots_num; i++)
+		kfree(txq->cmd[i]);
+	if (txq_id == IWL_CMD_QUEUE_NUM)
+		kfree(txq->cmd[slots_num]);
 
 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd)
@@ -400,8 +406,7 @@ static int iwl_tx_queue_init(struct iwl_priv *priv,
 			      struct iwl_tx_queue *txq,
 			      int slots_num, u32 txq_id)
 {
-	struct pci_dev *dev = priv->pci_dev;
-	int len;
+	int i, len;
 	int rc = 0;
 
 	/*
@@ -412,17 +417,25 @@ static int iwl_tx_queue_init(struct iwl_priv *priv,
 	 * For normal Tx queues (all other queues), no super-size command
 	 * space is needed.
 	 */
-	len = sizeof(struct iwl_cmd) * slots_num;
-	if (txq_id == IWL_CMD_QUEUE_NUM)
-		len += IWL_MAX_SCAN_SIZE;
-	txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
-	if (!txq->cmd)
-		return -ENOMEM;
+	len = sizeof(struct iwl_cmd);
+	for (i = 0; i <= slots_num; i++) {
+		if (i == slots_num) {
+			if (txq_id == IWL_CMD_QUEUE_NUM)
+				len += IWL_MAX_SCAN_SIZE;
+			else
+				continue;
+		}
+
+		txq->cmd[i] = kmalloc(len, GFP_KERNEL | GFP_DMA);
+		if (!txq->cmd[i])
+			return -ENOMEM;
+	}
 
 	/* Alloc driver data array and TFD circular buffer */
 	rc = iwl_tx_queue_alloc(priv, txq, txq_id);
 	if (rc) {
-		pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
+		for (i = 0; i < slots_num; i++)
+			kfree(txq->cmd[i]);
 		return -ENOMEM;
 	}
@@ -451,7 +464,7 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
 	/* Tx queues */
 	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-		iwl_tx_queue_free(priv, &priv->txq[txq_id]);
+		iwl_tx_queue_free(priv, txq_id);
 
 	/* Keep-warm buffer */
 	iwl_kw_free(priv);
@@ -859,7 +872,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	txq->txb[q->write_ptr].skb[0] = skb;
 
 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
-	out_cmd = &txq->cmd[idx];
+	out_cmd = txq->cmd[idx];
 	tx_cmd = &out_cmd->cmd.tx;
 	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
 	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
@@ -899,8 +912,9 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	/* Physical address of this Tx command's header (not MAC header!),
 	 * within command buffer array. */
-	txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
-		     offsetof(struct iwl_cmd, hdr);
+	txcmd_phys = pci_map_single(priv->pci_dev, out_cmd,
+			sizeof(struct iwl_cmd), PCI_DMA_TODEVICE);
+	txcmd_phys += offsetof(struct iwl_cmd, hdr);
 
 	/* Add buffer containing Tx command and MAC(!) header to TFD's
 	 * first entry */
@@ -1004,7 +1018,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	u32 idx;
 	u16 fix_size;
 	dma_addr_t phys_addr;
-	int ret;
+	int len, ret;
 	unsigned long flags;
 
 	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
@@ -1034,7 +1048,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	control_flags = (u32 *) tfd;
 
 	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
-	out_cmd = &txq->cmd[idx];
+	out_cmd = txq->cmd[idx];
 	out_cmd->hdr.cmd = cmd->id;
 	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
@@ -1048,9 +1062,11 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 			INDEX_TO_SEQ(q->write_ptr));
 	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
 		out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
 
-	phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
-			offsetof(struct iwl_cmd, hdr);
+	len = (idx == TFD_CMD_SLOTS) ?
+			IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
+	phys_addr = pci_map_single(priv->pci_dev, out_cmd, len,
+			PCI_DMA_TODEVICE);
+	phys_addr += offsetof(struct iwl_cmd, hdr);
 	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
 
 	IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
@@ -1115,6 +1131,9 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 {
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
+	struct iwl_tfd_frame *bd = &txq->bd[index];
+	dma_addr_t dma_addr;
+	int is_odd, buf_len;
 	int nfreed = 0;
 
 	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
@@ -1132,6 +1151,19 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 				q->write_ptr, q->read_ptr);
 			queue_work(priv->workqueue, &priv->restart);
 		}
+		is_odd = (index/2) & 0x1;
+		if (is_odd) {
+			dma_addr = IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
+					(IWL_GET_BITS(bd->pa[index],
+							tb2_addr_hi20) << 16);
+			buf_len = IWL_GET_BITS(bd->pa[index], tb2_len);
+		} else {
+			dma_addr = le32_to_cpu(bd->pa[index].tb1_addr);
+			buf_len = IWL_GET_BITS(bd->pa[index], tb1_len);
+		}
+		pci_unmap_single(priv->pci_dev, dma_addr, buf_len,
+				PCI_DMA_TODEVICE);
+
 		nfreed++;
 	}
 }
@@ -1163,7 +1195,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
 
 	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
-	cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
+	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
 
 	/* Input error checking is done when commands are added to queue. */
 	if (cmd->meta.flags & CMD_WANT_SKB) {
...