Commit c7df1f4b authored by Johannes Berg

iwlwifi: pcie: rework RX buffer list init and freeing

The PCIe code has an array of buffer descriptors (RXBs) that have pages
and DMA mappings attached. In regular use, the array isn't used and the
buffers are either on the hardware receive queue or the rx_free/rx_used
lists for recycling.
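(For orientation, the arrangement described above looks roughly like the
sketch below. The field names follow the driver, but this is a simplified
excerpt for illustration, not the full kernel definitions.)

struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;	/* DMA mapping of the receive page */
	struct page *page;	/* backing page; NULL when not allocated */
	struct list_head list;	/* links the RXB into rx_free/rx_used */
};

struct iwl_rxq {
	/* ... hardware queue pointers, lock, status, etc. ... */
	struct iwl_rx_mem_buffer pool[RX_FREE_BUFFERS + RX_QUEUE_SIZE];
	struct list_head rx_free;	/* RXBs with pages, ready to restock */
	struct list_head rx_used;	/* RXBs waiting for a fresh page */
	u32 free_count;			/* number of RXBs on rx_free */
};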

Occasionally, during module unload, we'd see a warning from this:

WARNING: at lib/list_debug.c:32 __list_add+0x91/0xa0()
list_add corruption. prev->next should be next (c31c98cc), but was c31c80bc. (prev=c31c80bc).
Pid: 519, comm: rmmod Tainted: G        W  O 3.4.24-dev #3
Call Trace:
 [<c10335b2>] warn_slowpath_common+0x72/0xa0
 [<c1033683>] warn_slowpath_fmt+0x33/0x40
 [<c12e31d1>] __list_add+0x91/0xa0
 [<fdf2083c>] iwl_pcie_rxq_free_rbs+0xcc/0xe0 [iwlwifi]
 [<fdf21b3f>] iwl_pcie_rx_free+0x3f/0x210 [iwlwifi]
 [<fdf2dd7a>] iwl_trans_pcie_free+0x2a/0x90 [iwlwifi]

The reason for this seems to be that in iwl_pcie_rxq_free_rbs() we use
the array to free all buffers (the hardware receive queue isn't in use
any more at this point). The function also adds all buffers to rx_used
because it's also used during initialisation (when no freeing happens).
This can cause the warning because it may add entries to the list that
are already on it. Luckily, this is harmless because it can only happen
when the entire data structure is freed anyway, since during init both
lists are initialized from scratch.
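The warning comes from CONFIG_DEBUG_LIST's sanity checks in __list_add():
before linking a new entry, the kernel verifies that prev->next still points
at next. Re-adding an entry that is still linked elsewhere leaves stale
pointers behind, and the next add on the stale list trips the check. The
standalone sketch below re-creates those checks in userspace to show the
mechanism; the rx_free/rx_used names mirror the driver, but the sequence is
only shaped like the bug, not the exact trace above.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* userspace re-creation of the checked list_add_tail() from
 * lib/list_debug.c (simplified: the WARNs become printf) */
static void list_add_tail_dbg(struct list_head *new, struct list_head *head)
{
	struct list_head *prev = head->prev, *next = head;

	if (next->prev != prev)
		printf("list_add corruption. next->prev should be prev (%p), but was %p.\n",
		       (void *)prev, (void *)next->prev);
	if (prev->next != next)
		printf("list_add corruption. prev->next should be next (%p), but was %p. (prev=%p)\n",
		       (void *)next, (void *)prev->next, (void *)prev);

	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

int main(void)
{
	struct list_head rx_free = LIST_HEAD_INIT(rx_free);
	struct list_head rx_used = LIST_HEAD_INIT(rx_used);
	struct list_head rxb = LIST_HEAD_INIT(rxb);
	struct list_head other = LIST_HEAD_INIT(other);

	list_add_tail_dbg(&rxb, &rx_free);	/* rxb recycled onto rx_free */
	list_add_tail_dbg(&rxb, &rx_used);	/* bug: re-added without list_del() */
	list_add_tail_dbg(&other, &rx_free);	/* rx_free's tail is stale -> warning */
	return 0;
}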

Disentangle this code and treat init and free separately. During init, all
we need to do is put every buffer onto the rx_used list after freeing the
pages (this handles switching between 4k/8k buffers); during free, no list
manipulation is necessary at all.

Reviewed-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Parent 3a3cb92e
@@ -355,19 +355,16 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	int i;
 
-	/* Fill the rx_used queue with _all_ of the Rx buffers */
+	lockdep_assert_held(&rxq->lock);
+
 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
-		/* In the reset function, these buffers may have been allocated
-		 * to an SKB, so we need to unmap and free potential storage */
-		if (rxq->pool[i].page != NULL) {
-			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
-				       PAGE_SIZE << trans_pcie->rx_page_order,
-				       DMA_FROM_DEVICE);
-			__free_pages(rxq->pool[i].page,
-				     trans_pcie->rx_page_order);
-			rxq->pool[i].page = NULL;
-		}
-		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+		if (!rxq->pool[i].page)
+			continue;
+		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
+			       PAGE_SIZE << trans_pcie->rx_page_order,
+			       DMA_FROM_DEVICE);
+		__free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
+		rxq->pool[i].page = NULL;
 	}
 }
 
@@ -491,6 +488,20 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
 	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
 }
 
+static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
+{
+	int i;
+
+	lockdep_assert_held(&rxq->lock);
+
+	INIT_LIST_HEAD(&rxq->rx_free);
+	INIT_LIST_HEAD(&rxq->rx_used);
+	rxq->free_count = 0;
+
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+		list_add(&rxq->pool[i].list, &rxq->rx_used);
+}
+
 int iwl_pcie_rx_init(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -505,13 +516,12 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	}
 
 	spin_lock_irqsave(&rxq->lock, flags);
-	INIT_LIST_HEAD(&rxq->rx_free);
-	INIT_LIST_HEAD(&rxq->rx_used);
 
-	INIT_WORK(&trans_pcie->rx_replenish,
-		  iwl_pcie_rx_replenish_work);
+	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
 
+	/* free all first - we might be reconfigured for a different size */
 	iwl_pcie_rxq_free_rbs(trans);
+	iwl_pcie_rx_init_rxb_lists(rxq);
 
 	for (i = 0; i < RX_QUEUE_SIZE; i++)
 		rxq->queue[i] = NULL;
@@ -520,7 +530,6 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	 * not restocked the Rx queue with fresh buffers */
 	rxq->read = rxq->write = 0;
 	rxq->write_actual = 0;
-	rxq->free_count = 0;
 	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
 
 	spin_unlock_irqrestore(&rxq->lock, flags);
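
Pieced together from the hunks above, the reworked init/free split reads
approximately as follows (context that the diff elides is filled in by hand,
so treat this as a reconstruction rather than the verbatim file):

/* free path: unmap and free the pages, no list manipulation */
static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	lockdep_assert_held(&rxq->lock);

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		if (!rxq->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
		rxq->pool[i].page = NULL;
	}
}

/* init path: reset both lists and put every RXB on rx_used */
static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	int i;

	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add(&rxq->pool[i].list, &rxq->rx_used);
}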