Commit 51232f7e authored by Emmanuel Grumbach

iwlwifi: pcie: don't disable interrupts when rxq->lock is taken

This lock was never acquired in the primary interrupt
handler, but since it was taken together with irq_lock,
which had to disable interrupts, rxq->lock had to disable
interrupts too.
Now that trans_pcie->irq_lock is no longer acquired in the
primary interrupt handler, rxq->lock can leave interrupts
enabled.
Reviewed-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Parent 7b70bd63
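The change boils down to replacing spin_lock_irqsave()/spin_unlock_irqrestore()
with plain spin_lock()/spin_unlock() on rxq->lock. A minimal, self-contained
sketch of the two locking patterns (not taken from the driver; the lock and
counter below are hypothetical):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */
	static int example_count;		/* hypothetical shared state */

	/* New pattern: the lock is never taken from hard-IRQ context,
	 * so interrupts can stay enabled while it is held. */
	static void example_update(void)
	{
		spin_lock(&example_lock);
		example_count++;
		spin_unlock(&example_lock);
	}

	/* Old pattern: needed when an interrupt handler on the same CPU
	 * could also contend for the lock, or when the saved interrupt
	 * state must be restored on unlock. */
	static void example_update_irqsave(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		example_count++;
		spin_unlock_irqrestore(&example_lock, flags);
	}

Since rxq->lock is never taken in the primary (hard-IRQ) handler, the cheaper
plain variant is sufficient here.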
@@ -148,10 +148,9 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
 static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
 				    struct iwl_rxq *rxq)
 {
-	unsigned long flags;
 	u32 reg;
 
-	spin_lock_irqsave(&rxq->lock, flags);
+	spin_lock(&rxq->lock);
 
 	if (rxq->need_update == 0)
 		goto exit_unlock;
@@ -190,7 +189,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
 	rxq->need_update = 0;
 
  exit_unlock:
-	spin_unlock_irqrestore(&rxq->lock, flags);
+	spin_unlock(&rxq->lock);
 }
 
 /*
@@ -209,7 +208,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
-	unsigned long flags;
 
 	/*
 	 * If the device isn't enabled - not need to try to add buffers...
@@ -222,7 +220,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
 		return;
 
-	spin_lock_irqsave(&rxq->lock, flags);
+	spin_lock(&rxq->lock);
 	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
 		/* The overwritten rxb must be a used one */
 		rxb = rxq->queue[rxq->write];
@@ -239,7 +237,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
 		rxq->free_count--;
 	}
-	spin_unlock_irqrestore(&rxq->lock, flags);
+	spin_unlock(&rxq->lock);
 	/* If the pre-allocated buffer pool is dropping low, schedule to
 	 * refill it */
 	if (rxq->free_count <= RX_LOW_WATERMARK)
@@ -248,9 +246,9 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 	/* If we've added more space for the firmware to place data, tell it.
 	 * Increment device's write pointer in multiples of 8. */
 	if (rxq->write_actual != (rxq->write & ~0x7)) {
-		spin_lock_irqsave(&rxq->lock, flags);
+		spin_lock(&rxq->lock);
 		rxq->need_update = 1;
-		spin_unlock_irqrestore(&rxq->lock, flags);
+		spin_unlock(&rxq->lock);
 		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
 	}
 }
@@ -270,16 +268,15 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
 	struct page *page;
-	unsigned long flags;
 	gfp_t gfp_mask = priority;
 
 	while (1) {
-		spin_lock_irqsave(&rxq->lock, flags);
+		spin_lock(&rxq->lock);
 		if (list_empty(&rxq->rx_used)) {
-			spin_unlock_irqrestore(&rxq->lock, flags);
+			spin_unlock(&rxq->lock);
 			return;
 		}
-		spin_unlock_irqrestore(&rxq->lock, flags);
+		spin_unlock(&rxq->lock);
 
 		if (rxq->free_count > RX_LOW_WATERMARK)
 			gfp_mask |= __GFP_NOWARN;
@@ -308,17 +305,17 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 			return;
 		}
 
-		spin_lock_irqsave(&rxq->lock, flags);
+		spin_lock(&rxq->lock);
 
 		if (list_empty(&rxq->rx_used)) {
-			spin_unlock_irqrestore(&rxq->lock, flags);
+			spin_unlock(&rxq->lock);
 			__free_pages(page, trans_pcie->rx_page_order);
 			return;
 		}
 		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
 				       list);
 		list_del(&rxb->list);
-		spin_unlock_irqrestore(&rxq->lock, flags);
+		spin_unlock(&rxq->lock);
 
 		BUG_ON(rxb->page);
 		rxb->page = page;
@@ -329,9 +326,9 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 				     DMA_FROM_DEVICE);
 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
 			rxb->page = NULL;
-			spin_lock_irqsave(&rxq->lock, flags);
+			spin_lock(&rxq->lock);
 			list_add(&rxb->list, &rxq->rx_used);
-			spin_unlock_irqrestore(&rxq->lock, flags);
+			spin_unlock(&rxq->lock);
 			__free_pages(page, trans_pcie->rx_page_order);
 			return;
 		}
@@ -340,12 +337,12 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 		/* and also 256 byte aligned! */
 		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
 
-		spin_lock_irqsave(&rxq->lock, flags);
+		spin_lock(&rxq->lock);
 
 		list_add_tail(&rxb->list, &rxq->rx_free);
 		rxq->free_count++;
 
-		spin_unlock_irqrestore(&rxq->lock, flags);
+		spin_unlock(&rxq->lock);
 	}
 }
@@ -510,7 +507,6 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	int i, err;
-	unsigned long flags;
 
 	if (!rxq->bd) {
 		err = iwl_pcie_rx_alloc(trans);
@@ -518,7 +514,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 			return err;
 	}
 
-	spin_lock_irqsave(&rxq->lock, flags);
+	spin_lock(&rxq->lock);
 
 	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
 
@@ -534,7 +530,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	rxq->read = rxq->write = 0;
 	rxq->write_actual = 0;
 	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
-	spin_unlock_irqrestore(&rxq->lock, flags);
+	spin_unlock(&rxq->lock);
 
 	iwl_pcie_rx_replenish(trans);
 
@@ -552,7 +548,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	unsigned long flags;
 
 	/*if rxq->bd is NULL, it means that nothing has been allocated,
 	 * exit now */
@@ -563,9 +558,9 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 	cancel_work_sync(&trans_pcie->rx_replenish);
 
-	spin_lock_irqsave(&rxq->lock, flags);
+	spin_lock(&rxq->lock);
 	iwl_pcie_rxq_free_rbs(trans);
-	spin_unlock_irqrestore(&rxq->lock, flags);
+	spin_unlock(&rxq->lock);
 
 	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
 			  rxq->bd, rxq->bd_dma);
@@ -588,7 +583,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
-	unsigned long flags;
 	bool page_stolen = false;
 	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
 	u32 offset = 0;
@@ -690,7 +684,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 	/* Reuse the page if possible. For notification packets and
 	 * SKBs that fail to Rx correctly, add them back into the
 	 * rx_free list for reuse later. */
-	spin_lock_irqsave(&rxq->lock, flags);
+	spin_lock(&rxq->lock);
 	if (rxb->page != NULL) {
 		rxb->page_dma =
 			dma_map_page(trans->dev, rxb->page, 0,
@@ -711,7 +705,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 		}
 	} else
 		list_add_tail(&rxb->list, &rxq->rx_used);
-	spin_unlock_irqrestore(&rxq->lock, flags);
+	spin_unlock(&rxq->lock);
 }
 
 /*
...