提交 2fe30dce 编写于 作者: F Felix Fietkau

mt76: reduce locking in mt76_dma_tx_cleanup

q->tail can be safely updated without locking, because there is no
concurrent access. If called from outside of the tasklet (for flushing),
the tasklet is always disabled.
q->queued can be safely read without locking, as long as the decrement
happens within the locked section.
This patch allows cleaning up tx packets outside of the section that holds
the queue lock, for improved performance.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
上级 90fdc171
...@@ -149,31 +149,29 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) ...@@ -149,31 +149,29 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
struct mt76_sw_queue *sq = &dev->q_tx[qid]; struct mt76_sw_queue *sq = &dev->q_tx[qid];
struct mt76_queue *q = sq->q; struct mt76_queue *q = sq->q;
struct mt76_queue_entry entry; struct mt76_queue_entry entry;
unsigned int n_swq_queued[4] = {};
unsigned int n_queued = 0;
bool wake = false; bool wake = false;
int last; int i, last;
if (!q) if (!q)
return; return;
spin_lock_bh(&q->lock);
if (flush) if (flush)
last = -1; last = -1;
else else
last = readl(&q->regs->dma_idx); last = readl(&q->regs->dma_idx);
while (q->queued && q->tail != last) { while ((q->queued > n_queued) && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
if (entry.schedule) if (entry.schedule)
dev->q_tx[entry.qid].swq_queued--; n_swq_queued[entry.qid]++;
q->tail = (q->tail + 1) % q->ndesc; q->tail = (q->tail + 1) % q->ndesc;
q->queued--; n_queued++;
if (entry.skb) { if (entry.skb)
spin_unlock_bh(&q->lock);
dev->drv->tx_complete_skb(dev, qid, &entry); dev->drv->tx_complete_skb(dev, qid, &entry);
spin_lock_bh(&q->lock);
}
if (entry.txwi) { if (entry.txwi) {
mt76_put_txwi(dev, entry.txwi); mt76_put_txwi(dev, entry.txwi);
...@@ -184,6 +182,16 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) ...@@ -184,6 +182,16 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
last = readl(&q->regs->dma_idx); last = readl(&q->regs->dma_idx);
} }
spin_lock_bh(&q->lock);
q->queued -= n_queued;
for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
if (!n_swq_queued[i])
continue;
dev->q_tx[i].swq_queued -= n_swq_queued[i];
}
if (flush) if (flush)
mt76_dma_sync_idx(dev, q); mt76_dma_sync_idx(dev, q);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册