Commit 3ad29529 authored by Felix Fietkau, committed by John W. Linville

ath9k: simplify tx locking

Instead of releasing and taking back the lock over and over again in the
tx path, hold the lock a bit longer, requiring far fewer lock/unlock pairs.
This makes locking much easier to review and should not have any noticeable
performance/latency impact.
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Parent 6ee8284e
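To make the shape of the change easier to see, here is a minimal userspace sketch of the two locking patterns, using a pthread mutex in place of the driver's txq->axq_lock spinlock. The queue, frame count, and function names (queue_lock, complete_frame, drain_old, drain_new) are invented for illustration and are not code from this patch.

/*
 * Illustrative sketch only: the driver uses spin_lock_bh() on
 * txq->axq_lock, not pthreads, and all names here are made up.
 */
#include <pthread.h>
#include <stdio.h>

#define NFRAMES 4

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue[NFRAMES] = { 10, 11, 12, 13 };

/* Stand-in for the tx completion work done per frame. */
static void complete_frame(int frame)
{
    printf("completed frame %d\n", frame);
}

/* Old shape: the queue lock is dropped and retaken around every
 * completion call, because the completion path used to take the
 * lock itself. */
static void drain_old(void)
{
    pthread_mutex_lock(&queue_lock);
    for (int i = 0; i < NFRAMES; i++) {
        int frame = queue[i];

        pthread_mutex_unlock(&queue_lock);  /* release per frame ... */
        complete_frame(frame);
        pthread_mutex_lock(&queue_lock);    /* ... and reacquire */
    }
    pthread_mutex_unlock(&queue_lock);
}

/* New shape: one lock/unlock pair around the whole drain; the
 * completion path runs with the lock already held and does no
 * locking of its own. */
static void drain_new(void)
{
    pthread_mutex_lock(&queue_lock);
    for (int i = 0; i < NFRAMES; i++)
        complete_frame(queue[i]);
    pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
    drain_old();
    drain_new();
    return 0;
}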
@@ -169,13 +169,11 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
     INIT_LIST_HEAD(&bf_head);
     memset(&ts, 0, sizeof(ts));
-    spin_lock_bh(&txq->axq_lock);
     while ((skb = __skb_dequeue(&tid->buf_q))) {
         fi = get_frame_info(skb);
         bf = fi->bf;
-        spin_unlock_bh(&txq->axq_lock);
         if (bf && fi->retries) {
             list_add_tail(&bf->list, &bf_head);
             ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
@@ -184,7 +182,6 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
         } else {
             ath_tx_send_normal(sc, txq, NULL, skb);
         }
-        spin_lock_bh(&txq->axq_lock);
     }
     if (tid->baw_head == tid->baw_tail) {
@@ -192,8 +189,6 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
         tid->state &= ~AGGR_CLEANUP;
     }
-    spin_unlock_bh(&txq->axq_lock);
     if (sendbar)
         ath_send_bar(tid, tid->seq_start);
 }
@@ -254,9 +249,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
         bf = fi->bf;
         if (!bf) {
-            spin_unlock(&txq->axq_lock);
             ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
-            spin_lock(&txq->axq_lock);
             continue;
         }
@@ -265,9 +258,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
         if (fi->retries)
             ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
-        spin_unlock(&txq->axq_lock);
         ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
-        spin_lock(&txq->axq_lock);
     }
     tid->seq_next = tid->seq_start;
@@ -515,9 +506,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
              * complete the acked-ones/xretried ones; update
              * block-ack window
              */
-            spin_lock_bh(&txq->axq_lock);
             ath_tx_update_baw(sc, tid, seqno);
-            spin_unlock_bh(&txq->axq_lock);
             if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                 memcpy(tx_info->control.rates, rates, sizeof(rates));
@@ -540,9 +529,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                  * run out of tx buf.
                  */
                 if (!tbf) {
-                    spin_lock_bh(&txq->axq_lock);
                     ath_tx_update_baw(sc, tid, seqno);
-                    spin_unlock_bh(&txq->axq_lock);
                     ath_tx_complete_buf(sc, bf, txq,
                                         &bf_head, ts, 0);
@@ -572,7 +559,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
         if (an->sleeping)
             ieee80211_sta_set_buffered(sta, tid->tidno, true);
-        spin_lock_bh(&txq->axq_lock);
         skb_queue_splice(&bf_pending, &tid->buf_q);
         if (!an->sleeping) {
             ath_tx_queue_tid(txq, tid);
@@ -580,7 +566,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
             if (ts->ts_status & ATH9K_TXERR_FILT)
                 tid->ac->clear_ps_filter = true;
         }
-        spin_unlock_bh(&txq->axq_lock);
     }
     if (tid->state & AGGR_CLEANUP)
@@ -1179,9 +1164,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
         txtid->state |= AGGR_CLEANUP;
     else
         txtid->state &= ~AGGR_ADDBA_COMPLETE;
-    spin_unlock_bh(&txq->axq_lock);
     ath_tx_flush_tid(sc, txtid);
+    spin_unlock_bh(&txq->axq_lock);
 }
 void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
@@ -1423,8 +1408,6 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
 static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
                                struct list_head *list, bool retry_tx)
-    __releases(txq->axq_lock)
-    __acquires(txq->axq_lock)
 {
     struct ath_buf *bf, *lastbf;
     struct list_head bf_head;
@@ -1451,13 +1434,11 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
         if (bf_is_ampdu_not_probing(bf))
             txq->axq_ampdu_depth--;
-        spin_unlock_bh(&txq->axq_lock);
         if (bf_isampdu(bf))
             ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
                                  retry_tx);
         else
             ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
-        spin_lock_bh(&txq->axq_lock);
     }
 }
@@ -1836,7 +1817,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
     struct ath_buf *bf;
     u8 tidno;
-    spin_lock_bh(&txctl->txq->axq_lock);
     if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
         ieee80211_is_data_qos(hdr->frame_control)) {
         tidno = ieee80211_get_qos_ctl(hdr)[0] &
@@ -1855,7 +1835,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
     } else {
         bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
         if (!bf)
-            goto out;
+            return;
         bf->bf_state.bfs_paprd = txctl->paprd;
@@ -1864,9 +1844,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
         ath_tx_send_normal(sc, txctl->txq, tid, skb);
     }
-out:
-    spin_unlock_bh(&txctl->txq->axq_lock);
 }
 /* Upon failure caller should free skb */
@@ -1933,9 +1910,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
         ieee80211_stop_queue(sc->hw, q);
         txq->stopped = 1;
     }
-    spin_unlock_bh(&txq->axq_lock);
     ath_tx_start_dma(sc, skb, txctl);
+    spin_unlock_bh(&txq->axq_lock);
     return 0;
 }
@@ -1981,7 +1960,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
     q = skb_get_queue_mapping(skb);
     if (txq == sc->tx.txq_map[q]) {
-        spin_lock_bh(&txq->axq_lock);
         if (WARN_ON(--txq->pending_frames < 0))
             txq->pending_frames = 0;
@@ -1989,7 +1967,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
             ieee80211_wake_queue(sc->hw, q);
             txq->stopped = 0;
         }
-        spin_unlock_bh(&txq->axq_lock);
     }
     ieee80211_tx_status(hw, skb);
@@ -2095,8 +2072,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
 static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
                                   struct ath_tx_status *ts, struct ath_buf *bf,
                                   struct list_head *bf_head)
-    __releases(txq->axq_lock)
-    __acquires(txq->axq_lock)
 {
     int txok;
@@ -2106,16 +2081,12 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
     if (bf_is_ampdu_not_probing(bf))
         txq->axq_ampdu_depth--;
-    spin_unlock_bh(&txq->axq_lock);
     if (!bf_isampdu(bf)) {
         ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
         ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
     } else
         ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
-    spin_lock_bh(&txq->axq_lock);
     if (sc->sc_flags & SC_OP_TXAGGR)
         ath_txq_schedule(sc, txq);
 }
......