提交 fba8248e 编写于 作者: S Sara Sharon 提交者: Luca Coelho

iwlwifi: mvm: get rid of tx_path_lock

TX path lock was introduced in order to prevent out of order
invocations of TX.

This can happen in the following flow:

TX path invoked from net dev
Packet dequeued
	TX path invoked from RX path
	Packet dequeued
	Packet TXed
Packet TXed

However, we don't really need a lock. If the TX path is already
invoked from some location, other paths can simply abort their
execution instead of waiting for the first path to finish, only to
then discover the queue is (likely) empty or stopped.

Replace the lock with an atomic variable to track TX ownership.
This simplifies the locking dependencies between RX and TX paths,
and should improve performance.
Signed-off-by: Sara Sharon <sara.sharon@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
上级 1f7698ab
......@@ -878,25 +878,45 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
struct sk_buff *skb = NULL;
spin_lock(&mvmtxq->tx_path_lock);
/*
* No need for threads to be pending here, they can leave the first
* taker all the work.
*
* mvmtxq->tx_request logic:
*
* If 0, no one is currently TXing, set to 1 to indicate current thread
* will now start TX and other threads should quit.
*
* If 1, another thread is currently TXing, set to 2 to indicate to
* that thread that there was another request. Since that request may
* have raced with the check whether the queue is empty, the TXing
* thread should check the queue's status one more time before leaving.
* This check is done in order to not leave any TX hanging in the queue
* until the next TX invocation (which may not even happen).
*
* If 2, another thread is currently TXing, and it will already double
* check the queue, so do nothing.
*/
if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2))
return;
rcu_read_lock();
while (likely(!mvmtxq->stopped &&
(mvm->trans->system_pm_mode ==
IWL_PLAT_PM_MODE_DISABLED))) {
skb = ieee80211_tx_dequeue(hw, txq);
do {
while (likely(!mvmtxq->stopped &&
(mvm->trans->system_pm_mode ==
IWL_PLAT_PM_MODE_DISABLED))) {
skb = ieee80211_tx_dequeue(hw, txq);
if (!skb)
break;
if (!skb)
break;
if (!txq->sta)
iwl_mvm_tx_skb_non_sta(mvm, skb);
else
iwl_mvm_tx_skb(mvm, skb, txq->sta);
}
if (!txq->sta)
iwl_mvm_tx_skb_non_sta(mvm, skb);
else
iwl_mvm_tx_skb(mvm, skb, txq->sta);
}
} while (atomic_dec_return(&mvmtxq->tx_request));
rcu_read_unlock();
spin_unlock(&mvmtxq->tx_path_lock);
}
static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
......
......@@ -781,8 +781,7 @@ struct iwl_mvm_geo_profile {
struct iwl_mvm_txq {
struct list_head list;
u16 txq_id;
/* Protects TX path invocation from two places */
spinlock_t tx_path_lock;
atomic_t tx_request;
bool stopped;
};
......
......@@ -1403,9 +1403,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
list_del_init(&mvmtxq->list);
local_bh_disable();
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
local_bh_enable();
}
mutex_unlock(&mvm->mutex);
......@@ -1646,7 +1644,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
INIT_LIST_HEAD(&mvmtxq->list);
spin_lock_init(&mvmtxq->tx_path_lock);
atomic_set(&mvmtxq->tx_request, 0);
}
mvm_sta->agg_tids = 0;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册