Commit 0636b938 authored by Sara Sharon, committed by Luca Coelho

iwlwifi: mvm: implement driver RX queues sync command

mac80211 will call the driver whenever there is a race between
RSS queues and control path that requires a processing of all
pending frames in RSS queues.
Implement that by utilizing the internal notification mechanism:
queue a message to all queues. When the message is received on
a queue it decrements the atomic counter. This guarantees that
all pending frames in the RX queue were processed since the message
is in order inside the queue.
Signed-off-by: Sara Sharon <sara.sharon@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Parent e5ed1792
...@@ -437,9 +437,11 @@ struct iwl_rxq_sync_notification { ...@@ -437,9 +437,11 @@ struct iwl_rxq_sync_notification {
/** /**
* Internal message identifier * Internal message identifier
* *
* @IWL_MVM_RXQ_SYNC: sync RSS queues
* @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
*/ */
enum iwl_mvm_rxq_notif_type { enum iwl_mvm_rxq_notif_type {
IWL_MVM_RXQ_SYNC,
IWL_MVM_RXQ_NOTIF_DEL_BA, IWL_MVM_RXQ_NOTIF_DEL_BA,
}; };
...@@ -448,10 +450,12 @@ enum iwl_mvm_rxq_notif_type { ...@@ -448,10 +450,12 @@ enum iwl_mvm_rxq_notif_type {
* in &iwl_rxq_sync_cmd. Should be DWORD aligned. * in &iwl_rxq_sync_cmd. Should be DWORD aligned.
* *
* @type: value from &iwl_mvm_rxq_notif_type * @type: value from &iwl_mvm_rxq_notif_type
* @cookie: internal cookie to identify old notifications
* @data: payload * @data: payload
*/ */
struct iwl_mvm_internal_rxq_notif { struct iwl_mvm_internal_rxq_notif {
u32 type; u32 type;
u32 cookie;
u8 data[]; u8 data[];
} __packed; } __packed;
......
...@@ -4037,6 +4037,47 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, ...@@ -4037,6 +4037,47 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
} }
} }
/*
 * iwl_mvm_sync_rx_queues_internal - flush all pending frames on the RSS RX queues
 *
 * Posts an IWL_MVM_RXQ_SYNC internal notification to every RX queue and
 * waits until each queue has consumed it.  Because the notification is
 * delivered in-order within each queue, its arrival guarantees that every
 * frame queued before it has already been processed.
 *
 * Caller must hold mvm->mutex (asserted below).  No-op on devices without
 * the new multi-queue RX API.
 */
static void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm)
{
	struct iwl_mvm_internal_rxq_notif data = {
		.type = IWL_MVM_RXQ_SYNC,
		/* cookie lets the RX path discard stale/expired sync messages */
		.cookie = mvm->queue_sync_cookie,
	};
	/*
	 * NOTE(review): this wait queue is on the stack and no visible code
	 * (the RX notification handler only atomic_dec()s the counter) ever
	 * wakes it, so wait_event_timeout() below can only see the counter
	 * reach zero at its initial check or when the full HZ timeout
	 * expires — the sync appears to always cost up to one second even
	 * when all queues answer immediately.  Confirm whether a shared
	 * waitqueue plus a wake_up() in the handler is intended.
	 */
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
	/* one bit per RX queue: address the sync message to all of them */
	u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* single-queue devices have nothing to synchronize */
	if (!iwl_mvm_has_new_rx_api(mvm))
		return;

	/* each RX queue decrements this when it sees the sync notification */
	atomic_set(&mvm->queue_sync_counter, mvm->trans->num_rx_queues);

	ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)&data, sizeof(data));
	if (ret) {
		IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
		goto out;
	}
	ret = wait_event_timeout(notif_waitq,
				 atomic_read(&mvm->queue_sync_counter) == 0,
				 HZ);
	/* ret == 0 means timeout: some queue never acked the sync message */
	WARN_ON_ONCE(!ret);

out:
	/* reset unconditionally; bump the cookie so late acks are ignored */
	atomic_set(&mvm->queue_sync_counter, 0);
	mvm->queue_sync_cookie++;
}
/*
 * mac80211 .sync_rx_queues callback.
 *
 * Thin locked wrapper: resolve the driver context from the mac80211
 * handle, then perform the RSS-queue sync under mvm->mutex, which the
 * internal helper requires to be held.
 */
static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	mutex_lock(&mvm->mutex);
	iwl_mvm_sync_rx_queues_internal(mvm);
	mutex_unlock(&mvm->mutex);
}
const struct ieee80211_ops iwl_mvm_hw_ops = { const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx, .tx = iwl_mvm_mac_tx,
.ampdu_action = iwl_mvm_mac_ampdu_action, .ampdu_action = iwl_mvm_mac_ampdu_action,
...@@ -4093,6 +4134,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { ...@@ -4093,6 +4134,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.event_callback = iwl_mvm_mac_event_callback, .event_callback = iwl_mvm_mac_event_callback,
.sync_rx_queues = iwl_mvm_sync_rx_queues,
CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
......
...@@ -633,6 +633,8 @@ struct iwl_mvm { ...@@ -633,6 +633,8 @@ struct iwl_mvm {
unsigned long status; unsigned long status;
u32 queue_sync_cookie;
atomic_t queue_sync_counter;
/* /*
* for beacon filtering - * for beacon filtering -
* currently only one interface can be supported * currently only one interface can be supported
......
...@@ -586,6 +586,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, ...@@ -586,6 +586,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
skb_queue_head_init(&mvm->d0i3_tx); skb_queue_head_init(&mvm->d0i3_tx);
init_waitqueue_head(&mvm->d0i3_exit_waitq); init_waitqueue_head(&mvm->d0i3_exit_waitq);
atomic_set(&mvm->queue_sync_counter, 0);
SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
/* /*
......
...@@ -406,6 +406,13 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, ...@@ -406,6 +406,13 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
internal_notif = (void *)notif->payload; internal_notif = (void *)notif->payload;
switch (internal_notif->type) { switch (internal_notif->type) {
case IWL_MVM_RXQ_SYNC:
if (mvm->queue_sync_cookie == internal_notif->cookie)
atomic_dec(&mvm->queue_sync_counter);
else
WARN_ONCE(1,
"Received expired RX queue sync message\n");
break;
case IWL_MVM_RXQ_NOTIF_DEL_BA: case IWL_MVM_RXQ_NOTIF_DEL_BA:
/* TODO */ /* TODO */
break; break;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册