Commit d4578ea8 authored by Johannes Berg, committed by Emmanuel Grumbach

iwlwifi: trans: allow skipping scheduler hardware config

In a later patch, the hardware configuration will be moved to
firmware. Prepare for this by allowing hardware configuration
in the transport to be skipped by not passing a configuration
on enable and passing configure_scd=false on disable.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Parent 0ade579c
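
For illustration, a minimal sketch (not part of this commit) of how an op_mode could drive a queue once scheduler configuration lives in firmware. The two transport calls are the ones introduced below; the wrapper function itself is hypothetical:

#include "iwl-trans.h"

/* Hypothetical op_mode helper, assuming firmware owns the scheduler. */
static void example_fw_owned_queue(struct iwl_trans *trans, int queue, u16 ssn)
{
	/* NULL scd config: the transport skips all scheduler writes */
	iwl_trans_txq_enable_no_scd(trans, queue, ssn);

	/* ... firmware configures the scheduler itself ... */

	/* configure_scd=false: unmap the queue but leave the SCD alone */
	iwl_trans_txq_disable(trans, queue, false);
}

Existing callers keep today's behavior by passing a full config on enable and configure_scd=true on disable, as the hunks below show.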
@@ -580,7 +580,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	 * time, or we hadn't time to drain the AC queues.
 	 */
 	if (agg_state == IWL_AGG_ON)
-		iwl_trans_txq_disable(priv->trans, txq_id);
+		iwl_trans_txq_disable(priv->trans, txq_id, true);
 	else
 		IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
 				    agg_state);
@@ -686,7 +686,7 @@ int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	 * time, or we hadn't time to drain the AC queues.
 	 */
 	if (agg_state == IWL_AGG_ON)
-		iwl_trans_txq_disable(priv->trans, txq_id);
+		iwl_trans_txq_disable(priv->trans, txq_id, true);
 	else
 		IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
 				    agg_state);
@@ -781,7 +781,7 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
 				"Can continue DELBA flow ssn = next_recl = %d\n",
 				tid_data->next_reclaimed);
-			iwl_trans_txq_disable(priv->trans,
-					      tid_data->agg.txq_id);
+			iwl_trans_txq_disable(priv->trans,
+					      tid_data->agg.txq_id, true);
 			iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
 			tid_data->agg.state = IWL_AGG_OFF;
 			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
...
@@ -444,7 +444,9 @@ struct iwl_trans_txq_scd_cfg {
  *	Must be atomic
  * @txq_enable: setup a queue. To setup an AC queue, use the
  *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
- *	this one. The op_mode must not configure the HCMD queue. May sleep.
+ *	this one. The op_mode must not configure the HCMD queue. The scheduler
+ *	configuration may be %NULL, in which case the hardware will not be
+ *	configured. May sleep.
  * @txq_disable: de-configure a Tx queue to send AMPDUs
  *	Must be atomic
  * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
@@ -501,7 +503,8 @@ struct iwl_trans_ops {
 	void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
 			   const struct iwl_trans_txq_scd_cfg *cfg);
-	void (*txq_disable)(struct iwl_trans *trans, int queue);
+	void (*txq_disable)(struct iwl_trans *trans, int queue,
+			    bool configure_scd);
 
 	int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
 	int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
@@ -773,9 +776,22 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
 	trans->ops->reclaim(trans, queue, ssn, skbs);
 }
 
-static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
+static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
+					 bool configure_scd)
 {
-	trans->ops->txq_disable(trans, queue);
+	trans->ops->txq_disable(trans, queue, configure_scd);
+}
+
+static inline void
+iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
+			 const struct iwl_trans_txq_scd_cfg *cfg)
+{
+	might_sleep();
+
+	if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+
+	trans->ops->txq_enable(trans, queue, ssn, cfg);
 }
 
 static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
@@ -789,19 +805,26 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
 		.frame_limit = frame_limit,
 	};
 
-	might_sleep();
-
-	if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
-		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
-
-	trans->ops->txq_enable(trans, queue, ssn, &cfg);
+	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg);
 }
 
 static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
 					   int fifo)
 {
-	iwl_trans_txq_enable(trans, queue, fifo, -1,
-			     IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
+	struct iwl_trans_txq_scd_cfg cfg = {
+		.fifo = fifo,
+		.sta_id = -1,
+		.tid = IWL_MAX_TID_COUNT,
+		.frame_limit = IWL_FRAME_LIMIT,
+	};
+
+	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg);
+}
+
+static inline void
+iwl_trans_txq_enable_no_scd(struct iwl_trans *trans, int queue, u16 ssn)
+{
+	iwl_trans_txq_enable_cfg(trans, queue, ssn, NULL);
 }
 
 static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
...
@@ -452,14 +452,16 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	switch (vif->type) {
 	case NL80211_IFTYPE_P2P_DEVICE:
-		iwl_trans_txq_disable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE);
+		iwl_trans_txq_disable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE,
+				      true);
 		break;
 	case NL80211_IFTYPE_AP:
-		iwl_trans_txq_disable(mvm->trans, vif->cab_queue);
+		iwl_trans_txq_disable(mvm->trans, vif->cab_queue, true);
 		/* fall through */
 	default:
 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
-			iwl_trans_txq_disable(mvm->trans, vif->hw_queue[ac]);
+			iwl_trans_txq_disable(mvm->trans, vif->hw_queue[ac],
+					      true);
 	}
 }
...
@@ -910,7 +910,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		}
 
 		tid_data->ssn = 0xffff;
-		iwl_trans_txq_disable(mvm->trans, txq_id);
+		iwl_trans_txq_disable(mvm->trans, txq_id, true);
 		/* fall through */
 	case IWL_AGG_STARTING:
 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -965,7 +965,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
 			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
-		iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
+		iwl_trans_txq_disable(mvm->trans, tid_data->txq_id, true);
 	}
 
 	mvm->queue_to_mac80211[tid_data->txq_id] =
...
@@ -482,7 +482,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
 		IWL_DEBUG_TX_QUEUES(mvm,
 				    "Can continue DELBA flow ssn = next_recl = %d\n",
 				    tid_data->next_reclaimed);
-		iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
+		iwl_trans_txq_disable(mvm->trans, tid_data->txq_id, true);
 		tid_data->state = IWL_AGG_OFF;
 		/*
 		 * we can't hold the mutex - but since we are after a sequence
...
@@ -366,7 +366,8 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans);
 void iwl_pcie_tx_free(struct iwl_trans *trans);
 void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
 			       const struct iwl_trans_txq_scd_cfg *cfg);
-void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
+void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
+				bool configure_scd);
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		      struct iwl_device_cmd *dev_cmd, int txq_id);
 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
...
@@ -1070,37 +1070,41 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
 			       const struct iwl_trans_txq_scd_cfg *cfg)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	u8 frame_limit = cfg->frame_limit;
+	int fifo = -1;
 
 	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
 		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
 
-	/* Stop this Tx queue before configuring it */
-	iwl_scd_txq_set_inactive(trans, txq_id);
-
-	/* Set this queue as a chain-building queue unless it is CMD queue */
-	if (txq_id != trans_pcie->cmd_queue)
-		iwl_scd_txq_set_chain(trans, txq_id);
-
-	/* If this queue is mapped to a certain station: it is an AGG queue */
-	if (cfg->sta_id >= 0) {
-		u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);
-
-		/* Map receiver-address / traffic-ID to this queue */
-		iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
-
-		/* enable aggregations for the queue */
-		iwl_scd_txq_enable_agg(trans, txq_id);
-		trans_pcie->txq[txq_id].ampdu = true;
-	} else {
-		/*
-		 * disable aggregations for the queue, this will also make the
-		 * ra_tid mapping configuration irrelevant since it is now a
-		 * non-AGG queue.
-		 */
-		iwl_scd_txq_disable_agg(trans, txq_id);
-
-		ssn = trans_pcie->txq[txq_id].q.read_ptr;
+	if (cfg) {
+		fifo = cfg->fifo;
+
+		/* Stop this Tx queue before configuring it */
+		iwl_scd_txq_set_inactive(trans, txq_id);
+
+		/* Set this queue as a chain-building queue unless it is CMD */
+		if (txq_id != trans_pcie->cmd_queue)
+			iwl_scd_txq_set_chain(trans, txq_id);
+
+		/* If this queue is mapped to a certain station: it is an AGG */
+		if (cfg->sta_id >= 0) {
+			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);
+
+			/* Map receiver-address / traffic-ID to this queue */
+			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
+
+			/* enable aggregations for the queue */
+			iwl_scd_txq_enable_agg(trans, txq_id);
+			trans_pcie->txq[txq_id].ampdu = true;
+		} else {
+			/*
+			 * disable aggregations for the queue, this will also
+			 * make the ra_tid mapping configuration irrelevant
+			 * since it is now a non-AGG queue.
+			 */
+			iwl_scd_txq_disable_agg(trans, txq_id);
+
+			ssn = trans_pcie->txq[txq_id].q.read_ptr;
+		}
 	}
 
 	/* Place first TFD at index corresponding to start sequence number.
@@ -1108,32 +1112,39 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
 	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
 	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
 
-	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
-			   (ssn & 0xff) | (txq_id << 8));
-	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
+	if (cfg) {
+		u8 frame_limit = cfg->frame_limit;
+
+		iwl_write_direct32(trans, HBUS_TARG_WRPTR,
+				   (ssn & 0xff) | (txq_id << 8));
+		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
 
-	/* Set up Tx window size and frame limit for this queue */
-	iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
-			SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
-	iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
-			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
-			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
-				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
-			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
-				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+		/* Set up Tx window size and frame limit for this queue */
+		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
+				SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
+		iwl_trans_write_mem32(trans,
+				trans_pcie->scd_base_addr +
+				SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+				((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+					SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+				((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+					SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
 
-	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
-	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
-		       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
-		       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
-		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
-		       SCD_QUEUE_STTS_REG_MSK);
+		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
+		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
+			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
+			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
+			       SCD_QUEUE_STTS_REG_MSK);
+	}
 
 	trans_pcie->txq[txq_id].active = true;
 	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
-			    txq_id, cfg->fifo, ssn & 0xff);
+			    txq_id, fifo, ssn & 0xff);
 }
 
-void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
+void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
+				bool configure_scd)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 stts_addr = trans_pcie->scd_base_addr +
@@ -1152,10 +1163,12 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
 		return;
 	}
 
-	iwl_scd_txq_set_inactive(trans, txq_id);
+	if (configure_scd) {
+		iwl_scd_txq_set_inactive(trans, txq_id);
 
-	iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
-			    ARRAY_SIZE(zero_val));
+		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
+				    ARRAY_SIZE(zero_val));
+	}
 
 	iwl_pcie_txq_unmap(trans, txq_id);
 	trans_pcie->txq[txq_id].ampdu = false;
...
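
The disable path above reduces to a simple contract: scheduler writes are fenced behind configure_scd, while DMA unmapping always runs. A hedged sketch of that shape for some other transport backend (function names hypothetical, logic condensed from the PCIe hunk):

/* Hypothetical transport txq_disable honoring the new flag. */
static void example_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	if (configure_scd) {
		/* legacy path: the driver deactivates the SCD queue and
		 * clears its status area, as the PCIe code does above */
	}

	/* DMA unmap and queue bookkeeping happen unconditionally */
	example_txq_unmap(trans, txq_id);	/* hypothetical helper */
}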