commit 1ce8658c authored by Emmanuel Grumbach, committed by Johannes Berg

iwlwifi: don't configure a txq that is being disabled

This is not needed; we just need to tell the SCD not to use
that queue. We will reconfigure the queue when we use it
again.

Also clean up the code a bit along the way.
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent f22d3328
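The gist of the change: disabling a Tx queue no longer rewrites pointers, RA/TID mapping, or status; it only clears the queue's ACTIVE bit in the scheduler (SCD), relying on the next enable to reprogram everything. Below is a minimal standalone sketch of the write-enable idea described by the diff's comment ("the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit"); the register model and bit positions here are illustrative stand-ins, not the real iwlwifi register map.

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions; stand-ins, not the real register layout. */
#define POS_ACTIVE  0   /* queue is scheduled by the SCD */
#define POS_ACT_EN  19  /* write-enable mask for the ACTIVE bit */

static uint32_t scd_queue_status[20];  /* mock of SCD_QUEUE_STATUS_BITS(q) */

/* Mock register write: the ACTIVE bit is latched only when the written
 * value also carries the write-enable (ACT_EN) bit. */
static void write_prph(unsigned int q, uint32_t val)
{
        if (val & (1u << POS_ACT_EN)) {
                scd_queue_status[q] &= ~(1u << POS_ACTIVE);
                scd_queue_status[q] |= val & (1u << POS_ACTIVE);
        }
}

/* Counterpart of the new iwl_txq_set_inactive(): ACTIVE=0 plus the
 * write-enable bit stops the queue without touching anything else. */
static void txq_set_inactive(unsigned int q)
{
        write_prph(q, (0u << POS_ACTIVE) | (1u << POS_ACT_EN));
}

int main(void)
{
        scd_queue_status[4] = 1u << POS_ACTIVE;  /* queue 4 is running */
        txq_set_inactive(4);
        printf("queue 4 active: %u\n", scd_queue_status[4] & 1u);  /* 0 */
        return 0;
}

This is why the disable path can shrink: every configuration write (chaining, RA/TID map, pointers, frame limit, status) is redone by __iwl_trans_pcie_txq_enable() anyway.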
@@ -340,10 +340,6 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
                                        struct iwl_tx_queue *txq,
                                        u16 byte_cnt);
 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
-void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
-void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
-                                   struct iwl_tx_queue *txq,
-                                   int tx_fifo_id, bool active);
 void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
                                  int fifo, int sta_id, int tid,
                                  int frame_limit, u16 ssn);
...
@@ -380,7 +380,7 @@ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
         tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
 }
 
-static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
-                                       u16 txq_id)
+static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
+                                 u16 txq_id)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -405,7 +405,7 @@ static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
         return 0;
 }
 
-static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
+static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
 {
         /* Simply stop the queue, but don't change any configuration;
          * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
@@ -415,33 +415,6 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
                 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
 }
 
-void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index)
-{
-        IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d\n", txq_id, index & 0xff);
-        iwl_write_direct32(trans, HBUS_TARG_WRPTR,
-                        (index & 0xff) | (txq_id << 8));
-        iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
-}
-
-void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
-                                   struct iwl_tx_queue *txq,
-                                   int tx_fifo_id, bool active)
-{
-        int txq_id = txq->q.id;
-
-        iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
-                        (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
-                        (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
-                        (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
-                        SCD_QUEUE_STTS_REG_MSK);
-
-        if (active)
-                IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
-                                    txq_id, tx_fifo_id);
-        else
-                IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
-}
-
 void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
                                  int fifo, int sta_id, int tid,
                                  int frame_limit, u16 ssn)
@@ -454,7 +427,7 @@ void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
                 WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
 
         /* Stop this Tx queue before configuring it */
-        iwlagn_tx_queue_stop_scheduler(trans, txq_id);
+        iwl_txq_set_inactive(trans, txq_id);
 
         /* Set this queue as a chain-building queue unless it is CMD queue */
         if (txq_id != trans_pcie->cmd_queue)
@@ -465,17 +438,27 @@ void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
                 u16 ra_tid = BUILD_RAxTID(sta_id, tid);
 
                 /* Map receiver-address / traffic-ID to this queue */
-                iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
+                iwl_txq_set_ratid_map(trans, ra_tid, txq_id);
 
                 /* enable aggregations for the queue */
                 iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+        } else {
+                /*
+                 * disable aggregations for the queue, this will also make the
+                 * ra_tid mapping configuration irrelevant since it is now a
+                 * non-AGG queue.
+                 */
+                iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
         }
 
         /* Place first TFD at index corresponding to start sequence number.
          * Assumes that ssn_idx is valid (!= 0xFFF) */
         trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
         trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
+
+        iwl_write_direct32(trans, HBUS_TARG_WRPTR,
+                           (ssn & 0xff) | (txq_id << 8));
+        iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
 
         /* Set up Tx window size and frame limit for this queue */
         iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
@@ -488,8 +471,13 @@ void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
                         SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
 
         /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
-        iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
-                                      fifo, true);
+        iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
+                       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+                       (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
+                       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
+                       SCD_QUEUE_STTS_REG_MSK);
+        IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
+                            txq_id, fifo, ssn & 0xff);
 }
 
 void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
@@ -509,22 +497,22 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+        u16 rd_ptr, wr_ptr;
+        int n_bd = trans_pcie->txq[txq_id].q.n_bd;
 
         if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
                 WARN_ONCE(1, "queue %d not used", txq_id);
                 return;
         }
 
-        iwlagn_tx_queue_stop_scheduler(trans, txq_id);
-
-        iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
-
-        trans_pcie->txq[txq_id].q.read_ptr = 0;
-        trans_pcie->txq[txq_id].q.write_ptr = 0;
-        iwl_trans_set_wr_ptrs(trans, txq_id, 0);
+        rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1);
+        wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id));
 
-        iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
-                                      0, false);
+        WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]",
+                  txq_id, rd_ptr, wr_ptr);
+
+        iwl_txq_set_inactive(trans, txq_id);
+        IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
 }
 
 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
...
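A second detail worth noting in the new iwl_trans_pcie_txq_disable(): since disable no longer resets the ring, it instead warns if the queue has not drained, folding the SCD read pointer into the ring with n_bd - 1 (the ring size n_bd is a power of two, so the mask is a cheap modulo). Below is a standalone sketch of that check, with the two iwl_read_prph() register reads mocked as made-up plain values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const int n_bd = 256;  /* ring size; a power of two, so n_bd - 1 masks */

        /* Stand-ins for iwl_read_prph(trans, SCD_QUEUE_RDPTR/WRPTR(txq_id));
         * the values here are invented for illustration only. */
        uint32_t scd_rdptr = 0x10a;  /* read pointer, may count past the ring */
        uint32_t scd_wrptr = 0x00a;

        uint16_t rd_ptr = scd_rdptr & (n_bd - 1);  /* fold into the ring */
        uint16_t wr_ptr = scd_wrptr;

        if (rd_ptr != wr_ptr)
                printf("queue isn't empty: [%d,%d]\n", rd_ptr, wr_ptr);
        else
                printf("queue drained; safe to just mark it inactive\n");
        return 0;
}

In this example both pointers fold to 10, so the queue counts as empty; any mismatch corresponds to the WARN_ONCE in the real code.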