提交 16c6d667 编写于 作者: Petri Gynther 提交者: David S. Miller

net: bcmgenet: rework Tx queue init

1. Rename bcmgenet_init_multiq() to bcmgenet_init_tx_queues()
2. Fix bcmgenet_init_tx_queues() function description
3. Move Tx default queue init inside bcmgenet_init_tx_queues()
4. Modify bcmgenet_init_dma() to call bcmgenet_init_tx_queues()
Signed-off-by: Petri Gynther <pgynther@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 4f8b2d7d
...@@ -1776,78 +1776,73 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, ...@@ -1776,78 +1776,73 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
return ret; return ret;
} }
/* init multi xmit queues, only available for GENET2+ /* Initialize Tx queues
* the queue is partitioned as follows:
* *
* queue 0 - 3 is priority based, each one has 32 descriptors, * Queues 0-3 are priority-based, each one has 32 descriptors,
* with queue 0 being the highest priority queue. * with queue 0 being the highest priority queue.
* *
* queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT * Queue 16 is the default Tx queue with
* descriptors: 256 - (number of tx queues * bds per queues) = 128 * GENET_DEFAULT_BD_CNT = 256 - 4 * 32 = 128 descriptors.
* descriptors.
* *
* The transmit control block pool is then partitioned as following: * The transmit control block pool is then partitioned as follows:
* - tx_cbs[0...127] are for queue 16 * - Tx queue 0 uses tx_cbs[0..31]
* - tx_ring_cbs[0] points to tx_cbs[128..159] * - Tx queue 1 uses tx_cbs[32..63]
* - tx_ring_cbs[1] points to tx_cbs[160..191] * - Tx queue 2 uses tx_cbs[64..95]
* - tx_ring_cbs[2] points to tx_cbs[192..223] * - Tx queue 3 uses tx_cbs[96..127]
* - tx_ring_cbs[3] points to tx_cbs[224..255] * - Tx queue 16 uses tx_cbs[128..255]
*/ */
static void bcmgenet_init_multiq(struct net_device *dev) static void bcmgenet_init_tx_queues(struct net_device *dev)
{ {
struct bcmgenet_priv *priv = netdev_priv(dev); struct bcmgenet_priv *priv = netdev_priv(dev);
unsigned int i, dma_enable; u32 i, dma_enable;
u32 reg, dma_ctrl, ring_cfg = 0; u32 dma_ctrl, ring_cfg;
u32 dma_priority[3] = {0, 0, 0}; u32 dma_priority[3] = {0, 0, 0};
if (!netif_is_multiqueue(dev)) {
netdev_warn(dev, "called with non multi queue aware HW\n");
return;
}
dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
dma_enable = dma_ctrl & DMA_EN; dma_enable = dma_ctrl & DMA_EN;
dma_ctrl &= ~DMA_EN; dma_ctrl &= ~DMA_EN;
bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
dma_ctrl = 0;
ring_cfg = 0;
/* Enable strict priority arbiter mode */ /* Enable strict priority arbiter mode */
bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
/* Initialize Tx priority queues */
for (i = 0; i < priv->hw_params->tx_queues; i++) { for (i = 0; i < priv->hw_params->tx_queues; i++) {
/* first 64 tx_cbs are reserved for default tx queue
* (ring 16)
*/
bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt, bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
i * priv->hw_params->bds_cnt, i * priv->hw_params->bds_cnt,
(i + 1) * priv->hw_params->bds_cnt); (i + 1) * priv->hw_params->bds_cnt);
ring_cfg |= (1 << i);
/* Configure ring as descriptor ring and setup priority */ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
ring_cfg |= 1 << i;
dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);
dma_priority[DMA_PRIO_REG_INDEX(i)] |= dma_priority[DMA_PRIO_REG_INDEX(i)] |=
((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
} }
/* Set ring 16 priority and program the hardware registers */ /* Initialize Tx default queue 16 */
bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
priv->hw_params->tx_queues *
priv->hw_params->bds_cnt,
TOTAL_DESC);
ring_cfg |= (1 << DESC_INDEX);
dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
DMA_PRIO_REG_SHIFT(DESC_INDEX)); DMA_PRIO_REG_SHIFT(DESC_INDEX));
/* Set Tx queue priorities */
bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
/* Enable rings */ /* Enable Tx queues */
reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG); bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
reg |= ring_cfg;
bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG);
/* Configure ring as descriptor ring and re-enable DMA if enabled */ /* Enable Tx DMA */
reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
reg |= dma_ctrl;
if (dma_enable) if (dma_enable)
reg |= DMA_EN; dma_ctrl |= DMA_EN;
bcmgenet_tdma_writel(priv, reg, DMA_CTRL); bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
} }
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
...@@ -1950,14 +1945,8 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv) ...@@ -1950,14 +1945,8 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
return -ENOMEM; return -ENOMEM;
} }
/* initialize multi xmit queue */ /* Initialize Tx queues */
bcmgenet_init_multiq(priv->dev); bcmgenet_init_tx_queues(priv->dev);
/* initialize special ring 16 */
bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
priv->hw_params->tx_queues *
priv->hw_params->bds_cnt,
TOTAL_DESC);
return 0; return 0;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册