Commit d43042f4 authored by Joao Pinto, committed by David S. Miller

net: stmmac: mapping mtl rx to dma channel

This patch adds the functionality of mapping RX queues to DMA channels
based on configuration.
Signed-off-by: Joao Pinto <jpinto@synopsys.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 4f6046f5
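The "configuration" mentioned in the commit message is the per-queue channel assignment carried in the platform data, i.e. the plat->rx_queues_to_use count and the rx_queues_cfg[queue].chan fields read in the final hunks below. A minimal compile-only sketch of that shape, using made-up stand-in types (sketch_rxq_cfg, sketch_plat_data and SKETCH_MAX_RX_QUEUES are illustrations, not the kernel definitions):

#include <stdint.h>

#define SKETCH_MAX_RX_QUEUES	8	/* assumed queue count for the sketch */

/* Stand-in for the per-RX-queue platform configuration entry. */
struct sketch_rxq_cfg {
	uint32_t chan;			/* mirrors rx_queues_cfg[queue].chan */
};

/* Stand-in for the part of the platform data this patch consumes. */
struct sketch_plat_data {
	uint32_t rx_queues_to_use;	/* mirrors plat->rx_queues_to_use */
	struct sketch_rxq_cfg rx_queues_cfg[SKETCH_MAX_RX_QUEUES];
};

/* Example: four RX queues, each routed 1:1 to the RX DMA channel of the same index. */
static const struct sketch_plat_data example_plat = {
	.rx_queues_to_use = 4,
	.rx_queues_cfg = {
		[0] = { .chan = 0 },
		[1] = { .chan = 1 },
		[2] = { .chan = 2 },
		[3] = { .chan = 3 },
	},
};

The new stmmac_rx_queue_dma_chan_map() helper added below walks exactly this kind of table and hands each (queue, chan) pair to the new map_mtl_to_dma callback.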
@@ -465,6 +465,8 @@ struct stmmac_ops {
	/* Set MTL TX queues weight */
	void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw,
					u32 weight, u32 queue);
	/* RX MTL queue to RX dma mapping */
	void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan);
	/* Dump MAC registers */
	void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
	/* Handle extra events on specific interrupts hw dependent */
......
@@ -177,6 +177,13 @@ enum power_event {
#define MTL_INT_STATUS 0x00000c20
#define MTL_INT_Q0 BIT(0)
#define MTL_RXQ_DMA_MAP0 0x00000c30 /* queue 0 to 3 */
#define MTL_RXQ_DMA_MAP1 0x00000c34 /* queue 4 to 7 */
#define MTL_RXQ_DMA_Q04MDMACH_MASK GENMASK(3, 0)
#define MTL_RXQ_DMA_Q04MDMACH(x) ((x) << 0)
#define MTL_RXQ_DMA_QXMDMACH_MASK(x) GENMASK(11 + (8 * ((x) - 1)), 8 * (x))
#define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q)))
#define MTL_CHAN_BASE_ADDR 0x00000d00
#define MTL_CHAN_BASE_OFFSET 0x40
#define MTL_CHANX_BASE_ADDR(x) (MTL_CHAN_BASE_ADDR + \
......
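As a hedged, out-of-tree illustration of the register layout introduced above: each RX queue owns one mapping field in MTL_RXQ_DMA_MAP0 (queues 0 to 3) or MTL_RXQ_DMA_MAP1 (queues 4 to 7), one byte apart. The small user-space program below is not kernel code; GENMASK() is a simplified local stand-in for the kernel macro, and main() exists only to print what the new field helpers evaluate to for queues 0 to 3:

#include <stdio.h>

#define GENMASK(h, l)	((unsigned int)(((1ULL << ((h) - (l) + 1)) - 1) << (l)))

#define MTL_RXQ_DMA_Q04MDMACH_MASK	GENMASK(3, 0)
#define MTL_RXQ_DMA_Q04MDMACH(x)	((x) << 0)
#define MTL_RXQ_DMA_QXMDMACH_MASK(x)	GENMASK(11 + (8 * ((x) - 1)), 8 * (x))
#define MTL_RXQ_DMA_QXMDMACH(chan, q)	((chan) << (8 * (q)))

int main(void)
{
	unsigned int q;

	/* Queue 0 occupies bits 3:0 of MTL_RXQ_DMA_MAP0 ... */
	printf("queue 0: mask 0x%08x\n", MTL_RXQ_DMA_Q04MDMACH_MASK);

	/* ... and queues 1..3 each own one byte further up the register. */
	for (q = 1; q < 4; q++)
		printf("queue %u: mask 0x%08x, channel 1 -> value 0x%08x\n",
		       q, MTL_RXQ_DMA_QXMDMACH_MASK(q),
		       MTL_RXQ_DMA_QXMDMACH(1u, q));
	return 0;
}

Mapping queues 0-3 one-to-one onto channels 0-3 therefore leaves MTL_RXQ_DMA_MAP0 holding 0x03020100, one channel index per byte; per the register comments above, MTL_RXQ_DMA_MAP1 carries queues 4 to 7 in the same fashion.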
@@ -131,6 +131,30 @@ static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
	writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
}

static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (queue < 4)
		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
	else
		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);

	if (queue == 0 || queue == 4) {
		value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
		value |= MTL_RXQ_DMA_Q04MDMACH(chan);
	} else {
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
	}

	if (queue < 4)
		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
	else
		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
}
static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
@@ -521,6 +545,7 @@ static const struct stmmac_ops dwmac4_ops = {
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.flow_ctrl = dwmac4_flow_ctrl,
......
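For a concrete picture of the read-modify-write that dwmac4_map_mtl_dma() performs, the following user-space sketch (again an illustration, not driver code: mock_map0 and mock_map_queue() are made-up stand-ins for the MMIO register and the queues-0-to-3 half of the helper) applies the same mask-and-or sequence to a plain variable, showing that updating one queue's field leaves the others untouched:

#include <stdio.h>

#define GENMASK(h, l)	((unsigned int)(((1ULL << ((h) - (l) + 1)) - 1) << (l)))

#define MTL_RXQ_DMA_Q04MDMACH_MASK	GENMASK(3, 0)
#define MTL_RXQ_DMA_Q04MDMACH(x)	((x) << 0)
#define MTL_RXQ_DMA_QXMDMACH_MASK(x)	GENMASK(11 + (8 * ((x) - 1)), 8 * (x))
#define MTL_RXQ_DMA_QXMDMACH(chan, q)	((chan) << (8 * (q)))

/* Stand-in for the MTL_RXQ_DMA_MAP0 MMIO register. */
static unsigned int mock_map0;

/* Same read-modify-write pattern as the queues-0..3 path of dwmac4_map_mtl_dma(). */
static void mock_map_queue(unsigned int queue, unsigned int chan)
{
	unsigned int value = mock_map0;			/* readl() */

	if (queue == 0) {
		value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
		value |= MTL_RXQ_DMA_Q04MDMACH(chan);
	} else {
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
	}

	mock_map0 = value;				/* writel() */
}

int main(void)
{
	unsigned int q;

	/* Identity mapping: RX queue n -> DMA channel n, queues 0..3. */
	for (q = 0; q < 4; q++)
		mock_map_queue(q, q);
	printf("after identity map: 0x%08x\n", mock_map0);	/* 0x03020100 */

	/* Remap only queue 2 to channel 0; the other byte fields are preserved. */
	mock_map_queue(2, 0);
	printf("after remapping q2: 0x%08x\n", mock_map0);	/* 0x03000100 */
	return 0;
}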
@@ -1659,6 +1659,23 @@ static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
	}
}

/**
 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
 * @priv: driver private structure
 * Description: It is used for mapping RX queues to RX dma channels
 */
static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 chan;

	for (queue = 0; queue < rx_queues_count; queue++) {
		chan = priv->plat->rx_queues_cfg[queue].chan;
		priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
	}
}
/**
 * stmmac_mtl_configuration - Configure MTL
 * @priv: driver private structure
@@ -1682,6 +1699,10 @@ static void stmmac_mtl_configuration(struct stmmac_priv *priv)
	priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
					      priv->plat->tx_sched_algorithm);

	/* Map RX MTL to DMA channels */
	if (rx_queues_count > 1 && priv->hw->mac->map_mtl_to_dma)
		stmmac_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues */
	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_enable)
		stmmac_mac_enable_rx_queues(priv);
......