提交 d03a68f8 编写于 作者: I Ido Shamay 提交者: David S. Miller

net/mlx4_en: Configure the XPS queue mapping on driver load

Only TX rings of User Priority 0 are mapped.
TX rings of other UPs use the UP 0 mapping.
XPS is not in use when num_tc is set.
Signed-off-by: Ido Shamay <idos@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 84c86403
...@@ -1910,8 +1910,10 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) ...@@ -1910,8 +1910,10 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
prof->tx_ring_size, i, TX, node)) prof->tx_ring_size, i, TX, node))
goto err; goto err;
if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i, if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
prof->tx_ring_size, TXBB_SIZE, node)) priv->base_tx_qpn + i,
prof->tx_ring_size, TXBB_SIZE,
node, i))
goto err; goto err;
} }
......
...@@ -55,7 +55,7 @@ MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); ...@@ -55,7 +55,7 @@ MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring, int qpn, u32 size, struct mlx4_en_tx_ring **pring, int qpn, u32 size,
u16 stride, int node) u16 stride, int node, int queue_index)
{ {
struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_ring *ring; struct mlx4_en_tx_ring *ring;
...@@ -140,6 +140,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ...@@ -140,6 +140,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->bf_enabled = true; ring->bf_enabled = true;
ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
ring->queue_index = queue_index;
if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
cpumask_set_cpu(queue_index, &ring->affinity_mask);
*pring = ring; *pring = ring;
return 0; return 0;
...@@ -206,6 +210,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, ...@@ -206,6 +210,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
&ring->qp, &ring->qp_state); &ring->qp, &ring->qp_state);
if (!user_prio && cpu_online(ring->queue_index))
netif_set_xps_queue(priv->dev, &ring->affinity_mask,
ring->queue_index);
return err; return err;
} }
......
...@@ -255,6 +255,8 @@ struct mlx4_en_tx_ring { ...@@ -255,6 +255,8 @@ struct mlx4_en_tx_ring {
u16 poll_cnt; u16 poll_cnt;
struct mlx4_en_tx_info *tx_info; struct mlx4_en_tx_info *tx_info;
u8 *bounce_buf; u8 *bounce_buf;
u8 queue_index;
cpumask_t affinity_mask;
u32 last_nr_txbb; u32 last_nr_txbb;
struct mlx4_qp qp; struct mlx4_qp qp;
struct mlx4_qp_context context; struct mlx4_qp_context context;
...@@ -719,7 +721,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); ...@@ -719,7 +721,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring, struct mlx4_en_tx_ring **pring,
int qpn, u32 size, u16 stride, int node); int qpn, u32 size, u16 stride,
int node, int queue_index);
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring); struct mlx4_en_tx_ring **pring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册