Commit 3ed87996 authored by Parav Pandit, committed by Saeed Mahameed

net/mlx5: Use async EQ setup cleanup helpers for multiple EQs

Use helper routines to set up and tear down multiple EQs and reuse the
code in the setup, cleanup and error-unwinding flows.
Signed-off-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Parent 7396ae3d
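Before the diff itself, a condensed, standalone sketch of the pattern this commit introduces may help: one helper pairs "create + enable", a second pairs "disable + destroy", and the setup path, the teardown path and the error-unwind labels all call the same two routines. The struct eq, setup_eq and cleanup_eq names below are hypothetical stand-ins for illustration, not the mlx5 driver code.

/*
 * Minimal illustration of the setup/cleanup helper pattern
 * (hypothetical types and stubbed operations, not mlx5 code).
 */
#include <stdio.h>

struct eq {
	const char *name;
	int created;
	int enabled;
};

/* Stubbed hardware operations; the real driver talks to firmware here. */
static int create_eq(struct eq *eq)   { eq->created = 1; return 0; }
static int enable_eq(struct eq *eq)   { eq->enabled = 1; return 0; }
static void disable_eq(struct eq *eq) { eq->enabled = 0; }
static int destroy_eq(struct eq *eq)  { eq->created = 0; return 0; }

/* Mirrors setup_async_eq(): create, then enable, undoing create on failure. */
static int setup_eq(struct eq *eq, const char *name)
{
	int err;

	eq->name = name;
	err = create_eq(eq);
	if (err) {
		fprintf(stderr, "failed to create %s EQ %d\n", name, err);
		return err;
	}
	err = enable_eq(eq);
	if (err) {
		fprintf(stderr, "failed to enable %s EQ %d\n", name, err);
		destroy_eq(eq);
	}
	return err;
}

/* Mirrors cleanup_async_eq(): disable, then destroy, reporting any failure. */
static void cleanup_eq(struct eq *eq, const char *name)
{
	int err;

	disable_eq(eq);
	err = destroy_eq(eq);
	if (err)
		fprintf(stderr, "failed to destroy %s eq, err(%d)\n", name, err);
}

int main(void)
{
	struct eq cmd = {0}, async = {0}, pages = {0};
	int err;

	/* Setup: one helper call per EQ instead of an open-coded
	 * create/enable pair with its own warnings and unwind steps. */
	err = setup_eq(&cmd, "cmd");
	if (err)
		return err;
	err = setup_eq(&async, "async");
	if (err)
		goto err_cmd;
	err = setup_eq(&pages, "pages");
	if (err)
		goto err_async;

	/* Teardown reuses exactly the same helper, in reverse order. */
	cleanup_eq(&pages, "pages");
	cleanup_eq(&async, "async");
	cleanup_eq(&cmd, "cmd");
	return 0;

	/* Error unwind shares the cleanup helper with the teardown path. */
err_async:
	cleanup_eq(&async, "async");
err_cmd:
	cleanup_eq(&cmd, "cmd");
	return err;
}

As in the diff below, the enable-failure case rolls back its own create inside the helper, so callers only need one goto label per EQ.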
@@ -564,6 +564,39 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
 		gather_user_async_events(dev, mask);
 }
 
+static int
+setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
+	       struct mlx5_eq_param *param, const char *name)
+{
+	int err;
+
+	eq->irq_nb.notifier_call = mlx5_eq_async_int;
+
+	err = create_async_eq(dev, &eq->core, param);
+	if (err) {
+		mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
+		return err;
+	}
+	err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
+	if (err) {
+		mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err);
+		destroy_async_eq(dev, &eq->core);
+	}
+	return err;
+}
+
+static void cleanup_async_eq(struct mlx5_core_dev *dev,
+			     struct mlx5_eq_async *eq, const char *name)
+{
+	int err;
+
+	mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
+	err = destroy_async_eq(dev, &eq->core);
+	if (err)
+		mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
+			      name, err);
+}
+
 static int create_async_eqs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eq_table *table = dev->priv.eq_table;
@@ -573,77 +606,45 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 	MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
 	mlx5_eq_notifier_register(dev, &table->cq_err_nb);
 
-	table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int;
 	param = (struct mlx5_eq_param) {
 		.irq_index = 0,
 		.nent = MLX5_NUM_CMD_EQE,
+		.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
 	};
-
-	param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD;
-	err = create_async_eq(dev, &table->cmd_eq.core, &param);
-	if (err) {
-		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
-		goto err0;
-	}
-	err = mlx5_eq_enable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
-	if (err) {
-		mlx5_core_warn(dev, "failed to enable cmd EQ %d\n", err);
+	err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
+	if (err)
 		goto err1;
-	}
 
 	mlx5_cmd_use_events(dev);
 
-	table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int;
 	param = (struct mlx5_eq_param) {
 		.irq_index = 0,
 		.nent = MLX5_NUM_ASYNC_EQE,
 	};
 
 	gather_async_events_mask(dev, param.mask);
-	err = create_async_eq(dev, &table->async_eq.core, &param);
-	if (err) {
-		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
+	err = setup_async_eq(dev, &table->async_eq, &param, "async");
+	if (err)
 		goto err2;
-	}
-	err = mlx5_eq_enable(dev, &table->async_eq.core,
-			     &table->async_eq.irq_nb);
-	if (err) {
-		mlx5_core_warn(dev, "failed to enable async EQ %d\n", err);
-		goto err3;
-	}
 
-	table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int;
 	param = (struct mlx5_eq_param) {
 		.irq_index = 0,
 		.nent = /* TODO: sriov max_vf + */ 1,
+		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
 	};
-
-	param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST;
-	err = create_async_eq(dev, &table->pages_eq.core, &param);
-	if (err) {
-		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
-		goto err4;
-	}
-	err = mlx5_eq_enable(dev, &table->pages_eq.core,
-			     &table->pages_eq.irq_nb);
-	if (err) {
-		mlx5_core_warn(dev, "failed to enable pages EQ %d\n", err);
-		goto err5;
-	}
+	err = setup_async_eq(dev, &table->pages_eq, &param, "pages");
+	if (err)
+		goto err3;
 
-	return err;
+	return 0;
 
-err5:
-	destroy_async_eq(dev, &table->pages_eq.core);
-err4:
-	mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb);
 err3:
-	destroy_async_eq(dev, &table->async_eq.core);
+	cleanup_async_eq(dev, &table->async_eq, "async");
 err2:
 	mlx5_cmd_use_polling(dev);
-	mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
+	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
 err1:
-	destroy_async_eq(dev, &table->cmd_eq.core);
-err0:
 	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
 	return err;
 }
@@ -651,28 +652,11 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 static void destroy_async_eqs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eq_table *table = dev->priv.eq_table;
-	int err;
-
-	mlx5_eq_disable(dev, &table->pages_eq.core, &table->pages_eq.irq_nb);
-	err = destroy_async_eq(dev, &table->pages_eq.core);
-	if (err)
-		mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
-			      err);
 
-	mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb);
-	err = destroy_async_eq(dev, &table->async_eq.core);
-	if (err)
-		mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
-			      err);
+	cleanup_async_eq(dev, &table->pages_eq, "pages");
+	cleanup_async_eq(dev, &table->async_eq, "async");
 
 	mlx5_cmd_use_polling(dev);
+	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
 
-	mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
-	err = destroy_async_eq(dev, &table->cmd_eq.core);
-	if (err)
-		mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
-			      err);
-
 	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
 }