Commit 7c16f477, authored by Kamal Heib, committed by Doug Ledford

IB/mlx5: Expose Q counters groups only if they are supported by FW

This patch modifies the Q counters implementation so that each of the
three Q counter groups is exposed by the driver only if it is
supported by the firmware.
Signed-off-by: Kamal Heib <kamalh@mellanox.com>
Reviewed-by: Mark Bloch <markb@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Parent 1ffd3a26
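
For illustration, here is a minimal user-space sketch of the capability-gated grouping this patch implements: the basic group is always present, while the out-of-sequence and retransmission groups are appended only when the corresponding firmware capability bit is set. The fw_caps struct and build_counter_names() helper below are hypothetical stand-ins for MLX5_CAP_GEN(mdev, out_of_seq_cnt) and MLX5_CAP_GEN(mdev, retransmission_q_counters); the real driver also builds a parallel offsets table per port.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the firmware capability bits that the
 * driver queries with MLX5_CAP_GEN(). */
struct fw_caps {
	int out_of_seq_cnt;
	int retransmission_q_counters;
};

static const char * const basic_cnts[] = {
	"rx_write_requests", "rx_read_requests",
	"rx_atomic_requests", "out_of_buffer",
};
static const char * const out_of_seq_cnts[] = { "out_of_sequence" };
static const char * const retrans_cnts[] = {
	"duplicate_request", "rnr_nak_retry_err", "packet_seq_err",
	"implied_nak_seq_err", "local_ack_timeout_err",
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Build the per-port counter name table: the basic group is always
 * included, the other two groups only when the capability is set. */
static const char **build_counter_names(const struct fw_caps *caps,
					unsigned int *num)
{
	const char **names;
	unsigned int n = ARRAY_SIZE(basic_cnts), i, j = 0;

	if (caps->out_of_seq_cnt)
		n += ARRAY_SIZE(out_of_seq_cnts);
	if (caps->retransmission_q_counters)
		n += ARRAY_SIZE(retrans_cnts);

	names = calloc(n, sizeof(*names));
	if (!names)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(basic_cnts); i++)
		names[j++] = basic_cnts[i];
	if (caps->out_of_seq_cnt)
		for (i = 0; i < ARRAY_SIZE(out_of_seq_cnts); i++)
			names[j++] = out_of_seq_cnts[i];
	if (caps->retransmission_q_counters)
		for (i = 0; i < ARRAY_SIZE(retrans_cnts); i++)
			names[j++] = retrans_cnts[i];

	*num = n;
	return names;
}

int main(void)
{
	/* Example: firmware supports out_of_sequence but not the
	 * retransmission group -> 5 counters are exposed. */
	struct fw_caps caps = { 1, 0 };
	unsigned int num, i;
	const char **names = build_counter_names(&caps, &num);

	if (!names)
		return 1;
	for (i = 0; i < num; i++)
		printf("%s\n", names[i]);
	free(names);
	return 0;
}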
@@ -3072,13 +3072,102 @@ static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
 	mlx5_nic_vport_disable_roce(dev->mdev);
 }
 
+struct mlx5_ib_q_counter {
+	const char *name;
+	size_t offset;
+};
+
+#define INIT_Q_COUNTER(_name)		\
+	{ .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
+
+static const struct mlx5_ib_q_counter basic_q_cnts[] = {
+	INIT_Q_COUNTER(rx_write_requests),
+	INIT_Q_COUNTER(rx_read_requests),
+	INIT_Q_COUNTER(rx_atomic_requests),
+	INIT_Q_COUNTER(out_of_buffer),
+};
+
+static const struct mlx5_ib_q_counter out_of_seq_q_cnts[] = {
+	INIT_Q_COUNTER(out_of_sequence),
+};
+
+static const struct mlx5_ib_q_counter retrans_q_cnts[] = {
+	INIT_Q_COUNTER(duplicate_request),
+	INIT_Q_COUNTER(rnr_nak_retry_err),
+	INIT_Q_COUNTER(packet_seq_err),
+	INIT_Q_COUNTER(implied_nak_seq_err),
+	INIT_Q_COUNTER(local_ack_timeout_err),
+};
+
 static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
 {
 	unsigned int i;
 
-	for (i = 0; i < dev->num_ports; i++)
+	for (i = 0; i < dev->num_ports; i++) {
 		mlx5_core_dealloc_q_counter(dev->mdev,
-					    dev->port[i].q_cnt_id);
+					    dev->port[i].q_cnts.set_id);
+		kfree(dev->port[i].q_cnts.names);
+		kfree(dev->port[i].q_cnts.offsets);
+	}
+}
+
+static int __mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev,
+				      const char ***names,
+				      size_t **offsets,
+				      u32 *num)
+{
+	u32 num_counters;
+
+	num_counters = ARRAY_SIZE(basic_q_cnts);
+
+	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
+		num_counters += ARRAY_SIZE(out_of_seq_q_cnts);
+
+	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
+		num_counters += ARRAY_SIZE(retrans_q_cnts);
+
+	*names = kcalloc(num_counters, sizeof(**names), GFP_KERNEL);
+	if (!*names)
+		return -ENOMEM;
+
+	*offsets = kcalloc(num_counters, sizeof(**offsets), GFP_KERNEL);
+	if (!*offsets)
+		goto err_names;
+
+	*num = num_counters;
+	return 0;
+
+err_names:
+	kfree(*names);
+	return -ENOMEM;
+}
+
+static void mlx5_ib_fill_q_counters(struct mlx5_ib_dev *dev,
+				    const char **names,
+				    size_t *offsets)
+{
+	int i;
+	int j = 0;
+
+	for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
+		names[j] = basic_q_cnts[i].name;
+		offsets[j] = basic_q_cnts[i].offset;
+	}
+
+	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
+		for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
+			names[j] = out_of_seq_q_cnts[i].name;
+			offsets[j] = out_of_seq_q_cnts[i].offset;
+		}
+	}
+
+	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
+		for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
+			names[j] = retrans_q_cnts[i].name;
+			offsets[j] = retrans_q_cnts[i].offset;
+		}
+	}
 }
 
 static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev)
@@ -3087,14 +3176,26 @@ static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev)
 	int ret;
 
 	for (i = 0; i < dev->num_ports; i++) {
+		struct mlx5_ib_port *port = &dev->port[i];
+
 		ret = mlx5_core_alloc_q_counter(dev->mdev,
-						&dev->port[i].q_cnt_id);
+						&port->q_cnts.set_id);
 		if (ret) {
 			mlx5_ib_warn(dev,
 				     "couldn't allocate queue counter for port %d, err %d\n",
 				     i + 1, ret);
 			goto dealloc_counters;
 		}
+
+		ret = __mlx5_ib_alloc_q_counters(dev,
+						 &port->q_cnts.names,
+						 &port->q_cnts.offsets,
+						 &port->q_cnts.num_counters);
+		if (ret)
+			goto dealloc_counters;
+
+		mlx5_ib_fill_q_counters(dev, port->q_cnts.names,
+					port->q_cnts.offsets);
 	}
 
 	return 0;
@@ -3102,62 +3203,39 @@ static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev)
 dealloc_counters:
 	while (--i >= 0)
 		mlx5_core_dealloc_q_counter(dev->mdev,
-					    dev->port[i].q_cnt_id);
+					    dev->port[i].q_cnts.set_id);
 
 	return ret;
 }
 
-static const char * const names[] = {
-	"rx_write_requests",
-	"rx_read_requests",
-	"rx_atomic_requests",
-	"out_of_buffer",
-	"out_of_sequence",
-	"duplicate_request",
-	"rnr_nak_retry_err",
-	"packet_seq_err",
-	"implied_nak_seq_err",
-	"local_ack_timeout_err",
-};
-
-static const size_t stats_offsets[] = {
-	MLX5_BYTE_OFF(query_q_counter_out, rx_write_requests),
-	MLX5_BYTE_OFF(query_q_counter_out, rx_read_requests),
-	MLX5_BYTE_OFF(query_q_counter_out, rx_atomic_requests),
-	MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer),
-	MLX5_BYTE_OFF(query_q_counter_out, out_of_sequence),
-	MLX5_BYTE_OFF(query_q_counter_out, duplicate_request),
-	MLX5_BYTE_OFF(query_q_counter_out, rnr_nak_retry_err),
-	MLX5_BYTE_OFF(query_q_counter_out, packet_seq_err),
-	MLX5_BYTE_OFF(query_q_counter_out, implied_nak_seq_err),
-	MLX5_BYTE_OFF(query_q_counter_out, local_ack_timeout_err),
-};
-
 static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
 						    u8 port_num)
 {
-	BUILD_BUG_ON(ARRAY_SIZE(names) != ARRAY_SIZE(stats_offsets));
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	struct mlx5_ib_port *port = &dev->port[port_num - 1];
 
 	/* We support only per port stats */
 	if (port_num == 0)
 		return NULL;
 
-	return rdma_alloc_hw_stats_struct(names, ARRAY_SIZE(names),
+	return rdma_alloc_hw_stats_struct(port->q_cnts.names,
+					  port->q_cnts.num_counters,
 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 }
 
 static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
 				struct rdma_hw_stats *stats,
-				u8 port, int index)
+				u8 port_num, int index)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	struct mlx5_ib_port *port = &dev->port[port_num - 1];
 	int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
 	void *out;
 	__be32 val;
 	int ret;
 	int i;
 
-	if (!port || !stats)
+	if (!stats)
 		return -ENOSYS;
 
 	out = mlx5_vzalloc(outlen);
@@ -3165,18 +3243,19 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
 		return -ENOMEM;
 
 	ret = mlx5_core_query_q_counter(dev->mdev,
-					dev->port[port - 1].q_cnt_id, 0,
+					port->q_cnts.set_id, 0,
 					out, outlen);
 	if (ret)
 		goto free;
 
-	for (i = 0; i < ARRAY_SIZE(names); i++) {
-		val = *(__be32 *)(out + stats_offsets[i]);
+	for (i = 0; i < port->q_cnts.num_counters; i++) {
+		val = *(__be32 *)(out + port->q_cnts.offsets[i]);
 		stats->value[i] = (u64)be32_to_cpu(val);
 	}
 
 free:
 	kvfree(out);
-	return ARRAY_SIZE(names);
+	return port->q_cnts.num_counters;
 }
 
 static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
@@ -3328,8 +3407,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
 	}
 
-	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt) &&
-	    MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
+	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
 		dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
 		dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
 	}
...
@@ -579,8 +579,15 @@ struct mlx5_ib_resources {
 	struct mutex	mutex;
 };
 
+struct mlx5_ib_q_counters {
+	const char **names;
+	size_t *offsets;
+	u32 num_counters;
+	u16 set_id;
+};
+
 struct mlx5_ib_port {
-	u16 q_cnt_id;
+	struct mlx5_ib_q_counters q_cnts;
 };
 
 struct mlx5_roce {
...
@@ -2784,7 +2784,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 				       qp->port) - 1;
 		mibport = &dev->port[port_num];
 		context->qp_counter_set_usr_page |=
-			cpu_to_be32((u32)(mibport->q_cnt_id) << 24);
+			cpu_to_be32((u32)(mibport->q_cnts.set_id) << 24);
 	}
 
 	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
@@ -2812,7 +2812,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	raw_qp_param.operation = op;
 
 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
-		raw_qp_param.rq_q_ctr_id = mibport->q_cnt_id;
+		raw_qp_param.rq_q_ctr_id = mibport->q_cnts.set_id;
 		raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
 	}
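
A consequence visible from user space: counters registered through rdma_alloc_hw_stats_struct() are exposed as per-port sysfs files, so with this patch only the firmware-supported counters appear. Below is a minimal sketch of reading one of them, assuming the usual hw_counters layout (/sys/class/infiniband/<dev>/ports/<port>/hw_counters/<counter>) and a hypothetical device name mlx5_0:

#include <stdio.h>

/* Read a per-port Q counter through the rdma_hw_stats sysfs
 * interface. A counter from an unsupported group has no file,
 * so fopen() simply fails. */
int main(void)
{
	/* Path assumes a hypothetical device named mlx5_0; adjust as needed. */
	const char *path =
		"/sys/class/infiniband/mlx5_0/ports/1/hw_counters/out_of_sequence";
	unsigned long long val;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");	/* e.g. firmware lacks out_of_seq_cnt */
		return 1;
	}
	if (fscanf(f, "%llu", &val) == 1)
		printf("out_of_sequence = %llu\n", val);
	fclose(f);
	return 0;
}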