Commit e4f418ff authored by Shay Drory, committed by Zheng Zengkai

RDMA/mlx4: Do not map the core_clock page to user space unless enabled

stable inclusion
from stable-5.10.44
commit cb1aa1da04882d1860f733e24aeebdbbc85724d7
bugzilla: 109295
CVE: NA

--------------------------------

commit 404e5a12 upstream.

Currently, when mlx4 maps the hca_core_clock page to user space, the page
contains read-modifiable registers, one of which is a semaphore, alongside
the clock counter. If user space reads at the wrong offset, it can modify
the semaphore and hang the device.

Do not map the hca_core_clock page to user space unless the device has
been put in a backwards-compatibility mode that supports this feature.

After this patch, mlx4 core_clock won't be mapped to user space on the
majority of existing devices and the uverbs device time feature in
ibv_query_rt_values_ex() will be disabled.

Fixes: 52033cfb ("IB/mlx4: Add mmap call to map the hardware clock")
Link: https://lore.kernel.org/r/9632304e0d6790af84b3b706d8c18732bc0d5e27.1622726305.git.leonro@nvidia.com
Signed-off-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Acked-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent b4ae54c6
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -580,12 +580,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
 	props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
 
-	if (!mlx4_is_slave(dev->dev))
-		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
-
 	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
 		resp.response_length += sizeof(resp.hca_core_clock_offset);
-		if (!err && !mlx4_is_slave(dev->dev)) {
+		if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
 			resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
 			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
 		}
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -823,6 +823,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8
 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac
+#define QUERY_DEV_CAP_MAP_CLOCK_TO_USER 0xc1
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
@@ -841,6 +842,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
 	if (mlx4_is_mfunc(dev))
 		disable_unsupported_roce_caps(outbox);
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAP_CLOCK_TO_USER);
+	dev_cap->map_clock_to_user = field & 0x80;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
 	dev_cap->reserved_qps = 1 << (field & 0xf);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -131,6 +131,7 @@ struct mlx4_dev_cap {
 	u32 health_buffer_addrs;
 	struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
 	bool wol_port[MLX4_MAX_PORTS + 1];
+	bool map_clock_to_user;
 };
 
 struct mlx4_func_cap {
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -498,6 +498,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		}
 	}
 
+	dev->caps.map_clock_to_user = dev_cap->map_clock_to_user;
 	dev->caps.uar_page_size = PAGE_SIZE;
 	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
 	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
@@ -1948,6 +1949,11 @@ int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
 	if (mlx4_is_slave(dev))
 		return -EOPNOTSUPP;
 
+	if (!dev->caps.map_clock_to_user) {
+		mlx4_dbg(dev, "Map clock to user is not supported.\n");
+		return -EOPNOTSUPP;
+	}
+
 	if (!params)
 		return -EINVAL;
 
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -631,6 +631,7 @@ struct mlx4_caps {
 	bool wol_port[MLX4_MAX_PORTS + 1];
 	struct mlx4_rate_limit_caps rl_caps;
 	u32 health_buffer_addrs;
+	bool map_clock_to_user;
 };
 
 struct mlx4_buf_list {