Commit b47bd6ea authored by Daniel Jurgens, committed by David S. Miller

{net, ib}/mlx5: Make cache line size determination at runtime.

ARM systems with 64B cache lines have L1_CACHE_BYTES set to 128 at
build time; cache_line_size() returns the correct size at runtime.

Fixes: cf50b5efa2fe ("net/mlx5_core/ib: New device capabilities handling.")
Signed-off-by: Daniel Jurgens <danielj@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent bf911e98
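For context, a minimal standalone sketch of the arithmetic this change moves to runtime (not part of the patch; the 4096B page size and the 64B/128B line sizes below are illustrative assumptions). arm64 kernels build with L1_CACHE_BYTES == 128 so one binary stays safe on 128B-line parts, which means the compile-time constant wastes half of every doorbell page on CPUs whose real line size is 64B:

    /* sketch: doorbell records per page, compile time vs. runtime */
    #include <stdio.h>

    #define PAGE_SIZE 4096                  /* assumed page size */

    static unsigned int db_per_page(unsigned int line_size)
    {
            return PAGE_SIZE / line_size;
    }

    int main(void)
    {
            /* L1_CACHE_BYTES == 128 on arm64 builds */
            printf("compile time (128B): %u doorbells/page\n", db_per_page(128));
            /* cache_line_size() typically reports 64 at runtime */
            printf("runtime      (64B):  %u doorbells/page\n", db_per_page(64));
            return 0;
    }

With the constant, only 32 doorbell records fit per page even when the hardware would allow 64.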
@@ -1019,7 +1019,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
 	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
 		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
-	resp.cache_line_size = L1_CACHE_BYTES;
+	resp.cache_line_size = cache_line_size();
 	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
 	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
 	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
......
@@ -52,7 +52,6 @@ enum {

 enum {
 	MLX5_IB_SQ_STRIDE	= 6,
-	MLX5_IB_CACHE_LINE_SIZE	= 64,
 };

 static const u32 mlx5_ib_opcode[] = {
......
@@ -41,6 +41,13 @@
 #include "mlx5_core.h"

+struct mlx5_db_pgdir {
+	struct list_head	list;
+	unsigned long		*bitmap;
+	__be32			*db_page;
+	dma_addr_t		db_dma;
+};
+
 /* Handling for queue buffers -- we allocate a bunch of memory and
  * register it in a memory region at HCA virtual address 0.
  */
@@ -102,17 +109,28 @@ EXPORT_SYMBOL_GPL(mlx5_buf_free);
 static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
 						 int node)
 {
+	u32 db_per_page = PAGE_SIZE / cache_line_size();
 	struct mlx5_db_pgdir *pgdir;

 	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
 	if (!pgdir)
 		return NULL;

-	bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
+	pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page),
+				sizeof(unsigned long),
+				GFP_KERNEL);
+	if (!pgdir->bitmap) {
+		kfree(pgdir);
+		return NULL;
+	}
+
+	bitmap_fill(pgdir->bitmap, db_per_page);

 	pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
 						       &pgdir->db_dma, node);
 	if (!pgdir->db_page) {
+		kfree(pgdir->bitmap);
 		kfree(pgdir);
 		return NULL;
 	}
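Because db_per_page is no longer a compile-time constant, the bitmap cannot stay a fixed-size DECLARE_BITMAP() member; it is now sized at runtime with kcalloc(BITS_TO_LONGS(db_per_page), ...). A standalone sketch of that sizing, with BITS_TO_LONGS re-defined locally to mirror the kernel macro (the page and line sizes are assumptions):

    /* sketch: how many unsigned longs back an n-bit bitmap */
    #include <stdio.h>
    #include <limits.h>

    #define BITS_PER_LONG    (sizeof(unsigned long) * CHAR_BIT)
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    int main(void)
    {
            /* 4096/64 = 64 doorbells per page -> one 64-bit word */
            printf("words for 64 bits: %lu\n", (unsigned long)BITS_TO_LONGS(64));
            /* 4096/128 = 32 doorbells still round up to one word */
            printf("words for 32 bits: %lu\n", (unsigned long)BITS_TO_LONGS(32));
            return 0;
    }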
@@ -123,18 +141,19 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
 static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
 				    struct mlx5_db *db)
 {
+	u32 db_per_page = PAGE_SIZE / cache_line_size();
 	int offset;
 	int i;

-	i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
-	if (i >= MLX5_DB_PER_PAGE)
+	i = find_first_bit(pgdir->bitmap, db_per_page);
+	if (i >= db_per_page)
 		return -ENOMEM;

 	__clear_bit(i, pgdir->bitmap);

 	db->u.pgdir = pgdir;
 	db->index   = i;
-	offset = db->index * L1_CACHE_BYTES;
+	offset = db->index * cache_line_size();
 	db->db      = pgdir->db_page + offset / sizeof(*pgdir->db_page);
 	db->dma     = pgdir->db_dma  + offset;
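A quick worked example of the offset math, assuming cache_line_size() reports 64: index 5 lands at byte offset 5 * 64 = 320, and since db_page is a __be32 array, db points at element 320 / sizeof(__be32) = 80. As a runnable sketch (the index and line size are illustrative):

    /* sketch: doorbell index -> byte offset -> __be32 element */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned int line   = 64;            /* assumed cache_line_size() */
            unsigned int index  = 5;
            unsigned int offset = index * line;  /* 320 bytes into the page */
            printf("element: %zu\n", (size_t)(offset / sizeof(uint32_t))); /* 80 */
            return 0;
    }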
@@ -181,14 +200,16 @@ EXPORT_SYMBOL_GPL(mlx5_db_alloc);
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
 {
+	u32 db_per_page = PAGE_SIZE / cache_line_size();
+
 	mutex_lock(&dev->priv.pgdir_mutex);

 	__set_bit(db->index, db->u.pgdir->bitmap);

-	if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
+	if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
 		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
 				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
 		list_del(&db->u.pgdir->list);
+		kfree(db->u.pgdir->bitmap);
 		kfree(db->u.pgdir);
 	}
......
@@ -625,10 +625,6 @@ struct mlx5_db {
 	int			index;
 };

-enum {
-	MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
-};
-
 enum {
 	MLX5_COMP_EQ_SIZE = 1024,
 };
@@ -638,13 +634,6 @@ enum {
 	MLX5_PTYS_EN		= 1 << 2,
 };

-struct mlx5_db_pgdir {
-	struct list_head	list;
-	DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
-	__be32			*db_page;
-	dma_addr_t		db_dma;
-};
-
 typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

 struct mlx5_cmd_work_ent {
......