Commit 01137808 authored by Aharon Landau, committed by Jason Gunthorpe

RDMA/mlx5: Rename the mkey cache variables and functions

After replacing the MR cache with an Mkey cache, rename the variables and
functions to fit the new meaning.

Link: https://lore.kernel.org/r/20220726071911.122765-6-michaelgur@nvidia.com
Signed-off-by: Aharon Landau <aharonl@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Parent: 6b753386
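
At a glance, the patch only renames identifiers; the cache layout, entry indexing and locking are untouched. A minimal summary of the renamed declarations, collected from the hunks below (only names that appear in this diff are listed):

/* Renamed types and entry points (new name, old name in the comment): */
struct mlx5_mkey_cache;                                          /* was struct mlx5_mr_cache */
int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);               /* was mlx5_mr_cache_init() */
int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);            /* was mlx5_mr_cache_cleanup() */
void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent); /* was mlx5_odp_init_mr_cache_entry() */
/* Enum constants: MKEY_CACHE_LAST_STD_ENTRY (was MR_CACHE_LAST_STD_ENTRY) and
 * MAX_MKEY_CACHE_ENTRIES (was MAX_MR_CACHE_ENTRIES); static helpers in mr.c
 * follow the same pattern (mkey_cache_max_order(), mkey_cache_ent_from_order()). */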
@@ -4002,7 +4002,7 @@ static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
 {
 	int err;
-	err = mlx5_mr_cache_cleanup(dev);
+	err = mlx5_mkey_cache_cleanup(dev);
 	if (err)
 		mlx5_ib_warn(dev, "mr cache cleanup failed\n");
@@ -4022,7 +4022,7 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 	if (ret)
 		return ret;
-	ret = mlx5_mr_cache_init(dev);
+	ret = mlx5_mkey_cache_init(dev);
 	if (ret) {
 		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
 		mlx5r_umr_resource_cleanup(dev);
...
@@ -764,9 +764,9 @@ struct mlx5r_async_create_mkey {
 	u32 mkey;
 };
-struct mlx5_mr_cache {
+struct mlx5_mkey_cache {
 	struct workqueue_struct *wq;
-	struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
+	struct mlx5_cache_ent ent[MAX_MKEY_CACHE_ENTRIES];
 	struct dentry *root;
 	unsigned long last_add;
 };
@@ -1065,7 +1065,7 @@ struct mlx5_ib_dev {
 	struct mlx5_ib_resources devr;
 	atomic_t mkey_var;
-	struct mlx5_mr_cache cache;
+	struct mlx5_mkey_cache cache;
 	struct timer_list delay_timer;
 	/* Prevents soft lock on massive reg MRs */
 	struct mutex slow_path_mutex;
@@ -1310,8 +1310,8 @@ void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
 			  u64 access_flags);
 void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
 int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
-int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
-int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
+int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 				       struct mlx5_cache_ent *ent,
@@ -1339,7 +1339,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
 void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
 int __init mlx5_ib_odp_init(void);
 void mlx5_ib_odp_cleanup(void);
-void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
+void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent);
 void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
 			   struct mlx5_ib_mr *mr, int flags);
@@ -1358,7 +1358,7 @@ static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
 static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
 static inline int mlx5_ib_odp_init(void) { return 0; }
 static inline void mlx5_ib_odp_cleanup(void) {}
-static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
+static inline void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent) {}
 static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
 					 struct mlx5_ib_mr *mr, int flags) {}
...
@@ -119,7 +119,7 @@ static int mlx5_ib_create_mkey_cb(struct mlx5r_async_create_mkey *async_create)
 			       &async_create->cb_work);
 }
-static int mr_cache_max_order(struct mlx5_ib_dev *dev);
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev);
 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
@@ -515,11 +515,11 @@ static const struct file_operations limit_fops = {
 	.read	= limit_read,
 };
-static bool someone_adding(struct mlx5_mr_cache *cache)
+static bool someone_adding(struct mlx5_mkey_cache *cache)
 {
 	unsigned int i;
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
 		struct mlx5_cache_ent *ent = &cache->ent[i];
 		bool ret;
@@ -569,7 +569,7 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
 static void __cache_work_func(struct mlx5_cache_ent *ent)
 {
 	struct mlx5_ib_dev *dev = ent->dev;
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 	int err;
 	xa_lock_irq(&ent->mkeys);
@@ -681,7 +681,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	u32 mkey;
@@ -696,7 +696,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 	xa_unlock_irq(&ent->mkeys);
 }
-static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
 {
 	if (!mlx5_debugfs_root || dev->is_rep)
 		return;
@@ -705,9 +705,9 @@ static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
 	dev->cache.root = NULL;
 }
-static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
+static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent;
 	struct dentry *dir;
 	int i;
@@ -717,7 +717,7 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
 	cache->root = debugfs_create_dir("mr_cache", mlx5_debugfs_get_dev_root(dev->mdev));
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
 		ent = &cache->ent[i];
 		sprintf(ent->name, "%d", ent->order);
 		dir = debugfs_create_dir(ent->name, cache->root);
@@ -735,9 +735,9 @@ static void delay_time_func(struct timer_list *t)
 	WRITE_ONCE(dev->fill_delay, 0);
 }
-int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent;
 	int i;
@@ -750,7 +750,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
 	timer_setup(&dev->delay_timer, delay_time_func, 0);
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
 		ent = &cache->ent[i];
 		xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
 		ent->order = i + 2;
@@ -759,12 +759,12 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
-		if (i > MR_CACHE_LAST_STD_ENTRY) {
-			mlx5_odp_init_mr_cache_entry(ent);
+		if (i > MKEY_CACHE_LAST_STD_ENTRY) {
+			mlx5_odp_init_mkey_cache_entry(ent);
 			continue;
 		}
-		if (ent->order > mr_cache_max_order(dev))
+		if (ent->order > mkey_cache_max_order(dev))
 			continue;
 		ent->page = PAGE_SHIFT;
@@ -781,19 +781,19 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		xa_unlock_irq(&ent->mkeys);
 	}
-	mlx5_mr_cache_debugfs_init(dev);
+	mlx5_mkey_cache_debugfs_init(dev);
 	return 0;
 }
-int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
+int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
 {
 	unsigned int i;
 	if (!dev->cache.wq)
 		return 0;
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
 		struct mlx5_cache_ent *ent = &dev->cache.ent[i];
 		xa_lock_irq(&ent->mkeys);
@@ -802,10 +802,10 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 		cancel_delayed_work_sync(&ent->dwork);
 	}
-	mlx5_mr_cache_debugfs_cleanup(dev);
+	mlx5_mkey_cache_debugfs_cleanup(dev);
 	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++)
 		clean_keys(dev, i);
 	destroy_workqueue(dev->cache.wq);
@@ -872,22 +872,22 @@ static int get_octo_len(u64 addr, u64 len, int page_shift)
 	return (npages + 1) / 2;
 }
-static int mr_cache_max_order(struct mlx5_ib_dev *dev)
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev)
 {
 	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
-		return MR_CACHE_LAST_STD_ENTRY + 2;
+		return MKEY_CACHE_LAST_STD_ENTRY + 2;
 	return MLX5_MAX_UMR_SHIFT;
 }
-static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
+static struct mlx5_cache_ent *mkey_cache_ent_from_order(struct mlx5_ib_dev *dev,
 						       unsigned int order)
 {
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 	if (order < cache->ent[0].order)
 		return &cache->ent[0];
 	order = order - cache->ent[0].order;
-	if (order > MR_CACHE_LAST_STD_ENTRY)
+	if (order > MKEY_CACHE_LAST_STD_ENTRY)
 		return NULL;
 	return &cache->ent[order];
 }
@@ -930,7 +930,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 					     0, iova);
 	if (WARN_ON(!page_size))
 		return ERR_PTR(-EINVAL);
-	ent = mr_cache_ent_from_order(
+	ent = mkey_cache_ent_from_order(
 		dev, order_base_2(ib_umem_num_dma_blocks(umem, page_size)));
 	/*
 	 * Matches access in alloc_cache_mr(). If the MR can't come from the
...
@@ -1588,7 +1588,7 @@ mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 	return err;
 }
-void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
+void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent)
 {
 	if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
 		return;
...
@@ -728,10 +728,10 @@ enum {
 };
 enum {
-	MR_CACHE_LAST_STD_ENTRY = 20,
+	MKEY_CACHE_LAST_STD_ENTRY = 20,
 	MLX5_IMR_MTT_CACHE_ENTRY,
 	MLX5_IMR_KSM_CACHE_ENTRY,
-	MAX_MR_CACHE_ENTRIES
+	MAX_MKEY_CACHE_ENTRIES
 };
 struct mlx5_profile {
@@ -740,7 +740,7 @@ struct mlx5_profile {
 	struct {
 		int size;
 		int limit;
-	} mr_cache[MAX_MR_CACHE_ENTRIES];
+	} mr_cache[MAX_MKEY_CACHE_ENTRIES];
 };
 struct mlx5_hca_cap {
...
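
For orientation, a caller-side sketch of the renamed init/cleanup pair as driven from the device stage callbacks, reconstructed from the first two hunks above. The function names below are illustrative stand-ins, and anything outside those hunks (such as how the UMR resources are set up beforehand) is an assumption, not part of this patch:

/* Sketch only: mirrors the post-UMR-init and pre-UMR-cleanup stages after the rename. */
static int post_ib_reg_umr_init_sketch(struct mlx5_ib_dev *dev)
{
	int ret;

	/* UMR resources are assumed to be initialized before this point. */
	ret = mlx5_mkey_cache_init(dev);              /* was mlx5_mr_cache_init() */
	if (ret) {
		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
		mlx5r_umr_resource_cleanup(dev);      /* unwind, as in the hunk above */
	}
	return ret;
}

static void pre_ib_reg_umr_cleanup_sketch(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_mkey_cache_cleanup(dev);           /* was mlx5_mr_cache_cleanup() */
	if (err)
		mlx5_ib_warn(dev, "mr cache cleanup failed\n");
}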