Commit 4a88f9b4 authored by Guo Mengqi, committed by Zhong Jinghua

mm: sharepool: add static modifier to find_spg_node_by_spg()

hulk inclusion
category: cleanup
bugzilla: https://gitee.com/openeuler/kernel/issues/I6D0N5

-------------------------------

Add the static modifier to find_spg_node_by_spg(), since this function is
designed as an internal helper and should only be used within the
share_pool.c scope.
Also add the static modifier to spa_overview_show() and spg_overview_show().
Signed-off-by: Guo Mengqi <guomengqi3@huawei.com>
Parent 03aaeca8
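For readers unfamiliar with the pattern, the patch simply gives internal linkage to helpers that are referenced only inside one translation unit. Below is a minimal sketch of that pattern with a hypothetical file and helper name; it is not the actual share_pool.c code.

/* sketch.c -- illustrative only; hypothetical helper, not from share_pool.c */
#include <stdio.h>

/*
 * 'static' gives internal_helper() internal linkage: the symbol is visible
 * only inside this translation unit, cannot collide with an identically
 * named function defined in another file, and needs no declaration in a
 * shared header.
 */
static int internal_helper(int x)
{
        return x * 2;
}

int main(void)
{
        printf("internal_helper(21) = %d\n", internal_helper(21));
        return 0;
}

Besides documenting that the helper is file-local, internal linkage keeps the name out of the global symbol namespace and lets the compiler inline or discard the function more aggressively.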
...@@ -114,7 +114,6 @@ struct sp_meminfo { ...@@ -114,7 +114,6 @@ struct sp_meminfo {
}; };
#ifndef __GENKSYMS__ #ifndef __GENKSYMS__
enum sp_mapping_type { enum sp_mapping_type {
SP_MAPPING_START, SP_MAPPING_START,
SP_MAPPING_DVPP = SP_MAPPING_START, SP_MAPPING_DVPP = SP_MAPPING_START,
...@@ -465,11 +464,13 @@ static int sp_mapping_group_setup(struct mm_struct *mm, struct sp_group *spg) ...@@ -465,11 +464,13 @@ static int sp_mapping_group_setup(struct mm_struct *mm, struct sp_group *spg)
if (is_mapping_empty(local_dvpp_mapping)) { if (is_mapping_empty(local_dvpp_mapping)) {
sp_mapping_merge(spg_dvpp_mapping, local_dvpp_mapping); sp_mapping_merge(spg_dvpp_mapping, local_dvpp_mapping);
if (is_conflict) if (is_conflict)
pr_warn_ratelimited("task address space conflict, spg_id=%d\n", spg->id); pr_warn_ratelimited("task address space conflict, spg_id=%d\n",
spg->id);
} else if (is_mapping_empty(spg_dvpp_mapping)) { } else if (is_mapping_empty(spg_dvpp_mapping)) {
sp_mapping_merge(local_dvpp_mapping, spg_dvpp_mapping); sp_mapping_merge(local_dvpp_mapping, spg_dvpp_mapping);
if (is_conflict) if (is_conflict)
pr_warn_ratelimited("group address space conflict, spg_id=%d\n", spg->id); pr_warn_ratelimited("group address space conflict, spg_id=%d\n",
spg->id);
} else { } else {
pr_info_ratelimited("Duplicate address space, id=%d\n", spg->id); pr_info_ratelimited("Duplicate address space, id=%d\n", spg->id);
return -EINVAL; return -EINVAL;
...@@ -823,7 +824,7 @@ static void update_mem_usage(unsigned long size, bool inc, bool is_hugepage, ...@@ -823,7 +824,7 @@ static void update_mem_usage(unsigned long size, bool inc, bool is_hugepage,
} }
} }
struct sp_group_node *find_spg_node_by_spg(struct mm_struct *mm, static struct sp_group_node *find_spg_node_by_spg(struct mm_struct *mm,
struct sp_group *spg) struct sp_group *spg)
{ {
struct sp_group_node *spg_node; struct sp_group_node *spg_node;
...@@ -2443,7 +2444,6 @@ static int sp_alloc_populate(struct mm_struct *mm, struct sp_area *spa, ...@@ -2443,7 +2444,6 @@ static int sp_alloc_populate(struct mm_struct *mm, struct sp_area *spa,
* page fault later on, and more importantly sp_make_share_u2k() * page fault later on, and more importantly sp_make_share_u2k()
* depends on this feature (and MAP_LOCKED) to work correctly. * depends on this feature (and MAP_LOCKED) to work correctly.
*/ */
return do_mm_populate(mm, spa->va_start, ac->populate, 0); return do_mm_populate(mm, spa->va_start, ac->populate, 0);
} }
...@@ -2464,7 +2464,6 @@ static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa, ...@@ -2464,7 +2464,6 @@ static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa,
int ret; int ret;
ret = sp_alloc_mmap(mm, spa, spg_node, ac); ret = sp_alloc_mmap(mm, spa, spg_node, ac);
if (ret < 0) if (ret < 0)
return ret; return ret;
...@@ -2486,6 +2485,7 @@ static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa, ...@@ -2486,6 +2485,7 @@ static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa,
pr_warn_ratelimited("allocation failed due to mm populate failed(potential no enough memory when -12): %d\n", pr_warn_ratelimited("allocation failed due to mm populate failed(potential no enough memory when -12): %d\n",
ret); ret);
} }
return ret; return ret;
} }
...@@ -2538,8 +2538,7 @@ static int sp_alloc_mmap_populate(struct sp_area *spa, ...@@ -2538,8 +2538,7 @@ static int sp_alloc_mmap_populate(struct sp_area *spa,
sp_fallocate(spa); sp_fallocate(spa);
/* if hugepage allocation fails, this will transfer to normal page /* if hugepage allocation fails, this will transfer to normal page
* and try again. (only if SP_HUGEPAGE_ONLY is not flagged * and try again. (only if SP_HUGEPAGE_ONLY is not flagged */
*/
sp_alloc_fallback(spa, ac); sp_alloc_fallback(spa, ac);
return mmap_ret; return mmap_ret;
...@@ -3796,7 +3795,7 @@ static void spa_dvpp_stat_show(struct seq_file *seq) ...@@ -3796,7 +3795,7 @@ static void spa_dvpp_stat_show(struct seq_file *seq)
} }
void spa_overview_show(struct seq_file *seq) static void spa_overview_show(struct seq_file *seq)
{ {
unsigned int total_num, alloc_num, k2u_task_num, k2u_spg_num; unsigned int total_num, alloc_num, k2u_task_num, k2u_spg_num;
unsigned long total_size, alloc_size, k2u_task_size, k2u_spg_size; unsigned long total_size, alloc_size, k2u_task_size, k2u_spg_size;
...@@ -3852,7 +3851,7 @@ static int spg_info_show(int id, void *p, void *data) ...@@ -3852,7 +3851,7 @@ static int spg_info_show(int id, void *p, void *data)
return 0; return 0;
} }
void spg_overview_show(struct seq_file *seq) static void spg_overview_show(struct seq_file *seq)
{ {
if (!sp_is_enabled()) if (!sp_is_enabled())
return; return;
...@@ -4121,7 +4120,7 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm, ...@@ -4121,7 +4120,7 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
goto out; goto out;
} }
/* /**
* The caller must ensure that this function is called * The caller must ensure that this function is called
* when the last thread in the thread group exits. * when the last thread in the thread group exits.
*/ */
......