Commit fa59b0a8 authored by Chen Wandun, committed by Zheng Zengkai

mm: Add support for limiting the usage of reliable memory in pagecache

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

--------------------------------

Add the interface /proc/sys/vm/reliable_pagecache_max_bytes to set the
maximum size of the reliable page cache; the maximum size cannot exceed
the total amount of reliable RAM.
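
For example, a privileged process can cap the reliable page cache at 1 GiB
by writing to the new file (a minimal user-space sketch; the value is in
bytes, and a write larger than total reliable memory fails with EINVAL,
which surfaces when the stream is flushed):

/* Minimal sketch: cap the reliable page cache at 1 GiB via the new sysctl. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/reliable_pagecache_max_bytes", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%llu\n", 1ULL << 30);	/* 1073741824 bytes */
	return fclose(f) ? 1 : 0;	/* an out-of-range write reports its error here */
}

This is equivalent to echo 1073741824 > /proc/sys/vm/reliable_pagecache_max_bytes.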

The whole reliable memory feature depends on kernelcore=mirror, which in
turn depends on NUMA, so remove the redundant code for UMA.
Signed-off-by: Chen Wandun <chenwandun@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Parent a698ac0b
@@ -19,6 +19,7 @@ bool shmem_reliable __read_mostly = true;
 bool pagecache_use_reliable_mem __read_mostly = true;
 struct percpu_counter pagecache_reliable_pages;
 struct percpu_counter anon_reliable_pages;
+static unsigned long reliable_pagecache_max_bytes = ULONG_MAX;
 
 bool mem_reliable_counter_initialized(void)
 {
@@ -65,12 +66,18 @@ void reliable_lru_add(enum lru_list lru, struct page *page, int val)
 void page_cache_prepare_alloc(gfp_t *gfp)
 {
+	s64 nr_reliable = 0;
+
 	if (!mem_reliable_is_enabled())
 		return;
 
 	if (!pagecache_reliable_is_enabled())
 		goto no_reliable;
 
+	nr_reliable = percpu_counter_read_positive(&pagecache_reliable_pages);
+	if (nr_reliable > reliable_pagecache_max_bytes >> PAGE_SHIFT)
+		goto no_reliable;
+
 	*gfp |= GFP_RELIABLE;
 	return;
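
Taken together, the allocation-side check now reads roughly as follows.
This is a sketch rather than part of the diff: the no_reliable: label sits
outside the hunk above, and it is assumed here to strip GFP_RELIABLE so the
allocation falls back to ordinary memory.

/* Sketch of page_cache_prepare_alloc() after this patch; the no_reliable:
 * branch is assumed, since it is outside the hunk shown above. */
void page_cache_prepare_alloc(gfp_t *gfp)
{
	s64 nr_reliable = 0;

	if (!mem_reliable_is_enabled())
		return;

	if (!pagecache_reliable_is_enabled())
		goto no_reliable;

	/* The counter is in pages; the sysctl limit is in bytes, hence the shift. */
	nr_reliable = percpu_counter_read_positive(&pagecache_reliable_pages);
	if (nr_reliable > reliable_pagecache_max_bytes >> PAGE_SHIFT)
		goto no_reliable;

	*gfp |= GFP_RELIABLE;
	return;

no_reliable:
	*gfp &= ~GFP_RELIABLE;	/* assumed: fall back to ordinary memory */
}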
@@ -171,11 +178,55 @@ void reliable_report_meminfo(struct seq_file *m)
 	}
 }
 
+int reliable_pagecache_max_bytes_write(struct ctl_table *table, int write,
+		void __user *buffer, size_t *length, loff_t *ppos)
+{
+	unsigned long old_value = reliable_pagecache_max_bytes;
+	int ret;
+
+	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+	if (!ret && write) {
+		if (reliable_pagecache_max_bytes >
+				PAGES_TO_B(total_reliable_pages())) {
+			reliable_pagecache_max_bytes = old_value;
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+static struct ctl_table reliable_ctl_table[] = {
+	{
+		.procname	= "reliable_pagecache_max_bytes",
+		.data		= &reliable_pagecache_max_bytes,
+		.maxlen		= sizeof(reliable_pagecache_max_bytes),
+		.mode		= 0644,
+		.proc_handler	= reliable_pagecache_max_bytes_write,
+	},
+	{}
+};
+
+static struct ctl_table reliable_dir_table[] = {
+	{
+		.procname	= "vm",
+		.maxlen		= 0,
+		.mode		= 0555,
+		.child		= reliable_ctl_table,
+	},
+	{}
+};
+
 static int __init reliable_sysctl_init(void)
 {
 	if (!mem_reliable_is_enabled())
 		return 0;
 
+	if (!register_sysctl_table(reliable_dir_table)) {
+		pr_err("register sysctl failed.");
+		return -ENOMEM;
+	}
+
 	percpu_counter_init(&pagecache_reliable_pages, 0, GFP_KERNEL);
 	percpu_counter_init(&anon_reliable_pages, 0, GFP_KERNEL);
......
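
The handler above compares a byte limit against a page count, so it depends
on a bytes-per-page conversion. PAGES_TO_B() is defined outside this diff;
the self-contained sketch below assumes the conventional pages << PAGE_SHIFT
definition in order to illustrate the two conversions involved:

/* Self-contained illustration of the byte/page conversions used above.
 * PAGES_TO_B() is assumed to be (pages << PAGE_SHIFT); its real
 * definition is not part of this diff. */
#include <stdio.h>

#define PAGE_SHIFT	12	/* 4 KiB pages, the common configuration */
#define PAGES_TO_B(pages)	((unsigned long)(pages) << PAGE_SHIFT)

int main(void)
{
	unsigned long limit_bytes = 1UL << 30;	/* e.g. a 1 GiB limit */

	/* page_cache_prepare_alloc() turns the byte limit into pages... */
	printf("limit: %lu bytes = %lu pages\n",
	       limit_bytes, limit_bytes >> PAGE_SHIFT);

	/* ...while the sysctl handler turns the reliable page total into bytes. */
	printf("262144 pages = %lu bytes\n", PAGES_TO_B(262144UL));
	return 0;
}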