Commit 6943b93b authored by Chen Wandun, committed by Yang Yingliang

mm: add support for limiting the usage of reliable memory in pagecache

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

--------------------------------

Add the interface /proc/sys/vm/reliable_pagecache_max_bytes to set the
maximum size of the reliable page cache; the maximum size cannot exceed
the total amount of reliable RAM.

The whole reliable memory feature depends on kernelcore=mirror, which in
turn depends on NUMA, so remove the redundant code in the UMA path.
Signed-off-by: Chen Wandun <chenwandun@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 2238a7c8
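For reference, a minimal userspace sketch of exercising the new knob described above. It assumes a kernel built with this patch and booted with kernelcore=mirror; the 2 GiB value is purely illustrative. A value larger than total reliable memory is rejected with EINVAL and the previous limit is kept.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/proc/sys/vm/reliable_pagecache_max_bytes";
	const char *val = "2147483648\n";	/* 2 GiB, illustrative only */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");		/* feature not present, or no permission */
		return 1;
	}
	/* The handler rejects values above total reliable memory with
	 * EINVAL and restores the previous limit. */
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}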
@@ -41,6 +41,7 @@ extern bool mem_reliable_status(void);
 extern void page_cache_reliable_lru_add(enum lru_list lru, struct page *page,
 					int val);
+extern void page_cache_prepare_alloc(gfp_t *gfp);
 
 static inline bool mem_reliable_is_enabled(void)
 {
@@ -175,6 +176,7 @@ static inline bool mem_reliable_shmem_limit_check(void) { return true; }
 static inline void page_cache_reliable_lru_add(enum lru_list lru,
 					       struct page *page,
 					       int val) {}
+static inline void page_cache_prepare_alloc(gfp_t *gfp) {}
 #endif
 #endif
@@ -249,15 +249,7 @@ extern struct page *__page_cache_alloc(gfp_t gfp);
 #else
 static inline struct page *__page_cache_alloc(gfp_t gfp)
 {
-	struct page *page;
-
-	if (pagecache_reliable_is_enabled())
-		gfp |= ___GFP_RELIABILITY;
-
-	page = alloc_pages(gfp, 0);
-	page_cache_fallback_inc(gfp, page);
-
-	return page;
+	return alloc_pages(gfp, 0);
 }
 #endif
@@ -1040,10 +1040,7 @@ struct page *__page_cache_alloc(gfp_t gfp)
 	int n;
 	struct page *page;
 
-	if (pagecache_reliable_is_enabled())
-		gfp |= ___GFP_RELIABILITY;
-	else
-		WARN_ON_ONCE(gfp & ___GFP_RELIABILITY);
+	page_cache_prepare_alloc(&gfp);
 
 	if (cpuset_do_page_mem_spread()) {
 		unsigned int cpuset_mems_cookie;
@@ -37,6 +37,10 @@ long shmem_reliable_nr_page = LONG_MAX;
 bool pagecache_use_reliable_mem __read_mostly = true;
 atomic_long_t page_cache_fallback = ATOMIC_LONG_INIT(0);
 DEFINE_PER_CPU(long, pagecache_reliable_pages);
+
+static unsigned long zero;
+static unsigned long reliable_pagecache_max_bytes = ULONG_MAX;
+
 bool mem_reliable_status(void)
 {
 	return mem_reliable_is_enabled();
@@ -394,6 +398,23 @@ int reliable_shmem_bytes_limit_handler(struct ctl_table *table, int write,
 }
 #endif
 
+int reliable_pagecache_max_bytes_write(struct ctl_table *table, int write,
+		void __user *buffer, size_t *length, loff_t *ppos)
+{
+	unsigned long old_value = reliable_pagecache_max_bytes;
+	int ret;
+
+	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+	if (!ret && write) {
+		if (reliable_pagecache_max_bytes > total_reliable_mem_sz()) {
+			reliable_pagecache_max_bytes = old_value;
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
 static struct ctl_table reliable_ctl_table[] = {
 	{
 		.procname = "task_reliable_limit",
@@ -425,6 +446,14 @@ static struct ctl_table reliable_ctl_table[] = {
 		.proc_handler = reliable_shmem_bytes_limit_handler,
 	},
 #endif
+	{
+		.procname = "reliable_pagecache_max_bytes",
+		.data = &reliable_pagecache_max_bytes,
+		.maxlen = sizeof(reliable_pagecache_max_bytes),
+		.mode = 0644,
+		.proc_handler = reliable_pagecache_max_bytes_write,
+		.extra1 = &zero,
+	},
 	{}
 };
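Note the store-then-validate shape of reliable_pagecache_max_bytes_write(): proc_doulongvec_minmax() parses the input and stores it through table->data (with extra1 = &zero supplying the lower bound) before the handler can check it, so an over-limit value has to be rolled back to the saved old_value. Below is a standalone userspace analogue of that pattern; set_limit() and the bounds are illustrative, not kernel API.

#include <errno.h>
#include <stdio.h>

static unsigned long limit_bytes = ~0UL;	/* mirrors the ULONG_MAX default */

static int set_limit(unsigned long new_val, unsigned long total_reliable)
{
	unsigned long old = limit_bytes;

	limit_bytes = new_val;		/* the parser stores first... */
	if (limit_bytes > total_reliable) {
		limit_bytes = old;	/* ...so validation must roll back */
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", set_limit(1UL << 30, 1UL << 33));	/* 0: accepted */
	printf("%d\n", set_limit(1UL << 34, 1UL << 33));	/* -22: rejected */
	printf("%lu\n", limit_bytes);	/* still 1 GiB after the rollback */
	return 0;
}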
@@ -438,6 +467,30 @@ static struct ctl_table reliable_dir_table[] = {
 	{}
 };
 
+void page_cache_prepare_alloc(gfp_t *gfp)
+{
+	long nr_reliable = 0;
+	int cpu;
+
+	if (!mem_reliable_is_enabled())
+		return;
+
+	for_each_possible_cpu(cpu)
+		nr_reliable += per_cpu(pagecache_reliable_pages, cpu);
+
+	if (nr_reliable < 0)
+		goto no_reliable;
+
+	if (nr_reliable > reliable_pagecache_max_bytes >> PAGE_SHIFT)
+		goto no_reliable;
+
+	*gfp |= ___GFP_RELIABILITY;
+	return;
+
+no_reliable:
+	*gfp &= ~___GFP_RELIABILITY;
+}
+
 static int __init reliable_sysctl_init(void)
 {
 	if (!mem_reliable_is_enabled())
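page_cache_prepare_alloc() sums the per-CPU pagecache_reliable_pages counters and grants ___GFP_RELIABILITY only while the total stays under the configured byte limit converted to pages (reliable_pagecache_max_bytes >> PAGE_SHIFT); a transiently negative sum, possible because the counters are updated per-CPU without global synchronization, is treated the same as exceeding the limit. Here is a standalone userspace sketch of that decision, where NR_CPUS, PAGE_SHIFT and GFP_RELIABLE are illustrative stand-ins rather than the kernel definitions.

#include <stdio.h>

#define NR_CPUS		4
#define PAGE_SHIFT	12
#define GFP_RELIABLE	0x1u		/* stand-in for ___GFP_RELIABILITY */

static long pagecache_reliable_pages[NR_CPUS];
static unsigned long reliable_pagecache_max_bytes = 1UL << 30;	/* 1 GiB */

static void prepare_alloc(unsigned int *gfp)
{
	long nr_reliable = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		nr_reliable += pagecache_reliable_pages[cpu];

	/* A negative or over-limit total disables the reliable hint,
	 * mirroring the two "goto no_reliable" paths in the patch. */
	if (nr_reliable < 0 ||
	    nr_reliable > (long)(reliable_pagecache_max_bytes >> PAGE_SHIFT))
		*gfp &= ~GFP_RELIABLE;
	else
		*gfp |= GFP_RELIABLE;
}

int main(void)
{
	unsigned int gfp = 0;

	pagecache_reliable_pages[0] = 1000;
	prepare_alloc(&gfp);
	printf("under limit: gfp=%#x\n", gfp);	/* hint set */

	pagecache_reliable_pages[1] = 1L << 20;	/* past 1 GiB worth of pages */
	prepare_alloc(&gfp);
	printf("over limit:  gfp=%#x\n", gfp);	/* hint cleared */
	return 0;
}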