Commit 7a36955e authored by Ma Wupeng, committed by Yang Yingliang

mm: Introduce shmem mirrored memory limit for memory reliable

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

------------------------------------------

This limit restricts the amount of mirrored memory that shmem can use.
Once the limit is reached, shmem allocation returns no memory if
reliable fallback is off, or falls back to the non-mirrored region if
reliable fallback is on.

The limit can be set or read via /proc/sys/vm/shmem_reliable_bytes_limit.
Its default value is LONG_MAX, and it can be set to any value from 0 to
the total size of mirrored memory.
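
For example, the following userspace sketch caps mirrored shmem usage at
4 GiB (the value is illustrative; per the handler below, writes larger
than the total mirrored memory size are rejected with EINVAL). It is
equivalent to: echo 4294967296 > /proc/sys/vm/shmem_reliable_bytes_limit

/* Sketch: set the mirrored shmem limit from userspace. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *limit = "4294967296";	/* bytes; kernel rounds down to pages */
	int fd = open("/proc/sys/vm/shmem_reliable_bytes_limit", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, limit, strlen(limit)) < 0)
		perror("write");	/* EINVAL if above total mirrored memory */
	close(fd);
	return 0;
}
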
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 45bd608e
@@ -24,6 +24,7 @@ extern bool pagecache_use_reliable_mem;
 extern atomic_long_t page_cache_fallback;
 DECLARE_PER_CPU(long, nr_reliable_buddy_pages);
 extern unsigned long nr_reliable_reserve_pages __read_mostly;
+extern long shmem_reliable_nr_page __read_mostly;
 
 extern void page_cache_fallback_inc(gfp_t gfp, struct page *page);
 extern void add_reliable_mem_size(long sz);
@@ -120,6 +121,12 @@ static inline bool mem_reliable_watermark_ok(int nr_page)
 	return sum > nr_reliable_reserve_pages;
 }
+
+static inline bool mem_reliable_shmem_limit_check(void)
+{
+	return percpu_counter_read_positive(&reliable_shmem_used_nr_page) <
+	       shmem_reliable_nr_page;
+}
 
 #else
 #define reliable_enabled 0
 #define reliable_allow_fb_enabled() false
@@ -161,6 +168,7 @@ static inline bool pagecache_reliable_is_enabled(void) { return false; }
 static inline bool mem_reliable_status(void) { return false; }
 static inline void mem_reliable_buddy_counter(struct page *page, int nr_page) {}
 static inline bool mem_reliable_watermark_ok(int nr_page) { return true; }
+static inline bool mem_reliable_shmem_limit_check(void) { return true; }
 #endif
 
 #endif
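
For illustration, here is a standalone userspace model of the limit check
above (a sketch: the kernel's percpu_counter_read_positive() read of
reliable_shmem_used_nr_page is replaced by a plain variable, and a 4 GiB
limit with 4 KiB pages is assumed):

#include <limits.h>
#include <stdio.h>

static long shmem_reliable_nr_page = LONG_MAX;	/* default: no effective limit */
static long reliable_shmem_used_nr_page;	/* stand-in for the percpu counter */

static int mem_reliable_shmem_limit_check(void)
{
	/* Mirrored allocation is allowed while usage is strictly below the limit. */
	return reliable_shmem_used_nr_page < shmem_reliable_nr_page;
}

int main(void)
{
	shmem_reliable_nr_page = 1048576;	/* 4 GiB / 4 KiB pages */

	reliable_shmem_used_nr_page = 1048575;
	printf("below limit -> mirrored alloc allowed: %d\n",
	       mem_reliable_shmem_limit_check());	/* prints 1 */

	reliable_shmem_used_nr_page = 1048576;
	printf("at limit -> mirrored alloc allowed: %d\n",
	       mem_reliable_shmem_limit_check());	/* prints 0 */
	return 0;
}

Note that the percpu counter read is approximate and lock-free, so the
limit may be slightly overshot under concurrency; this is a soft cap.
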
@@ -32,6 +32,7 @@ bool shmem_reliable __read_mostly = true;
 struct percpu_counter reliable_shmem_used_nr_page __read_mostly;
 DEFINE_PER_CPU(long, nr_reliable_buddy_pages);
 unsigned long nr_reliable_reserve_pages = MEM_RELIABLE_RESERVE_MIN / PAGE_SIZE;
+long shmem_reliable_nr_page = LONG_MAX;
 
 bool pagecache_use_reliable_mem __read_mostly = true;
 atomic_long_t page_cache_fallback = ATOMIC_LONG_INIT(0);
@@ -342,6 +343,30 @@ int reliable_reserve_size_handler(struct ctl_table *table, int write,
 	return ret;
 }
+
+#ifdef CONFIG_SHMEM
+static unsigned long sysctl_shmem_reliable_bytes_limit = ULONG_MAX;
+
+int reliable_shmem_bytes_limit_handler(struct ctl_table *table, int write,
+	void __user *buffer, size_t *length, loff_t *ppos)
+{
+	unsigned long *data_ptr = (unsigned long *)(table->data);
+	unsigned long old = *data_ptr;
+	int ret;
+
+	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+	if (ret == 0 && write) {
+		if (*data_ptr > total_reliable_mem_sz()) {
+			*data_ptr = old;
+			return -EINVAL;
+		}
+
+		shmem_reliable_nr_page = *data_ptr >> PAGE_SHIFT;
+	}
+
+	return ret;
+}
+#endif
 
 static struct ctl_table reliable_ctl_table[] = {
 	{
 		.procname = "task_reliable_limit",
@@ -364,6 +389,15 @@ static struct ctl_table reliable_ctl_table[] = {
 		.mode = 0644,
 		.proc_handler = reliable_reserve_size_handler,
 	},
+#ifdef CONFIG_SHMEM
+	{
+		.procname = "shmem_reliable_bytes_limit",
+		.data = &sysctl_shmem_reliable_bytes_limit,
+		.maxlen = sizeof(sysctl_shmem_reliable_bytes_limit),
+		.mode = 0644,
+		.proc_handler = reliable_shmem_bytes_limit_handler,
+	},
+#endif
 	{}
 };
......
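
The handler stores the limit in bytes but converts it to a page count for
the fast-path check, rounding down via the right shift. A minimal sketch
of that conversion, assuming 4 KiB pages (PAGE_SHIFT == 12):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

int main(void)
{
	unsigned long bytes = 4096 * 10 + 123;	/* not page aligned */
	long pages = bytes >> PAGE_SHIFT;	/* rounds down: trailing bytes ignored */

	printf("%lu bytes -> %ld pages\n", bytes, pages);	/* 41083 bytes -> 10 pages */
	return 0;
}
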
@@ -1597,12 +1597,20 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 	return page;
 }
 
-static inline void shmem_prepare_alloc(gfp_t *gfp_mask)
+static inline bool shmem_prepare_alloc(gfp_t *gfp_mask)
 {
 	if (!shmem_reliable_is_enabled())
-		return;
+		return true;
+
+	if (mem_reliable_shmem_limit_check()) {
+		*gfp_mask |= ___GFP_RELIABILITY;
+		return true;
+	}
+
+	if (reliable_allow_fb_enabled())
+		return true;
 
-	*gfp_mask |= ___GFP_RELIABILITY;
+	return false;
 }
 
 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
@@ -1621,7 +1629,8 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 	if (!shmem_inode_acct_block(inode, nr))
 		goto failed;
 
-	shmem_prepare_alloc(&gfp);
+	if (!shmem_prepare_alloc(&gfp))
+		goto no_mem;
 
 	if (huge)
 		page = shmem_alloc_hugepage(gfp, info, index, node_id);
@@ -1633,6 +1642,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 	return page;
 }
 
+no_mem:
 	err = -ENOMEM;
 	shmem_inode_unacct_blocks(inode, nr);
 failed:
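
To summarize the new control flow, here is a standalone model of the
shmem_prepare_alloc() decision (a sketch: the kernel helpers are stubbed
with plain booleans and the ___GFP_RELIABILITY bit value is a placeholder):

#include <stdbool.h>
#include <stdio.h>

#define ___GFP_RELIABILITY	0x800000u	/* placeholder bit for the sketch */

static bool shmem_reliable_enabled = true;	/* shmem_reliable_is_enabled() */
static bool shmem_under_limit;			/* mem_reliable_shmem_limit_check() */
static bool reliable_allow_fb;			/* reliable_allow_fb_enabled() */

/* Returns false only when the limit is hit and fallback is off; the caller
 * then jumps to no_mem and the allocation fails with -ENOMEM. */
static bool shmem_prepare_alloc(unsigned int *gfp_mask)
{
	if (!shmem_reliable_enabled)
		return true;		/* feature off: plain allocation */

	if (shmem_under_limit) {
		*gfp_mask |= ___GFP_RELIABILITY;	/* allocate from mirrored memory */
		return true;
	}

	if (reliable_allow_fb)
		return true;		/* over limit: fall back to non-mirrored memory */

	return false;			/* over limit, no fallback: -ENOMEM */
}

int main(void)
{
	unsigned int gfp = 0;

	shmem_under_limit = false;
	reliable_allow_fb = false;
	printf("over limit, fallback off -> allowed: %d\n",
	       shmem_prepare_alloc(&gfp));	/* prints 0 */
	return 0;
}
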
......