Commit b87d4606 authored by Ma Wupeng, committed by Zheng Zengkai

mm: Introduce fallback mechanism for memory reliable

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

--------------------------------

Introduce a fallback mechanism for memory reliable. Memory allocation
will fall back to the non-mirrored region if the zone's low watermark is
reached, and kswapd will be woken up at that time.

This mechanism is enabled by default and can be disabled by adding
"reliable_debug=F" to the kernel parameters. It relies on
CONFIG_MEMORY_RELIABLE and requires "kernelcore=reliable" in the kernel
parameters.
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Parent 0122fd48
@@ -4778,9 +4778,11 @@
 			See Documentation/admin-guide/cgroup-v1/cpusets.rst.
 
 	reliable_debug=	[ARM64]
-			Format: [P]
+			Format: [F][,P]
 			Only works with CONFIG_MEMORY_RELIABLE and
 			"kernelcore=reliable" is configured.
+			F: User memory allocation (special user tasks, tmpfs) will
+			   not allocate memory from the non-mirrored region on failure.
 			P: Page cache does not use the reliable memory.
 
 	reserve=	[KNL,BUGS] Force kernel to ignore I/O ports or memory
......
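For reference, a hypothetical command line combining the two prerequisites with the new flag, spelled per the "[F][,P]" format documented above (values are illustrative):

    kernelcore=reliable reliable_debug=F,P

"F" turns off the fallback introduced by this patch; "P" additionally keeps the page cache out of reliable memory.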
@@ -16,6 +16,7 @@ DECLARE_STATIC_KEY_FALSE(mem_reliable);
 
 extern bool reliable_enabled;
 extern bool shmem_reliable;
+extern bool reliable_allow_fallback;
 extern bool pagecache_use_reliable_mem;
 extern struct percpu_counter pagecache_reliable_pages;
 extern struct percpu_counter anon_reliable_pages;
@@ -104,6 +105,11 @@ static inline bool mem_reliable_should_reclaim(void)
 	return false;
 }
 
+static inline bool reliable_allow_fb_enabled(void)
+{
+	return reliable_allow_fallback;
+}
+
 #else
 #define reliable_enabled 0
 #define pagecache_use_reliable_mem 0
@@ -138,6 +144,7 @@ static inline void mem_reliable_out_of_memory(gfp_t gfp_mask,
 						unsigned int order,
 						int preferred_nid,
 						nodemask_t *nodemask) {}
+static inline bool reliable_allow_fb_enabled(void) { return false; }
 #endif
 
 #endif
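The header pairs the real helper with a constant-returning stub so call sites stay free of #ifdefs. Below is a minimal standalone sketch of that pattern, assuming a userspace build; the variable definition is included only so the example links (in the kernel it lives in the mm code, as the next hunk shows). Toggle it with gcc -DCONFIG_MEMORY_RELIABLE sketch.c:

    #include <stdbool.h>
    #include <stdio.h>

    #ifdef CONFIG_MEMORY_RELIABLE
    /* Defined here so the sketch links; the kernel defines it in a .c file. */
    bool reliable_allow_fallback = true;

    static inline bool reliable_allow_fb_enabled(void)
    {
            return reliable_allow_fallback;
    }
    #else
    /* Compiled-out stub: the compiler folds callers' branches to constants. */
    static inline bool reliable_allow_fb_enabled(void) { return false; }
    #endif

    int main(void)
    {
            /* Callers branch unconditionally, with or without the feature. */
            if (reliable_allow_fb_enabled())
                    puts("fallback allowed");
            else
                    puts("fallback disabled or compiled out");
            return 0;
    }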
@@ -16,6 +16,7 @@ EXPORT_SYMBOL_GPL(mem_reliable);
 
 bool reliable_enabled;
 bool shmem_reliable __read_mostly = true;
+bool reliable_allow_fallback __read_mostly = true;
 bool pagecache_use_reliable_mem __read_mostly = true;
 struct percpu_counter pagecache_reliable_pages;
 struct percpu_counter anon_reliable_pages;
@@ -292,6 +293,10 @@ static int __init setup_reliable_debug(char *str)
 	 */
 	for (; *str && *str != ','; str++) {
 		switch (*str) {
+		case 'F':
+			reliable_allow_fallback = false;
+			pr_info("disable memory reliable fallback\n");
+			break;
 		case 'P':
 			pagecache_use_reliable_mem = false;
 			pr_info("disable page cache use reliable memory\n");
......
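As an aside, the loop above walks single option characters and stops at the first ','. A standalone sketch of the same parsing style, with printf standing in for pr_info and the early-param plumbing omitted:

    #include <stdbool.h>
    #include <stdio.h>

    static bool reliable_allow_fallback = true;
    static bool pagecache_use_reliable_mem = true;

    static void parse_reliable_debug(const char *str)
    {
            for (; *str && *str != ','; str++) {
                    switch (*str) {
                    case 'F':
                            reliable_allow_fallback = false;
                            printf("disable memory reliable fallback\n");
                            break;
                    case 'P':
                            pagecache_use_reliable_mem = false;
                            printf("disable page cache use reliable memory\n");
                            break;
                    default:
                            printf("ignoring unknown option '%c'\n", *str);
                            break;
                    }
            }
    }

    int main(void)
    {
            parse_reliable_debug("F");      /* as passed via reliable_debug=F */
            printf("fallback=%d pagecache_reliable=%d\n",
                   reliable_allow_fallback, pagecache_use_reliable_mem);
            return 0;
    }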
@@ -4664,6 +4664,28 @@ check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
 	return false;
 }
 
+#ifdef CONFIG_MEMORY_RELIABLE
+static inline void mem_reliable_fallback_slowpath(gfp_t gfp_mask,
+						  struct alloc_context *ac)
+{
+	if (!reliable_allow_fb_enabled())
+		return;
+
+	if (gfp_mask & __GFP_NOFAIL)
+		return;
+
+	if ((ac->highest_zoneidx == ZONE_NORMAL) && (gfp_mask & GFP_RELIABLE)) {
+		ac->highest_zoneidx = gfp_zone(gfp_mask & ~GFP_RELIABLE);
+		ac->preferred_zoneref = first_zones_zonelist(
+			ac->zonelist, ac->highest_zoneidx, ac->nodemask);
+		return;
+	}
+}
+#else
+static inline void mem_reliable_fallback_slowpath(gfp_t gfp_mask,
+						  struct alloc_context *ac) {}
+#endif
+
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 						struct alloc_context *ac)
@@ -4715,6 +4737,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	if (alloc_flags & ALLOC_KSWAPD)
 		wake_all_kswapds(order, gfp_mask, ac);
 
+	mem_reliable_fallback_slowpath(gfp_mask, ac);
+
 	/*
 	 * The adjusted alloc_flags might result in immediate success, so try
 	 * that first
@@ -5228,7 +5252,7 @@ static inline bool check_after_alloc(gfp_t *gfp, unsigned int order,
 	*_page = NULL;
 
 out_retry:
-	if (is_global_init(current)) {
+	if (reliable_allow_fb_enabled() || is_global_init(current)) {
 		*gfp &= ~GFP_RELIABLE;
 		return true;
 	}
......
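To illustrate what the slow-path hook does, here is a toy userspace model of the zone widening performed by mem_reliable_fallback_slowpath(). The GFP_RELIABLE bit value and the gfp_zone() mapping are stand-ins, and the __GFP_NOFAIL bail-out plus the zoneref recomputation are omitted:

    #include <stdbool.h>
    #include <stdio.h>

    #define GFP_RELIABLE    0x1u            /* stand-in bit, not the real value */

    enum zone_idx { ZONE_DMA, ZONE_NORMAL, ZONE_MOVABLE };

    static bool reliable_allow_fb_enabled(void) { return true; }

    /* Simplified gfp_zone(): without GFP_RELIABLE the allocation may also
     * be placed in the movable, non-mirrored zone. */
    static enum zone_idx gfp_zone(unsigned int gfp_mask)
    {
            return (gfp_mask & GFP_RELIABLE) ? ZONE_NORMAL : ZONE_MOVABLE;
    }

    /* Mirrors the hunk above: if the allocation was limited to the
     * mirrored region (ZONE_NORMAL) and fallback is allowed, clear the
     * restriction bit and recompute the highest usable zone. */
    static void mem_reliable_fallback_slowpath(unsigned int gfp_mask,
                                               enum zone_idx *highest_zoneidx)
    {
            if (!reliable_allow_fb_enabled())
                    return;
            if ((*highest_zoneidx == ZONE_NORMAL) && (gfp_mask & GFP_RELIABLE))
                    *highest_zoneidx = gfp_zone(gfp_mask & ~GFP_RELIABLE);
    }

    int main(void)
    {
            enum zone_idx zone = ZONE_NORMAL;

            mem_reliable_fallback_slowpath(GFP_RELIABLE, &zone);
            printf("highest zone index: %d (ZONE_MOVABLE=%d)\n",
                   zone, ZONE_MOVABLE);
            return 0;
    }

Run as-is, the model shows the highest usable zone moving from ZONE_NORMAL (mirrored) to ZONE_MOVABLE (non-mirrored), which is exactly the fallback the commit message describes.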