Commit f5c69190 authored by Chen Wandun, committed by Yang Yingliang

mm: add cmdline for the reliable memory usage of page cache

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

--------------------------------

Add cmdline for the reliable memory usage of page cache.
Page cache will not use reliable memory when passing option
"P" to reliable_debug in cmdline.
Signed-off-by: Chen Wandun <chenwandun@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent commit: c0019109
...@@ -1973,7 +1973,7 @@ ...@@ -1973,7 +1973,7 @@
some extension. These two features are alternatives. some extension. These two features are alternatives.
reliable_debug= [ARM64] reliable_debug= [ARM64]
Format: [F][,S] Format: [F][,S][,P]
Only works with CONFIG_MEMORY_RELIABLE and Only works with CONFIG_MEMORY_RELIABLE and
"kernelcore=reliable" is configured. "kernelcore=reliable" is configured.
F: User tasks with PF_RELIABLE will not allocate F: User tasks with PF_RELIABLE will not allocate
...@@ -1981,6 +1981,7 @@ ...@@ -1981,6 +1981,7 @@
from mirrored region failed. from mirrored region failed.
Pagecache and tmpfs will follow this rule too. Pagecache and tmpfs will follow this rule too.
S: The shmem does not use the reliable memory. S: The shmem does not use the reliable memory.
P: Page cache does not use the reliable memory.
kgdbdbgp= [KGDB,HW] kgdb over EHCI usb debug port. kgdbdbgp= [KGDB,HW] kgdb over EHCI usb debug port.
Format: <Controller#>[,poll interval] Format: <Controller#>[,poll interval]
......
...@@ -20,6 +20,7 @@ extern unsigned long task_reliable_limit __read_mostly; ...@@ -20,6 +20,7 @@ extern unsigned long task_reliable_limit __read_mostly;
extern bool reliable_allow_fallback; extern bool reliable_allow_fallback;
extern bool shmem_reliable; extern bool shmem_reliable;
extern struct percpu_counter reliable_shmem_used_nr_page; extern struct percpu_counter reliable_shmem_used_nr_page;
extern bool pagecache_use_reliable_mem;
extern void add_reliable_mem_size(long sz); extern void add_reliable_mem_size(long sz);
extern void mem_reliable_init(bool has_unmirrored_mem, extern void mem_reliable_init(bool has_unmirrored_mem,
...@@ -85,6 +86,11 @@ static inline bool shmem_reliable_is_enabled(void) ...@@ -85,6 +86,11 @@ static inline bool shmem_reliable_is_enabled(void)
return shmem_reliable; return shmem_reliable;
} }
/*
 * pagecache_reliable_is_enabled - report whether the page cache is allowed
 * to allocate from reliable (mirrored) memory.
 *
 * Returns the runtime flag pagecache_use_reliable_mem, which defaults to
 * true and is cleared when "reliable_debug=P" is passed on the kernel
 * cmdline (see setup_reliable_debug()).
 */
static inline bool pagecache_reliable_is_enabled(void)
{
return pagecache_use_reliable_mem;
}
static inline void shmem_reliable_page_counter(struct page *page, int nr_page) static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
{ {
if (shmem_reliable_is_enabled() && page_reliable(page)) if (shmem_reliable_is_enabled() && page_reliable(page))
...@@ -94,6 +100,7 @@ static inline void shmem_reliable_page_counter(struct page *page, int nr_page) ...@@ -94,6 +100,7 @@ static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
#else #else
#define reliable_enabled 0 #define reliable_enabled 0
#define reliable_allow_fb_enabled() false #define reliable_allow_fb_enabled() false
#define pagecache_use_reliable_mem 0
static inline bool mem_reliable_is_enabled(void) { return false; } static inline bool mem_reliable_is_enabled(void) { return false; }
static inline void add_reliable_mem_size(long sz) {} static inline void add_reliable_mem_size(long sz) {}
...@@ -126,6 +133,7 @@ static inline void shmem_reliable_page_counter(struct page *page, int nr_page) ...@@ -126,6 +133,7 @@ static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
{ {
} }
static inline bool pagecache_reliable_is_enabled(void) { return false; }
#endif #endif
#endif #endif
...@@ -249,7 +249,9 @@ extern struct page *__page_cache_alloc(gfp_t gfp); ...@@ -249,7 +249,9 @@ extern struct page *__page_cache_alloc(gfp_t gfp);
#else #else
static inline struct page *__page_cache_alloc(gfp_t gfp) static inline struct page *__page_cache_alloc(gfp_t gfp)
{ {
gfp |= ___GFP_RELIABILITY; if (pagecache_reliable_is_enabled())
gfp |= ___GFP_RELIABILITY;
return alloc_pages(gfp, 0); return alloc_pages(gfp, 0);
} }
#endif #endif
......
...@@ -1042,7 +1042,11 @@ struct page *__page_cache_alloc(gfp_t gfp) ...@@ -1042,7 +1042,11 @@ struct page *__page_cache_alloc(gfp_t gfp)
int n; int n;
struct page *page; struct page *page;
gfp |= ___GFP_RELIABILITY; if (pagecache_reliable_is_enabled())
gfp |= ___GFP_RELIABILITY;
else
WARN_ON_ONCE(gfp & ___GFP_RELIABILITY);
if (cpuset_do_page_mem_spread()) { if (cpuset_do_page_mem_spread()) {
unsigned int cpuset_mems_cookie; unsigned int cpuset_mems_cookie;
do { do {
......
...@@ -21,6 +21,7 @@ bool reliable_allow_fallback __read_mostly = true; ...@@ -21,6 +21,7 @@ bool reliable_allow_fallback __read_mostly = true;
bool shmem_reliable __read_mostly = true; bool shmem_reliable __read_mostly = true;
struct percpu_counter reliable_shmem_used_nr_page __read_mostly; struct percpu_counter reliable_shmem_used_nr_page __read_mostly;
bool pagecache_use_reliable_mem __read_mostly = true;
void add_reliable_mem_size(long sz) void add_reliable_mem_size(long sz)
{ {
atomic_long_add(sz, &total_reliable_mem); atomic_long_add(sz, &total_reliable_mem);
...@@ -249,6 +250,10 @@ static int __init setup_reliable_debug(char *str) ...@@ -249,6 +250,10 @@ static int __init setup_reliable_debug(char *str)
shmem_reliable = false; shmem_reliable = false;
pr_info("shmem reliable disabled."); pr_info("shmem reliable disabled.");
break; break;
case 'P':
pagecache_use_reliable_mem = false;
pr_info("disable page cache use reliable memory\n");
break;
default: default:
pr_err("reliable_debug option '%c' unknown. skipped\n", pr_err("reliable_debug option '%c' unknown. skipped\n",
*str); *str);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register to comment.