Commit 3a3a1f75 authored by Zhou Guanghui, committed by Yang Yingliang

shmem: Introduce shmem reliable

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

------------------------------------------

This feature depends on the overall memory reliable feature.
When shmem reliable is enabled, the pages used by shared memory
are allocated from the mirrored region by default. If the
mirrored region is insufficient, the allocation falls back to
the non-mirrored region.

Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 3023a4b3
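The gist of the change: when shmem reliable is enabled, shmem ORs a reliability flag into its gfp mask so the page allocator serves the request from the mirrored region first, falling back to the non-mirrored region only when the mirrored region cannot satisfy it. As a rough userspace sketch of that policy (a model only, not the kernel code; the pool sizes and the function shmem_alloc_page_model() are invented for illustration):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical model: two fixed-size pools standing in for the
 * mirrored and non-mirrored regions. */
static long mirrored_free = 2;		/* pages left in mirrored region */
static long non_mirrored_free = 4;	/* pages left in non-mirrored region */

/* Allocate one page for shmem: prefer the mirrored region, fall
 * back to the non-mirrored region when it is insufficient. */
static const char *shmem_alloc_page_model(bool shmem_reliable)
{
	if (shmem_reliable && mirrored_free > 0) {
		mirrored_free--;
		return "mirrored";
	}
	if (non_mirrored_free > 0) {
		non_mirrored_free--;
		return "non-mirrored";
	}
	return "OOM";
}

int main(void)
{
	/* First two pages come from the mirrored pool, the rest fall back. */
	for (int i = 0; i < 4; i++)
		printf("page %d from %s region\n", i,
		       shmem_alloc_page_model(true));
	return 0;
}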
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1973,13 +1973,14 @@
 			some extension. These two features are alternatives.
 
 	reliable_debug=	[ARM64]
-			Format: [F]
+			Format: [F][,S]
 			Only works with CONFIG_MEMORY_RELIABLE and
 			"kernelcore=reliable" is configured.
 			F: User tasks with PF_RELIABLE will not allocate
 			memory from non-mirrored region if this allocation
 			from mirrored region failed.
 			Pagecache and tmpfs will follow this rule too.
+			S: The shmem does not use the reliable memory.
 
 	kgdbdbgp=	[KGDB,HW] kgdb over EHCI usb debug port.
 			Format: <Controller#>[,poll interval]
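For instance, a boot command line that enables the reliable memory feature but opts shmem out of it might look like this (illustrative; the exact flag set depends on the deployment):

	kernelcore=reliable reliable_debug=S

Passing reliable_debug=F,S would additionally disable the non-mirrored fallback for user tasks carrying PF_RELIABLE.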
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -17,10 +17,12 @@ extern bool reliable_enabled;
 extern atomic_long_t reliable_user_used_nr_page;
 extern unsigned long task_reliable_limit __read_mostly;
 extern bool reliable_allow_fallback;
+extern bool shmem_reliable;
 
 extern void add_reliable_mem_size(long sz);
 extern void mem_reliable_init(bool has_unmirrored_mem,
 			      unsigned long *zone_movable_pfn);
+extern void shmem_reliable_init(void);
 extern void reliable_report_meminfo(struct seq_file *m);
 extern bool page_reliable(struct page *page);
 extern void reliable_report_usage(struct seq_file *m, struct mm_struct *mm);
@@ -75,6 +77,12 @@ static inline bool reliable_allow_fb_enabled(void)
 {
 	return reliable_allow_fallback;
 }
+
+static inline bool shmem_reliable_is_enabled(void)
+{
+	return shmem_reliable;
+}
+
 #else
 #define reliable_enabled		0
 #define reliable_allow_fb_enabled()	false
@@ -83,6 +91,7 @@ static inline bool mem_reliable_is_enabled(void) { return false; }
 static inline void add_reliable_mem_size(long sz) {}
 static inline void mem_reliable_init(bool has_unmirrored_mem,
 				     unsigned long *zone_movable_pfn) {}
+static inline void shmem_reliable_init(void) {}
 static inline bool zone_reliable(struct zone *zone) { return false; }
 static inline bool skip_none_movable_zone(gfp_t gfp, struct zoneref *z)
 {
@@ -104,6 +113,7 @@ static inline void mem_reliable_out_of_memory(gfp_t gfp_mask,
 					      unsigned int order,
 					      int preferred_nid,
 					      nodemask_t *nodemask) {}
+static inline bool shmem_reliable_is_enabled(void) { return false; }
 #endif
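Note the pattern the header follows: under CONFIG_MEMORY_RELIABLE the new shmem_reliable_is_enabled() inline reads the real shmem_reliable flag, while the #else branch supplies a stub returning false and a no-op shmem_reliable_init(). Callers such as mm/shmem.c can therefore invoke both unconditionally, with no #ifdef at the call sites.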
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -18,6 +18,7 @@ atomic_long_t reliable_user_used_nr_page;
 /* reliable user limit for user tasks with reliable flag */
 unsigned long task_reliable_limit = ULONG_MAX;
 bool reliable_allow_fallback __read_mostly = true;
+bool shmem_reliable __read_mostly = true;
 
 void add_reliable_mem_size(long sz)
 {
@@ -88,6 +89,17 @@ void mem_reliable_init(bool has_unmirrored_mem, unsigned long *zone_movable_pfn)
 			atomic_long_read(&total_reliable_mem));
 }
 
+void shmem_reliable_init(void)
+{
+	if (!shmem_reliable_is_enabled())
+		return;
+
+	if (!mem_reliable_is_enabled()) {
+		shmem_reliable = false;
+		pr_info("shmem reliable disabled.\n");
+	}
+}
+
 static unsigned long total_reliable_mem_sz(void)
 {
 	return atomic_long_read(&total_reliable_mem);
@@ -223,6 +235,10 @@ static int __init setup_reliable_debug(char *str)
 			reliable_allow_fallback = false;
 			pr_info("fallback disabled.");
 			break;
+		case 'S':
+			shmem_reliable = false;
+			pr_info("shmem reliable disabled.");
+			break;
 		default:
			pr_err("reliable_debug option '%c' unknown. skipped\n",
			       *str);
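Two details worth noting here. First, shmem_reliable defaults to true and is only ever downgraded: shmem_reliable_init() clears it when the overall memory reliable feature turns out to be disabled, and the 'S' option of reliable_debug= clears it from the command line; in both cases the kernel logs "shmem reliable disabled.". Second, because shmem_reliable_init() is called from shmem_init() (see the mm/shmem.c hunk below), which runs well after early memory setup, the check observes the final state of the parent feature before deciding whether shmem keeps using reliable memory.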
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1593,6 +1593,14 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 	return page;
 }
 
+static inline void shmem_prepare_alloc(gfp_t *gfp_mask)
+{
+	if (!shmem_reliable_is_enabled())
+		return;
+
+	*gfp_mask |= ___GFP_RELIABILITY;
+}
+
 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 		struct inode *inode,
 		pgoff_t index, bool huge, int node_id)
@@ -1609,6 +1617,8 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 	if (!shmem_inode_acct_block(inode, nr))
 		goto failed;
 
+	shmem_prepare_alloc(&gfp);
+
 	if (huge)
 		page = shmem_alloc_hugepage(gfp, info, index, node_id);
 	else
@@ -3941,6 +3951,8 @@ int __init shmem_init(void)
 	else
 		shmem_huge = 0;	/* just in case it was patched */
 #endif
+
+	shmem_reliable_init();
 	return 0;
 
 out1:
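Since shmem_prepare_alloc() rewrites the caller's gfp copy right after block accounting succeeds and before the huge/order-0 split, both shmem_alloc_hugepage() and shmem_alloc_page() inherit ___GFP_RELIABILITY from a single hook point. The page allocator's reliable-memory handling, introduced by the parent series, then decides which region actually satisfies the request (presumably including the fallback behavior governed by reliable_allow_fallback).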