Commit cb06286c authored by Zhou Guanghui, committed by Zheng Zengkai

shmem: Introduce shmem reliable

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4PM0Y
CVE: NA

------------------------------------------

This feature depends on the overall memory reliable feature.
When shmem reliable is enabled, pages used by shared memory are
allocated from the mirrored region by default. If the mirrored
region is insufficient, the allocation falls back to the
non-mirrored region.
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 6a5a682d
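The hunks below tag shmem allocations with GFP_RELIABLE so that the page allocator prefers the mirrored region and falls back to the non-mirrored one. As a rough mental model only (the pool counters and helper below are made up for illustration; the real selection happens in the kernel's zone and fallback machinery, which this patch does not show), the policy looks like this:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy pools standing in for the mirrored/non-mirrored regions;
     * the real accounting lives in the kernel's zone machinery. */
    static long mirrored_free = 2;
    static long normal_free = 8;

    /* Sketch of the policy: prefer the mirrored region, fall back
     * to the non-mirrored region when it runs dry. */
    static bool alloc_reliable_page(void)
    {
        if (mirrored_free > 0) {
            mirrored_free--;
            return true;        /* served from mirrored memory */
        }
        if (normal_free > 0) {
            normal_free--;
            return true;        /* fallback to non-mirrored memory */
        }
        return false;           /* both pools exhausted */
    }

    int main(void)
    {
        for (int i = 0; i < 12; i++)
            printf("page %2d: %s\n", i,
                   alloc_reliable_page() ? "ok" : "fail");
        return 0;
    }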
@@ -13,10 +13,12 @@
 extern struct static_key_false mem_reliable;
 
 extern bool reliable_enabled;
+extern bool shmem_reliable;
 
 extern void add_reliable_mem_size(long sz);
 extern void mem_reliable_init(bool has_unmirrored_mem,
			      unsigned long *zone_movable_pfn);
+extern void shmem_reliable_init(void);
 extern void reliable_report_meminfo(struct seq_file *m);
 
 static inline bool mem_reliable_is_enabled(void)
@@ -46,6 +48,11 @@ static inline bool skip_none_movable_zone(gfp_t gfp, struct zoneref *z)
 	return false;
 }
 
+static inline bool shmem_reliable_is_enabled(void)
+{
+	return shmem_reliable;
+}
+
 #else
 
 #define reliable_enabled 0
@@ -53,12 +60,14 @@ static inline bool mem_reliable_is_enabled(void) { return false; }
 static inline void add_reliable_mem_size(long sz) {}
 static inline void mem_reliable_init(bool has_unmirrored_mem,
				     unsigned long *zone_movable_pfn) {}
+static inline void shmem_reliable_init(void) {}
 static inline bool zone_reliable(struct zone *zone) { return false; }
 static inline bool skip_none_movable_zone(gfp_t gfp, struct zoneref *z)
 {
	return false;
 }
 static inline void reliable_report_meminfo(struct seq_file *m) {}
+static inline bool shmem_reliable_is_enabled(void) { return false; }
 #endif
 
 #endif
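The header hunks above follow the usual kernel pattern: real declarations under the feature's config option, and constant no-op static inline stubs otherwise, so call sites need no #ifdefs and the compiler eliminates dead branches. A standalone sketch of that pattern (CONFIG_MEMORY_RELIABLE is an assumed guard symbol here; the patch context does not show the actual one):

    #include <stdbool.h>
    #include <stdio.h>

    #ifdef CONFIG_MEMORY_RELIABLE  /* assumed; build with -DCONFIG_MEMORY_RELIABLE */
    static bool shmem_reliable = true;
    static inline bool shmem_reliable_is_enabled(void) { return shmem_reliable; }
    #else
    /* Feature compiled out: the stub returns a constant, so any
     * branch on it is removed at compile time. */
    static inline bool shmem_reliable_is_enabled(void) { return false; }
    #endif

    int main(void)
    {
        if (shmem_reliable_is_enabled())
            puts("shmem allocations will request reliable memory");
        else
            puts("shmem reliable is compiled out or disabled");
        return 0;
    }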
@@ -13,6 +13,7 @@ DEFINE_STATIC_KEY_FALSE(mem_reliable);
 
 bool reliable_enabled;
 static atomic_long_t total_reliable_mem;
+bool shmem_reliable __read_mostly = true;
 
 void add_reliable_mem_size(long sz)
 {
@@ -92,6 +93,17 @@ void mem_reliable_init(bool has_unmirrored_mem, unsigned long *zone_movable_pfn)
		total_reliable_mem_sz());
 }
 
+void shmem_reliable_init(void)
+{
+	if (!shmem_reliable_is_enabled())
+		return;
+
+	if (!mem_reliable_is_enabled()) {
+		shmem_reliable = false;
+		pr_info("shmem reliable disabled.\n");
+	}
+}
+
 void reliable_report_meminfo(struct seq_file *m)
 {
	if (!mem_reliable_is_enabled())
...
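Note that shmem_reliable defaults to true, so the only job of shmem_reliable_init() is to switch it off when the overall feature did not come up; by the time shmem_init() calls it, mem_reliable_init() has already run during early boot. A runnable sketch of the same dependency (the booleans stand in for kernel state; nothing here is kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    static bool mem_reliable_up;          /* overall feature state after boot */
    static bool shmem_reliable = true;    /* on by default, as in the patch */

    /* Mirrors the logic of shmem_reliable_init(): shmem reliable only
     * stays enabled if the overall memory reliable feature is active. */
    static void shmem_reliable_init_sketch(void)
    {
        if (!shmem_reliable)
            return;
        if (!mem_reliable_up) {
            shmem_reliable = false;
            puts("shmem reliable disabled.");
        }
    }

    int main(void)
    {
        mem_reliable_up = false;          /* e.g. no mirrored memory present */
        shmem_reliable_init_sketch();
        printf("shmem_reliable = %d\n", shmem_reliable);  /* prints 0 */
        return 0;
    }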
@@ -1570,6 +1570,14 @@ static struct page *shmem_alloc_page(gfp_t gfp,
	return page;
 }
 
+static inline void shmem_prepare_alloc(gfp_t *gfp_mask)
+{
+	if (!shmem_reliable_is_enabled())
+		return;
+
+	*gfp_mask |= GFP_RELIABLE;
+}
+
 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
		struct inode *inode,
		pgoff_t index, bool huge, int node_id)
@@ -1586,6 +1594,8 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
	if (!shmem_inode_acct_block(inode, nr))
		goto failed;
 
+	shmem_prepare_alloc(&gfp);
+
	if (huge)
		page = shmem_alloc_hugepage(gfp, info, index, node_id);
	else
@@ -3944,6 +3954,8 @@ int __init shmem_init(void)
	else
		shmem_huge = 0; /* just in case it was patched */
 #endif
 
+	shmem_reliable_init();
+
	return 0;
 
 out1:
...
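To exercise the path this patch touches, one can fault in shmem pages from userspace: every page touched below is allocated through shmem_alloc_and_acct_page() and hence, on a kernel with this patch, requested with GFP_RELIABLE. This is an assumed test recipe, not part of the patch (memfd_create() requires Linux 3.17+ and glibc 2.27+):

    #define _GNU_SOURCE
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t len = 64 << 20;  /* 64 MiB of shared memory */
        int fd = memfd_create("reliable-test", 0);

        if (fd < 0 || ftruncate(fd, len) != 0)
            return 1;

        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
            return 1;

        memset(p, 0x5a, len);   /* fault in the shmem pages */
        /* Compare the reliable counters that reliable_report_meminfo()
         * exposes before and after this point to observe the effect. */
        munmap(p, len);
        close(fd);
        return 0;
    }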