Commit 8fb1b85c authored by Ma Wupeng, committed by Zheng Zengkai

mm: Count reliable memory info based on zone info

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

--------------------------------

Count reliable memory info based on zone info. Any zone below
ZONE_MOVABLE is treated as a reliable zone, and the pages in those
zones are summed to produce the totals. Since the per-zone counters
already reflect memory hotplug, the dedicated hotplug notifier and the
separately maintained byte counter can be dropped.
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Parent 558ce3f0
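
In essence, the patch replaces a separately maintained byte counter
with an on-demand walk of the zones. A minimal sketch of the counting
rule, using only the kernel helpers visible in the diff below (the
variable names here are illustrative):

	/* Zones below ZONE_MOVABLE hold the mirrored (reliable) memory. */
	unsigned long total = 0, free = 0;
	struct zone *z;

	for_each_populated_zone(z) {
		if (zone_idx(z) >= ZONE_MOVABLE)
			continue;
		total += zone_managed_pages(z);            /* ReliableTotal */
		free += zone_page_state(z, NR_FREE_PAGES); /* ReliableBuddyMem */
	}
	/* ReliableUsed = total - free */

By construction, ReliableTotal = ReliableUsed + ReliableBuddyMem.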
@@ -971,6 +971,7 @@ varies by architecture and compile options. The following is from a
     ShmemPmdMapped: 0 kB
     ReliableTotal: 7340032 kB
     ReliableUsed: 418824 kB
+    ReliableBuddyMem: 418824 kB
 
 MemTotal
               Total usable RAM (i.e. physical RAM minus a few reserved
@@ -1104,6 +1105,8 @@ ReliableTotal
               Total reliable memory size
 ReliableUsed
               The used amount of reliable memory
+ReliableBuddyMem
+              Size of unused mirrored memory in buddy system
 
 vmallocinfo
 ~~~~~~~~~~~
......
@@ -15,9 +15,9 @@ DECLARE_STATIC_KEY_FALSE(mem_reliable);
 extern bool reliable_enabled;
 extern bool shmem_reliable;
 
-extern void add_reliable_mem_size(long sz);
 extern void mem_reliable_init(bool has_unmirrored_mem,
-			      unsigned long *zone_movable_pfn);
+			      unsigned long *zone_movable_pfn,
+			      unsigned long mirrored_sz);
 extern void shmem_reliable_init(void);
 extern void reliable_report_meminfo(struct seq_file *m);
 extern void page_cache_prepare_alloc(gfp_t *gfp);
@@ -28,11 +28,6 @@ static inline bool mem_reliable_is_enabled(void)
 	return static_branch_likely(&mem_reliable);
 }
 
-static inline bool zone_reliable(struct zone *zone)
-{
-	return mem_reliable_is_enabled() && zone_idx(zone) < ZONE_MOVABLE;
-}
-
 static inline bool skip_none_movable_zone(gfp_t gfp, struct zoneref *z)
 {
 	if (!mem_reliable_is_enabled())
@@ -59,11 +54,10 @@ static inline bool shmem_reliable_is_enabled(void)
 #define reliable_enabled 0
 
 static inline bool mem_reliable_is_enabled(void) { return false; }
-static inline void add_reliable_mem_size(long sz) {}
 static inline void mem_reliable_init(bool has_unmirrored_mem,
-				     unsigned long *zone_movable_pfn) {}
+				     unsigned long *zone_movable_pfn,
+				     unsigned long mirrored_sz) {}
 static inline void shmem_reliable_init(void) {}
-static inline bool zone_reliable(struct zone *zone) { return false; }
 static inline bool skip_none_movable_zone(gfp_t gfp, struct zoneref *z)
 {
 	return false;
......
@@ -8,12 +8,12 @@
 #include <linux/seq_file.h>
 #include <linux/mmzone.h>
 
-#define PAGES_TO_B(n_pages) ((n_pages) << PAGE_SHIFT)
-
 DEFINE_STATIC_KEY_FALSE(mem_reliable);
 EXPORT_SYMBOL_GPL(mem_reliable);
 
 bool reliable_enabled;
-static atomic_long_t total_reliable_mem;
 bool shmem_reliable __read_mostly = true;
 
 bool mem_reliable_status(void)
@@ -28,62 +28,42 @@ void page_cache_prepare_alloc(gfp_t *gfp)
 	*gfp |= GFP_RELIABLE;
 }
 
-void add_reliable_mem_size(long sz)
-{
-	atomic_long_add(sz, &total_reliable_mem);
-}
-
-static unsigned long total_reliable_mem_sz(void)
-{
-	return atomic_long_read(&total_reliable_mem);
-}
-
-static unsigned long used_reliable_mem_sz(void)
+static unsigned long total_reliable_pages(void)
 {
-	unsigned long nr_page = 0;
+	unsigned long total_reliable_pages = 0;
 	struct zone *z;
 
 	for_each_populated_zone(z)
 		if (zone_idx(z) < ZONE_MOVABLE)
-			nr_page += zone_page_state(z, NR_FREE_PAGES);
+			total_reliable_pages += zone_managed_pages(z);
 
-	return total_reliable_mem_sz() - nr_page * PAGE_SIZE;
+	return total_reliable_pages;
 }
 
-static int reliable_mem_notifier(struct notifier_block *nb,
-				 unsigned long action, void *arg)
+static unsigned long free_reliable_pages(void)
 {
-	struct memory_notify *m_arg = arg;
 	struct zone *zone;
+	unsigned long cnt = 0;
 
-	switch (action) {
-	case MEM_ONLINE:
-		zone = page_zone(pfn_to_page(m_arg->start_pfn));
-		if (zone_reliable(zone))
-			add_reliable_mem_size(m_arg->nr_pages * PAGE_SIZE);
-		break;
-	case MEM_OFFLINE:
-		zone = page_zone(pfn_to_page(m_arg->start_pfn));
-		if (zone_reliable(zone))
-			add_reliable_mem_size(-m_arg->nr_pages * PAGE_SIZE);
-		break;
-	default:
-		break;
-	}
+	for_each_populated_zone(zone)
+		if (zone_idx(zone) < ZONE_MOVABLE)
+			cnt += zone_page_state(zone, NR_FREE_PAGES);
 
-	return NOTIFY_OK;
+	return cnt;
 }
 
-static struct notifier_block reliable_notifier_block = {
-	.notifier_call = reliable_mem_notifier,
-};
+static unsigned long used_reliable_pages(void)
+{
+	return total_reliable_pages() - free_reliable_pages();
+}
 
-void mem_reliable_init(bool has_unmirrored_mem, unsigned long *zone_movable_pfn)
+void mem_reliable_init(bool has_unmirrored_mem, unsigned long *zone_movable_pfn,
+		       unsigned long mirrored_sz)
 {
 	if (!reliable_enabled)
 		return;
 
-	if (atomic_long_read(&total_reliable_mem) == 0) {
+	if (!mirrored_sz) {
 		memset(zone_movable_pfn, 0,
 		       sizeof(unsigned long) * MAX_NUMNODES);
 		pr_err("init failed, mirrored memory size is zero.\n");
@@ -95,15 +75,9 @@ void mem_reliable_init(bool has_unmirrored_mem, unsigned long *zone_movable_pfn)
 		return;
 	}
 
-	if (register_hotmemory_notifier(&reliable_notifier_block)) {
-		pr_err("init failed, register memory notifier failed.\n");
-		return;
-	}
-
 	static_branch_enable(&mem_reliable);
 
-	pr_info("init succeed, mirrored memory size(%lu)\n",
-		total_reliable_mem_sz());
+	pr_info("init succeed, mirrored memory size(%lu)\n", mirrored_sz);
 }
 
 void shmem_reliable_init(void)
@@ -123,8 +97,7 @@ void reliable_report_meminfo(struct seq_file *m)
 	if (!mem_reliable_is_enabled())
 		return;
 
-	show_val_kb(m, "ReliableTotal: ",
-		    total_reliable_mem_sz() >> PAGE_SHIFT);
-	show_val_kb(m, "ReliableUsed: ",
-		    used_reliable_mem_sz() >> PAGE_SHIFT);
+	show_val_kb(m, "ReliableTotal: ", total_reliable_pages());
+	show_val_kb(m, "ReliableUsed: ", used_reliable_pages());
+	show_val_kb(m, "ReliableBuddyMem: ", free_reliable_pages());
 }
@@ -7525,10 +7525,11 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 	if (mirrored_kernelcore) {
 		bool mem_below_4gb_not_mirrored = false;
 		bool has_unmirrored_mem = false;
+		unsigned long mirrored_sz = 0;
 
 		for_each_mem_region(r) {
 			if (memblock_is_mirror(r)) {
-				add_reliable_mem_size(r->size);
+				mirrored_sz += r->size;
 				continue;
 			}
@@ -7550,7 +7551,8 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 		if (mem_below_4gb_not_mirrored)
 			pr_warn("This configuration results in unmirrored kernel memory.\n");
 
-		mem_reliable_init(has_unmirrored_mem, zone_movable_pfn);
+		mem_reliable_init(has_unmirrored_mem, zone_movable_pfn,
+				  mirrored_sz);
 
 		goto out2;
 	}
......
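
A quick userspace sanity check is to read the three fields back from
/proc/meminfo and verify that they add up. The program below is a
hypothetical helper, not part of this patch; the identity may be off by
a few pages on a busy system because the kernel samples NR_FREE_PAGES
twice while filling the file:

	#include <stdio.h>

	int main(void)
	{
		unsigned long total = 0, used = 0, buddy = 0;
		char line[256];
		FILE *f = fopen("/proc/meminfo", "r");

		if (!f)
			return 1;

		/* Each sscanf() matches only its own field and leaves the
		 * other counters untouched. */
		while (fgets(line, sizeof(line), f)) {
			sscanf(line, "ReliableTotal: %lu kB", &total);
			sscanf(line, "ReliableUsed: %lu kB", &used);
			sscanf(line, "ReliableBuddyMem: %lu kB", &buddy);
		}
		fclose(f);

		printf("total=%lu kB, used+buddy=%lu kB\n",
		       total, used + buddy);
		return 0;
	}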