Commit ee97b610 authored by Ma Wupeng, committed by Yang Yingliang

mm: Count mirrored pages in buddy system

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

--------------------------------

Add a per-CPU counter, nr_reliable_buddy_pages, to track mirrored pages
in the buddy system, and report the summed total as ReliableBuddyMem in
/proc/meminfo.

Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 709c0fcc
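For context: the patch builds on the kernel's stock per-CPU counter primitives. The sketch below is illustrative only (the demo_* names are made up, not part of this commit) and shows the same pattern in isolation: hot paths update a CPU-local slot via this_cpu_add(), and an occasional reader folds every slot together with for_each_possible_cpu(), which is what reliable_report_meminfo() does with nr_reliable_buddy_pages.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/printk.h>

/* One long per CPU; fast-path updates take no lock and bounce no cache line. */
static DEFINE_PER_CPU(long, demo_pages);

static void demo_account(long nr_pages)
{
	/* Preemption-safe read-modify-write of this CPU's slot. */
	this_cpu_add(demo_pages, nr_pages);
}

static long demo_sum(void)
{
	long sum = 0;
	int cpu;

	/* Slow path: fold every CPU's slot into one total. */
	for_each_possible_cpu(cpu)
		sum += per_cpu(demo_pages, cpu);
	return sum;
}

static int __init demo_init(void)
{
	demo_account(1 << 3);	/* mimic freeing an order-3 block */
	demo_account(-1);	/* mimic an order-0 allocation    */
	pr_info("demo sum: %ld pages\n", demo_sum());
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The trade-off is the usual one for statistics counters: updates stay O(1) and contention-free on the alloc/free hot paths, while the read side pays an O(nr_cpus) walk, which is acceptable for a value only consumed through /proc/meminfo.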
@@ -884,6 +884,7 @@ ShmemHugePages: 0 kB
 ShmemPmdMapped: 0 kB
 ReliableTotal: 7340032 kB
 ReliableUsed: 418824 kB
+ReliableBuddyMem: 418824 kB
 ReliableShmem: 96 kB
@@ -977,6 +978,7 @@ VmallocChunk: largest contiguous block of vmalloc area which is free
 allocations. This stat excludes the cost of metadata.
 ReliableTotal: Total reliable memory size
 ReliableUsed: The used amount of reliable memory
+ReliableBuddyMem: Total mirrored memory size in buddy system
 ReliableShmem: Reliable memory used by shmem
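To see the new field from userspace, something like the following works on a kernel that carries this patch. This is a hypothetical checker, not part of the commit; it only assumes the ReliableBuddyMem line added to /proc/meminfo above.

#include <stdio.h>

int main(void)
{
	char line[256];
	unsigned long kb;
	FILE *fp = fopen("/proc/meminfo", "r");

	if (!fp) {
		perror("fopen /proc/meminfo");
		return 1;
	}
	while (fgets(line, sizeof(line), fp)) {
		/* Matches the format emitted by reliable_report_meminfo(). */
		if (sscanf(line, "ReliableBuddyMem: %lu kB", &kb) == 1) {
			printf("mirrored memory in buddy system: %lu kB\n", kb);
			fclose(fp);
			return 0;
		}
	}
	fclose(fp);
	fprintf(stderr, "ReliableBuddyMem not found; kernel may lack this patch\n");
	return 1;
}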
@@ -22,6 +22,7 @@ extern bool shmem_reliable;
 extern struct percpu_counter reliable_shmem_used_nr_page;
 extern bool pagecache_use_reliable_mem;
 extern atomic_long_t page_cache_fallback;
+DECLARE_PER_CPU(long, nr_reliable_buddy_pages);
 extern void page_cache_fallback_inc(gfp_t gfp, struct page *page);
 extern void add_reliable_mem_size(long sz);
@@ -100,6 +101,12 @@ static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
 	percpu_counter_add(&reliable_shmem_used_nr_page, nr_page);
 }
+
+static inline void mem_reliable_buddy_counter(struct page *page, int nr_page)
+{
+	if (page && page_reliable(page))
+		this_cpu_add(nr_reliable_buddy_pages, nr_page);
+}
 #else
 #define reliable_enabled 0
 #define reliable_allow_fb_enabled() false
@@ -139,6 +146,7 @@ static inline void page_cache_fallback_inc(gfp_t gfp, struct page *page) {}
 static inline bool pagecache_reliable_is_enabled(void) { return false; }
 static inline bool mem_reliable_status(void) { return false; }
+static inline void mem_reliable_buddy_counter(struct page *page, int nr_page) {}
 #endif
 #endif
@@ -28,6 +28,7 @@ unsigned long task_reliable_limit = ULONG_MAX;
 bool reliable_allow_fallback __read_mostly = true;
 bool shmem_reliable __read_mostly = true;
 struct percpu_counter reliable_shmem_used_nr_page __read_mostly;
+DEFINE_PER_CPU(long, nr_reliable_buddy_pages);
 bool pagecache_use_reliable_mem __read_mostly = true;
 atomic_long_t page_cache_fallback = ATOMIC_LONG_INIT(0);
@@ -168,11 +169,20 @@ static unsigned long used_reliable_mem_sz(void)
 void reliable_report_meminfo(struct seq_file *m)
 {
+	long buddy_pages_sum = 0;
+	int cpu;
+
 	if (mem_reliable_is_enabled()) {
+		for_each_possible_cpu(cpu)
+			buddy_pages_sum +=
+				per_cpu(nr_reliable_buddy_pages, cpu);
 		seq_printf(m, "ReliableTotal: %8lu kB\n",
 			   total_reliable_mem_sz() >> 10);
 		seq_printf(m, "ReliableUsed: %8lu kB\n",
 			   used_reliable_mem_sz() >> 10);
+		seq_printf(m, "ReliableBuddyMem: %8lu kB\n",
+			   buddy_pages_sum << (PAGE_SHIFT - 10));
+
 		if (shmem_reliable_is_enabled()) {
 			unsigned long shmem = (unsigned long)percpu_counter_sum(
 ...
@@ -1340,6 +1340,7 @@ static void __free_pages_ok(struct page *page, unsigned int order,
 	migratetype = get_pfnblock_migratetype(page, pfn);
 	local_irq_save(flags);
 	__count_vm_events(PGFREE, 1 << order);
+	mem_reliable_buddy_counter(page, 1 << order);
 	free_one_page(page_zone(page), page, pfn, order, migratetype,
 		      fpi_flags);
 	local_irq_restore(flags);
@@ -2919,6 +2920,7 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
 	migratetype = get_pcppage_migratetype(page);
 	__count_vm_event(PGFREE);
+	mem_reliable_buddy_counter(page, 1);

 	/*
 	 * We only track unmovable, reclaimable and movable on pcp lists.
@@ -3156,6 +3158,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 	page = __rmqueue_pcplist(zone, migratetype, pcp, list);
 	if (page) {
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+		mem_reliable_buddy_counter(page, -(1 << order));
 		zone_statistics(preferred_zone, zone);
 	}
 	local_irq_restore(flags);
@@ -3204,6 +3207,7 @@ struct page *rmqueue(struct zone *preferred_zone,
 			get_pcppage_migratetype(page));
 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+	mem_reliable_buddy_counter(page, -(1 << order));
 	zone_statistics(preferred_zone, zone);
 	local_irq_restore(flags);
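One property worth noting (my reading of the hooks above, not stated in the commit message): the counter is incremented on the free paths (__free_pages_ok(), free_unref_page_commit()) and decremented on the allocation paths (rmqueue(), rmqueue_pcplist()), so only the cross-CPU sum is meaningful. A page freed on one CPU and later allocated on another leaves opposite-signed residues in two slots, and an individual slot may legitimately go negative. A toy userspace model of that behavior:

#include <stdio.h>

#define NR_CPUS 2

static long nr_buddy[NR_CPUS];	/* stand-in for the per-CPU slots */

static void buddy_counter(int cpu, long nr)
{
	nr_buddy[cpu] += nr;	/* stand-in for this_cpu_add() */
}

static long buddy_sum(void)
{
	long sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += nr_buddy[cpu];
	return sum;
}

int main(void)
{
	buddy_counter(1, 1 << 2);	/* free path on CPU 1: +4 pages  */
	buddy_counter(0, -(1 << 2));	/* alloc path on CPU 0: -4 pages */

	/* cpu0=-4 cpu1=4 sum=0: no mirrored pages left in the buddy system */
	printf("cpu0=%ld cpu1=%ld sum=%ld\n",
	       nr_buddy[0], nr_buddy[1], buddy_sum());
	return 0;
}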