Commit ea625d60 authored by Chen Wandun, committed by Yang Yingliang

mm: fix statistic of ReliableTaskUsed

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

----------------------------------------------

The previous method of accounting ReliableTaskUsed was inaccurate in
cases such as copy-on-write, page migration, and shareable memory,
which could leave the counter unbalanced and lead to overflow.

Fix the ReliableTaskUsed statistic by accounting reliable anonymous
pages and reliable page cache pages instead.
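
The accounting scheme behind the fix can be sketched outside the kernel: every CPU keeps its own signed delta for reliable page cache and reliable anonymous pages, the deltas are adjusted at the LRU add/remove choke points, and readers sum all deltas on demand. The standalone C model below is only an illustration of that idea, not the kernel code; the CPU count and all identifiers (NR_CPUS_MODEL, reliable_lru_add_model, and so on) are made up for this sketch.

#include <stdio.h>

/*
 * Toy model of per-CPU reliable-page accounting: each "CPU" keeps a signed
 * delta that is updated when a reliable page is added to or removed from an
 * LRU list, and readers sum all deltas to get the current usage.  The CPU
 * count and all identifiers are assumptions made for this sketch only.
 */
#define NR_CPUS_MODEL 4

static long pagecache_reliable_pages[NR_CPUS_MODEL];
static long anon_reliable_pages[NR_CPUS_MODEL];

enum lru_kind { LRU_KIND_FILE, LRU_KIND_ANON };

/* Called with val > 0 on LRU add and val < 0 on LRU delete. */
static void reliable_lru_add_model(int cpu, enum lru_kind kind, int val)
{
        if (kind == LRU_KIND_FILE)
                pagecache_reliable_pages[cpu] += val;
        else
                anon_reliable_pages[cpu] += val;
}

/* Readers rebuild the total on demand, as ReliableTaskUsed now does. */
static long reliable_task_used_model(void)
{
        long num = 0;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS_MODEL; cpu++)
                num += pagecache_reliable_pages[cpu] + anon_reliable_pages[cpu];

        return num;
}

int main(void)
{
        reliable_lru_add_model(0, LRU_KIND_ANON, 512);  /* fault in anon pages */
        reliable_lru_add_model(1, LRU_KIND_FILE, 256);  /* populate page cache */
        reliable_lru_add_model(0, LRU_KIND_ANON, -128); /* reclaim some pages  */

        printf("ReliableTaskUsed (pages): %ld\n", reliable_task_used_model());
        return 0;
}

Because every update goes through the LRU hooks, paths such as CoW, migration, and shmem cannot leave the counter unbalanced; the cost is that readers walk all CPUs, which the patch accepts in reliable_mem_limit_check(), reliable_report_meminfo(), and reliable_show_mem_info().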
Signed-off-by: Chen Wandun <chenwandun@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Reviewed-by: Wei Li <liwei391@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent b41b26bb
@@ -22,6 +22,8 @@ extern bool shmem_reliable;
 extern struct percpu_counter reliable_shmem_used_nr_page;
 extern bool pagecache_use_reliable_mem;
 DECLARE_PER_CPU(long, nr_reliable_buddy_pages);
+DECLARE_PER_CPU(long, pagecache_reliable_pages);
+DECLARE_PER_CPU(long, anon_reliable_pages);
 extern unsigned long nr_reliable_reserve_pages __read_mostly;
 extern long shmem_reliable_nr_page __read_mostly;
@@ -37,10 +39,10 @@ extern void mem_reliable_out_of_memory(gfp_t gfp_mask, unsigned int order,
                                        int preferred_nid, nodemask_t *nodemask);
 extern bool mem_reliable_status(void);
-extern void page_cache_reliable_lru_add(enum lru_list lru, struct page *page,
-                                        int val);
+extern void reliable_lru_add(enum lru_list lru, struct page *page,
+                             int val);
 extern void page_cache_prepare_alloc(gfp_t *gfp);
-extern void page_cache_reliable_lru_add_batch(int zid, enum lru_list lru,
-                                              int val);
+extern void reliable_lru_add_batch(int zid, enum lru_list lru,
+                                   int val);
 
 static inline bool mem_reliable_is_enabled(void)
@@ -82,8 +84,15 @@ static inline void reliable_page_counter(struct page *page,
 static inline bool reliable_mem_limit_check(unsigned long nr_page)
 {
-        return atomic_long_read(&reliable_task_used_nr_page) + nr_page <=
-                task_reliable_limit / PAGE_SIZE;
+        int cpu;
+        long num = 0;
+
+        for_each_possible_cpu(cpu) {
+                num += per_cpu(pagecache_reliable_pages, cpu);
+                num += per_cpu(anon_reliable_pages, cpu);
+        }
+
+        return num + nr_page <= task_reliable_limit / PAGE_SIZE;
 }
 
 static inline bool reliable_allow_fb_enabled(void)
@@ -172,11 +181,11 @@ static inline bool mem_reliable_status(void) { return false; }
 static inline void mem_reliable_buddy_counter(struct page *page, int nr_page) {}
 static inline bool mem_reliable_watermark_ok(int nr_page) { return true; }
 static inline bool mem_reliable_shmem_limit_check(void) { return true; }
-static inline void page_cache_reliable_lru_add(enum lru_list lru,
-                                               struct page *page,
-                                               int val) {}
+static inline void reliable_lru_add(enum lru_list lru,
+                                    struct page *page,
+                                    int val) {}
 static inline void page_cache_prepare_alloc(gfp_t *gfp) {}
-static inline void page_cache_reliable_lru_add_batch(int zid, enum lru_list lru,
-                                                     int val) {}
+static inline void reliable_lru_add_batch(int zid, enum lru_list lru,
+                                          int val) {}
 #endif
...
@@ -50,7 +50,7 @@ static __always_inline void add_page_to_lru_list(struct page *page,
 {
         update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
         list_add(&page->lru, &lruvec->lists[lru]);
-        page_cache_reliable_lru_add(lru, page, hpage_nr_pages(page));
+        reliable_lru_add(lru, page, hpage_nr_pages(page));
 }
@@ -59,7 +59,7 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
 {
         update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
         list_add_tail(&page->lru, &lruvec->lists[lru]);
-        page_cache_reliable_lru_add(lru, page, hpage_nr_pages(page));
+        reliable_lru_add(lru, page, hpage_nr_pages(page));
 }
 
 static __always_inline void del_page_from_lru_list(struct page *page,
@@ -67,7 +67,7 @@ static __always_inline void del_page_from_lru_list(struct page *page,
 {
         list_del(&page->lru);
         update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
-        page_cache_reliable_lru_add(lru, page, -hpage_nr_pages(page));
+        reliable_lru_add(lru, page, -hpage_nr_pages(page));
 }
 
 /**
...
@@ -216,6 +216,11 @@ static inline int is_file_lru(enum lru_list lru)
         return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
 }
 
+static inline int is_anon_lru(enum lru_list lru)
+{
+        return (lru == LRU_INACTIVE_ANON || lru == LRU_ACTIVE_ANON);
+}
+
 static inline int is_active_lru(enum lru_list lru)
 {
         return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
...
@@ -36,6 +36,7 @@ long shmem_reliable_nr_page = LONG_MAX;
 bool pagecache_use_reliable_mem __read_mostly = true;
 DEFINE_PER_CPU(long, pagecache_reliable_pages);
+DEFINE_PER_CPU(long, anon_reliable_pages);
 
 static unsigned long zero;
 static unsigned long reliable_pagecache_max_bytes = ULONG_MAX;
@@ -59,36 +60,34 @@ bool page_reliable(struct page *page)
         return page_zonenum(page) < ZONE_MOVABLE;
 }
 
-static bool reliable_and_lru_check(enum lru_list lru, struct page *page)
-{
-        if (!page_reliable(page))
-                return false;
-
-        if (!is_file_lru(lru))
-                return false;
-
-        return true;
-}
-
-void page_cache_reliable_lru_add_batch(int zid, enum lru_list lru,
-                                       int val)
+void reliable_lru_add_batch(int zid, enum lru_list lru, int val)
 {
         if (!mem_reliable_is_enabled())
                 return;
 
-        if (zid < 0 || zid >= MAX_NR_ZONES)
-                return;
-
-        if (zid < ZONE_MOVABLE && is_file_lru(lru))
-                this_cpu_add(pagecache_reliable_pages, val);
+        if (zid < ZONE_MOVABLE && zid >= 0) {
+                if (is_file_lru(lru))
+                        this_cpu_add(pagecache_reliable_pages, val);
+                else if (is_anon_lru(lru))
+                        this_cpu_add(anon_reliable_pages, val);
+        }
 }
 
-void page_cache_reliable_lru_add(enum lru_list lru, struct page *page, int val)
+void reliable_lru_add(enum lru_list lru, struct page *page, int val)
 {
-        if (!reliable_and_lru_check(lru, page))
+        if (!page_reliable(page))
                 return;
 
-        this_cpu_add(pagecache_reliable_pages, val);
+        if (is_file_lru(lru))
+                this_cpu_add(pagecache_reliable_pages, val);
+        else if (is_anon_lru(lru))
+                this_cpu_add(anon_reliable_pages, val);
+        else if (lru == LRU_UNEVICTABLE) {
+                if (PageAnon(page))
+                        this_cpu_add(anon_reliable_pages, val);
+                else
+                        this_cpu_add(pagecache_reliable_pages, val);
+        }
 }
 
 static int reliable_mem_notifier(struct notifier_block *nb,
@@ -191,6 +190,7 @@ void reliable_report_meminfo(struct seq_file *m)
 {
         bool pagecache_enabled = pagecache_reliable_is_enabled();
         long nr_pagecache_pages = 0;
+        long nr_anon_pages = 0;
         long nr_buddy_pages = 0;
         int cpu;
@@ -199,6 +199,7 @@ void reliable_report_meminfo(struct seq_file *m)
         for_each_possible_cpu(cpu) {
                 nr_buddy_pages += per_cpu(nr_reliable_buddy_pages, cpu);
+                nr_anon_pages += per_cpu(anon_reliable_pages, cpu);
                 if (pagecache_enabled)
                         nr_pagecache_pages +=
                                 per_cpu(pagecache_reliable_pages, cpu);
@@ -208,8 +209,7 @@ void reliable_report_meminfo(struct seq_file *m)
                     total_reliable_mem_sz() >> PAGE_SHIFT);
         show_val_kb(m, "ReliableUsed: ",
                     used_reliable_mem_sz() >> PAGE_SHIFT);
-        show_val_kb(m, "ReliableTaskUsed: ",
-                    atomic_long_read(&reliable_task_used_nr_page));
+        show_val_kb(m, "ReliableTaskUsed: ", nr_anon_pages + nr_pagecache_pages);
         show_val_kb(m, "ReliableBuddyMem: ", nr_buddy_pages);
 
         if (shmem_reliable_is_enabled()) {
@@ -516,15 +516,21 @@ static void mem_reliable_feature_disable(int idx)
 
 void reliable_show_mem_info(void)
 {
-        if (mem_reliable_is_enabled()) {
-                pr_info("ReliableTotal: %lu kB", total_reliable_mem_sz() >> 10);
-                pr_info("ReliableUsed: %lu kB", used_reliable_mem_sz() >> 10);
-                pr_info("task_reliable_limit: %lu kB",
-                        task_reliable_limit >> 10);
-                pr_info("reliable_user_used: %ld kB",
-                        atomic_long_read(&reliable_task_used_nr_page) <<
-                        (PAGE_SHIFT - 10));
+        int cpu;
+        long num = 0;
+
+        if (!mem_reliable_is_enabled())
+                return;
+
+        for_each_possible_cpu(cpu) {
+                num += per_cpu(anon_reliable_pages, cpu);
+                num += per_cpu(pagecache_reliable_pages, cpu);
         }
+
+        pr_info("ReliableTotal: %lu kB", total_reliable_mem_sz() >> 10);
+        pr_info("ReliableUsed: %lu kB", used_reliable_mem_sz() >> 10);
+        pr_info("task_reliable_limit: %lu kB", task_reliable_limit >> 10);
+        pr_info("reliable_user_used: %ld kB", num << (PAGE_SHIFT - 10));
 }
 
 void mem_reliable_out_of_memory(gfp_t gfp_mask, unsigned int order,
...
@@ -1683,8 +1683,7 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
 #ifdef CONFIG_MEMCG
                 mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
 #endif
-                page_cache_reliable_lru_add_batch(zid, lru,
-                                                  -nr_zone_taken[zid]);
+                reliable_lru_add_batch(zid, lru, -nr_zone_taken[zid]);
         }
 }
@@ -2099,10 +2098,9 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
 
                 nr_pages = hpage_nr_pages(page);
                 update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
+                reliable_lru_add(lru, page, nr_pages);
                 list_move(&page->lru, &lruvec->lists[lru]);
-                page_cache_reliable_lru_add(lru, page, nr_pages);
 
                 if (put_page_testzero(page)) {
                         __ClearPageLRU(page);
                         __ClearPageActive(page);
@@ -4600,7 +4598,7 @@ static int add_page_for_reclaim_swapcache(struct page *page,
         case 0:
                 list_move(&head->lru, pagelist);
                 update_lru_size(lruvec, lru, page_zonenum(head), -hpage_nr_pages(head));
-                page_cache_reliable_lru_add(lru, head, -hpage_nr_pages(head));
+                reliable_lru_add(lru, head, -hpage_nr_pages(head));
                 break;
         case -EBUSY:
                 list_move(&head->lru, src);
...