Commit 32be46d7 authored by Zhou Guanghui, committed by Wang Wensheng

shmem: Count and show reliable shmem info

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

--------------------------------

Count reliable shmem usage based on NR_SHMEM.
Add a ReliableShmem field to /proc/meminfo to report the amount of
reliable memory used by shmem.

- ReliableShmem: reliable memory used by shmem
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Parent 5f0b48de
@@ -972,6 +972,7 @@ varies by architecture and compile options. The following is from a
 ReliableTotal:    7340032 kB
 ReliableUsed:      418824 kB
 ReliableBuddyMem:  418824 kB
+ReliableShmem:         96 kB

 MemTotal
               Total usable RAM (i.e. physical RAM minus a few reserved
@@ -1107,6 +1108,8 @@ ReliableUsed
               The used amount of reliable memory
 ReliableBuddyMem
               Size of unused mirrored memory in buddy system
+ReliableShmem
+              Total reliable memory used by shared memory

 vmallocinfo
 ~~~~~~~~~~~
......
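Aside (illustrative only, not part of this patch): the new field documented above can be checked from user space by scanning /proc/meminfo for the ReliableShmem line. A minimal sketch in plain C, no kernel headers required:

/* Illustrative only: print the ReliableShmem line from /proc/meminfo. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *fp = fopen("/proc/meminfo", "r");
        char line[256];

        if (!fp) {
                perror("fopen /proc/meminfo");
                return 1;
        }
        while (fgets(line, sizeof(line), fp)) {
                if (strncmp(line, "ReliableShmem:", 14) == 0) {
                        fputs(line, stdout);    /* e.g. "ReliableShmem:  96 kB" */
                        break;
                }
        }
        fclose(fp);
        return 0;
}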
@@ -16,6 +16,7 @@ DECLARE_STATIC_KEY_FALSE(mem_reliable);
 extern bool reliable_enabled;
 extern bool shmem_reliable;
+extern struct percpu_counter reliable_shmem_used_nr_page;
 extern bool reliable_allow_fallback;
 extern bool pagecache_use_reliable_mem;
 extern struct percpu_counter pagecache_reliable_pages;
@@ -81,6 +82,12 @@ static inline bool page_reliable(struct page *page)
         return page_zonenum(page) < ZONE_MOVABLE;
 }

+static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
+{
+        if (shmem_reliable_is_enabled() && page_reliable(page))
+                percpu_counter_add(&reliable_shmem_used_nr_page, nr_page);
+}
+
 static inline u64 task_reliable_used_pages(void)
 {
         s64 nr_pages;
@@ -126,6 +133,8 @@ static inline bool skip_none_movable_zone(gfp_t gfp, struct zoneref *z)
 }
 static inline void reliable_report_meminfo(struct seq_file *m) {}
 static inline bool shmem_reliable_is_enabled(void) { return false; }
+static inline void shmem_reliable_page_counter(struct page *page,
+                                               int nr_page) {}
 static inline void page_cache_prepare_alloc(gfp_t *gfp) {}
 static inline bool mem_reliable_status(void) { return false; }
 static inline bool page_reliable(struct page *page) { return false; }
......
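Aside (not kernel code): the inline helper above is the single accounting point; the call sites in the hunks below pass a positive page count when shmem pages enter the page cache and a matching negative count when they leave, so the counter tracks the pages currently resident in reliable zones. A user-space toy model of that pairing, with made-up names, assuming 512 base pages per THP:

#include <assert.h>
#include <stdio.h>

/* Toy stand-in for the per-cpu counter; hypothetical, user-space only. */
static long reliable_shmem_pages;

static void shmem_page_counter(int nr)  /* mirrors the +nr / -nr convention */
{
        reliable_shmem_pages += nr;
}

int main(void)
{
        shmem_page_counter(+512);   /* e.g. a THP added: 512 base pages */
        shmem_page_counter(+1);     /* a single page added */
        shmem_page_counter(-1);     /* that page removed again */
        shmem_page_counter(-512);   /* the THP removed */

        assert(reliable_shmem_pages == 0);  /* balanced add/remove nets to zero */
        printf("counter balanced: %ld\n", reliable_shmem_pages);
        return 0;
}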
@@ -192,6 +192,7 @@ static void unaccount_page_cache_page(struct address_space *mapping,
         __mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);
         if (PageSwapBacked(page)) {
                 __mod_lruvec_page_state(page, NR_SHMEM, -nr);
+                shmem_reliable_page_counter(page, -nr);
                 if (PageTransHuge(page))
                         __dec_node_page_state(page, NR_SHMEM_THPS);
         } else if (PageTransHuge(page)) {
@@ -800,10 +801,14 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
         __dec_lruvec_page_state(old, NR_FILE_PAGES);
         if (!PageHuge(new))
                 __inc_lruvec_page_state(new, NR_FILE_PAGES);
-        if (PageSwapBacked(old))
+        if (PageSwapBacked(old)) {
                 __dec_lruvec_page_state(old, NR_SHMEM);
+                shmem_reliable_page_counter(old, -1);
+        }
+        if (PageSwapBacked(new)) {
                 __inc_lruvec_page_state(new, NR_SHMEM);
+                shmem_reliable_page_counter(new, 1);
+        }
         xas_unlock_irqrestore(&xas, flags);
         if (freepage)
                 freepage(old);
......
@@ -1910,6 +1910,8 @@ static void collapse_file(struct mm_struct *mm,
                         ClearPageActive(page);
                         ClearPageUnevictable(page);
                         unlock_page(page);
+                        if (is_shmem)
+                                shmem_reliable_page_counter(page, -1);
                         put_page(page);
                         index++;
                 }
@@ -1920,8 +1922,10 @@ static void collapse_file(struct mm_struct *mm,
                 SetPageUptodate(new_page);
                 page_ref_add(new_page, HPAGE_PMD_NR - 1);

-                if (is_shmem)
+                if (is_shmem) {
                         set_page_dirty(new_page);
+                        shmem_reliable_page_counter(new_page, 1 << HPAGE_PMD_ORDER);
+                }
                 lru_cache_add(new_page);

                 /*
......
@@ -16,6 +16,7 @@ EXPORT_SYMBOL_GPL(mem_reliable);
 bool reliable_enabled;
 bool shmem_reliable __read_mostly = true;
+struct percpu_counter reliable_shmem_used_nr_page;
 bool reliable_allow_fallback __read_mostly = true;
 bool pagecache_use_reliable_mem __read_mostly = true;
 struct percpu_counter pagecache_reliable_pages;
@@ -147,8 +148,12 @@ void mem_reliable_init(bool has_unmirrored_mem, unsigned long *zone_movable_pfn,
 void shmem_reliable_init(void)
 {
-        if (!mem_reliable_is_enabled() || !shmem_reliable_is_enabled())
+        if (!mem_reliable_is_enabled() || !shmem_reliable_is_enabled()) {
                 shmem_reliable = false;
+                return;
+        }
+
+        percpu_counter_init(&reliable_shmem_used_nr_page, 0, GFP_KERNEL);
 }

 static void show_val_kb(struct seq_file *m, const char *s, unsigned long num)
@@ -166,6 +171,12 @@ void reliable_report_meminfo(struct seq_file *m)
         show_val_kb(m, "ReliableUsed: ", used_reliable_pages());
         show_val_kb(m, "ReliableBuddyMem: ", free_reliable_pages());

+        if (shmem_reliable_is_enabled()) {
+                unsigned long shmem_pages = (unsigned long)percpu_counter_sum(
+                                &reliable_shmem_used_nr_page);
+                show_val_kb(m, "ReliableShmem: ", shmem_pages);
+        }
+
         if (pagecache_reliable_is_enabled()) {
                 s64 nr_pagecache_pages = 0;
                 unsigned long num = 0;
......
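Aside on units (illustrative, assuming 4 KiB pages and that show_val_kb converts a page count to kB as the stock meminfo code does): the 96 kB sample value in the documentation hunk above would correspond to 24 reliable shmem pages.

/* Illustrative arithmetic only: page count -> kB, assuming 4 KiB pages. */
#include <stdio.h>

int main(void)
{
        unsigned long nr_pages = 24;                  /* hypothetical counter sum */
        unsigned long kb = nr_pages * (4096 / 1024);  /* 4 KiB pages -> kB */

        printf("ReliableShmem: %lu kB\n", kb);        /* prints 96 kB */
        return 0;
}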
@@ -481,6 +481,11 @@ int migrate_page_move_mapping(struct address_space *mapping,
         xas_unlock(&xas);
         /* Leave irq disabled to prevent preemption while updating stats */

+        if (PageSwapBacked(page) && !PageSwapCache(page)) {
+                shmem_reliable_page_counter(page, -nr);
+                shmem_reliable_page_counter(newpage, nr);
+        }
+
         /*
          * If moved to a different zone then also account
          * the page for that zone. Other VM counters will be
......
@@ -752,6 +752,7 @@ static int shmem_add_to_page_cache(struct page *page,
                 mapping->nrpages += nr;
                 __mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
                 __mod_lruvec_page_state(page, NR_SHMEM, nr);
+                shmem_reliable_page_counter(page, nr);
 unlock:
                 xas_unlock_irq(&xas);
         } while (xas_nomem(&xas, gfp));
@@ -784,6 +785,7 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
         mapping->nrpages--;
         __dec_lruvec_page_state(page, NR_FILE_PAGES);
         __dec_lruvec_page_state(page, NR_SHMEM);
+        shmem_reliable_page_counter(page, -1);
         xa_unlock_irq(&mapping->i_pages);
         put_page(page);
         BUG_ON(error);
......