Commit fc2c1dc8 authored by Zhou Guanghui, committed by Yang Yingliang

shmem: Show reliable shmem info

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

------------------------------------------

Add a ReliableShmem field to /proc/meminfo to show the amount of
reliable memory used by shmem.

- ReliableShmem: reliable memory used by shmem
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 3a3a1f75
@@ -884,6 +884,7 @@ ShmemHugePages: 0 kB
 ShmemPmdMapped: 0 kB
 ReliableTotal:  7340032 kB
 ReliableUsed:    418824 kB
+ReliableShmem:       96 kB
 
 MemTotal: Total usable ram (i.e. physical ram minus a few reserved
@@ -976,6 +977,7 @@ VmallocChunk: largest contiguous block of vmalloc area which is free
              allocations. This stat excludes the cost of metadata.
 ReliableTotal: Total reliable memory size
 ReliableUsed: The used amount of reliable memory
+ReliableShmem: Reliable memory used by shmem
 ..............................................................................
......
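For reference, a minimal userspace sketch that reads the new field back out of /proc/meminfo. This assumes a kernel built with CONFIG_MEMORY_RELIABLE and memory reliable enabled; on other kernels the line is simply absent and the program prints nothing:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/meminfo", "r");
	char line[128];
	unsigned long kb;

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* matches the seq_printf() format used by reliable_report_meminfo() */
		if (sscanf(line, "ReliableShmem: %lu kB", &kb) == 1) {
			printf("reliable shmem: %lu kB\n", kb);
			break;
		}
	}
	fclose(f);
	return 0;
}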
@@ -7,6 +7,7 @@
 #include <linux/mmzone.h>
 #include <linux/mm_types.h>
 #include <linux/sched.h>
+#include <linux/percpu_counter.h>
 
 #ifdef CONFIG_MEMORY_RELIABLE
@@ -18,6 +19,7 @@ extern atomic_long_t reliable_user_used_nr_page;
 extern unsigned long task_reliable_limit __read_mostly;
 extern bool reliable_allow_fallback;
 extern bool shmem_reliable;
+extern struct percpu_counter reliable_shmem_used_nr_page;
 
 extern void add_reliable_mem_size(long sz);
 extern void mem_reliable_init(bool has_unmirrored_mem,
@@ -83,6 +85,12 @@ static inline bool shmem_reliable_is_enabled(void)
 	return shmem_reliable;
 }
 
+static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
+{
+	if (shmem_reliable_is_enabled() && page_reliable(page))
+		percpu_counter_add(&reliable_shmem_used_nr_page, nr_page);
+}
+
 #else
 #define reliable_enabled 0
 #define reliable_allow_fb_enabled() false
@@ -114,6 +122,9 @@ static inline void mem_reliable_out_of_memory(gfp_t gfp_mask,
 					      int preferred_nid,
 					      nodemask_t *nodemask) {}
 static inline bool shmem_reliable_is_enabled(void) { return false; }
+static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
+{
+}
 #endif
......
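The new shmem_reliable_page_counter() hook sits on allocation and truncation paths, so the patch uses a percpu_counter rather than a single shared atomic: each CPU updates its own slot cheaply, and the exact sum is only computed when /proc/meminfo is read. Below is a minimal userspace analogue of that pattern, for illustration only; the slot count, padding, and function names are assumptions of this sketch, not kernel API:

#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

/* One slot per "CPU": writers touch only their own slot, so the hot
 * path never bounces a shared cache line. Padding avoids false sharing. */
struct pcpu_slot {
	_Atomic long count;
	char pad[64 - sizeof(_Atomic long)];
};

static struct pcpu_slot slots[NCPUS];

/* analogue of percpu_counter_add(): a cheap, CPU-local update */
static void counter_add(int cpu, long nr_pages)
{
	atomic_fetch_add_explicit(&slots[cpu].count, nr_pages,
				  memory_order_relaxed);
}

/* analogue of percpu_counter_sum(): the exact but more expensive read
 * that reliable_report_meminfo() performs when /proc/meminfo is read */
static long counter_sum(void)
{
	long sum = 0;

	for (int cpu = 0; cpu < NCPUS; cpu++)
		sum += atomic_load_explicit(&slots[cpu].count,
					    memory_order_relaxed);
	return sum;
}

int main(void)
{
	counter_add(0, 512);	/* an order-9 THP accounted on CPU 0 */
	counter_add(1, 1);	/* an order-0 page accounted on CPU 1 */
	counter_add(0, -512);	/* the THP is truncated again */
	printf("reliable shmem pages: %ld\n", counter_sum());	/* prints 1 */
	return 0;
}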
@@ -19,6 +19,7 @@ atomic_long_t reliable_user_used_nr_page;
 unsigned long task_reliable_limit = ULONG_MAX;
 bool reliable_allow_fallback __read_mostly = true;
 bool shmem_reliable __read_mostly = true;
+struct percpu_counter reliable_shmem_used_nr_page __read_mostly;
 
 void add_reliable_mem_size(long sz)
 {
@@ -97,7 +98,10 @@ void shmem_reliable_init(void)
 	if (!mem_reliable_is_enabled()) {
 		shmem_reliable = false;
 		pr_info("shmem reliable disabled.\n");
+		return;
 	}
+
+	percpu_counter_init(&reliable_shmem_used_nr_page, 0, GFP_KERNEL);
 }
 
 static unsigned long total_reliable_mem_sz(void)
@@ -124,6 +128,12 @@ void reliable_report_meminfo(struct seq_file *m)
 			   total_reliable_mem_sz() >> 10);
 		seq_printf(m, "ReliableUsed: %8lu kB\n",
 			   used_reliable_mem_sz() >> 10);
+
+		if (shmem_reliable_is_enabled()) {
+			unsigned long shmem = (unsigned long)percpu_counter_sum(
+					&reliable_shmem_used_nr_page) << (PAGE_SHIFT - 10);
+			seq_printf(m, "ReliableShmem: %8lu kB\n", shmem);
+		}
 	}
 }
......
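reliable_report_meminfo() converts the page count to kilobytes with << (PAGE_SHIFT - 10): a page is 2^PAGE_SHIFT bytes and a kilobyte is 2^10 bytes, so the shift multiplies by the per-page size in kB. A small check of the arithmetic; PAGE_SHIFT = 12 is an assumption here, matching the common 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB base pages */

int main(void)
{
	unsigned long nr_pages = 24;

	/* pages -> kB: one page is (1 << PAGE_SHIFT) bytes,
	 * i.e. (1 << (PAGE_SHIFT - 10)) kB, so 4 kB per page here */
	unsigned long kb = nr_pages << (PAGE_SHIFT - 10);

	printf("ReliableShmem: %8lu kB\n", kb);	/* 96 kB, as in the sample above */
	return 0;
}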
@@ -957,6 +957,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 					truncate_inode_page(mapping, page);
 				}
 			}
+			shmem_reliable_page_counter(
+					page, -(1 << compound_order(page)));
 			unlock_page(page);
 		}
 		pagevec_remove_exceptionals(&pvec);
@@ -1067,6 +1069,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 					break;
 				}
 			}
+			shmem_reliable_page_counter(
+					page, -(1 << compound_order(page)));
 			unlock_page(page);
 		}
 		pagevec_remove_exceptionals(&pvec);
@@ -1962,6 +1966,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
 	shmem_recalc_inode(inode);
 	spin_unlock_irq(&info->lock);
+	shmem_reliable_page_counter(page, 1 << compound_order(page));
 	alloced = true;
 
 	if (PageTransHuge(page) &&
......
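Both hooks pass 1 << compound_order(page), so a transparent huge page is accounted as all of its base pages, and the truncation paths in shmem_undo_range() pass the negated count so the counter balances back to zero when the pages are freed. A tiny illustration of the order arithmetic, again assuming 4 KiB base pages:

#include <stdio.h>

int main(void)
{
	/* 1 << order is the number of base pages in a compound page */
	int orders[] = { 0, 9 };	/* order 0: normal page; order 9: 2 MB THP */

	for (int i = 0; i < 2; i++) {
		long nr = 1L << orders[i];

		printf("order %d: +%ld pages on alloc, %ld on truncate (%ld kB)\n",
		       orders[i], nr, -nr, nr * 4);
	}
	return 0;
}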