Commit 5344b7e6 authored by Nick Piggin, committed by Linus Torvalds

vmstat: mlocked pages statistics

Add NR_MLOCK zone page state, which provides a (conservative) count of
mlocked pages (actually, the number of mlocked pages moved off the LRU).

Reworked by lts to fit in with the modified mlock page support in the
Reclaim Scalability series.

[kosaki.motohiro@jp.fujitsu.com: fix incorrect Mlocked field of /proc/meminfo]
[lee.schermerhorn@hp.com: mlocked-pages: add event counting with statistics]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent ba470de4
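The commit message above says NR_MLOCK is a conservative count of mlocked pages, exported through /proc/meminfo ("Mlocked:") and /proc/vmstat ("nr_mlock" and the "unevictable_pgs_*" events). As an illustration only (not part of the commit), here is a minimal userspace sketch, assuming a kernel built with CONFIG_UNEVICTABLE_LRU and this patch applied; the read_meminfo_kb() helper and the 4 MB test buffer are hypothetical scaffolding, while the "Mlocked:" field name comes from the hunks below.

/*
 * Illustrative only -- not part of the commit. Locks a buffer with
 * mlock(2) and prints the system-wide "Mlocked:" value from
 * /proc/meminfo before and after, which is backed by the NR_MLOCK
 * counter introduced by this patch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

static long read_meminfo_kb(const char *field)
{
	char line[256];
	long kb = -1;
	FILE *fp = fopen("/proc/meminfo", "r");

	if (!fp)
		return -1;
	while (fgets(line, sizeof(line), fp)) {
		if (!strncmp(line, field, strlen(field))) {
			sscanf(line + strlen(field), " %ld", &kb);
			break;
		}
	}
	fclose(fp);
	return kb;
}

int main(void)
{
	size_t len = 4 << 20;			/* 4 MB test buffer */
	void *buf = malloc(len);

	if (!buf)
		return 1;
	memset(buf, 0, len);			/* fault the pages in */

	printf("Mlocked before: %ld kB\n", read_meminfo_kb("Mlocked:"));
	if (mlock(buf, len) != 0) {		/* may need RLIMIT_MEMLOCK raised */
		perror("mlock");
		return 1;
	}
	printf("Mlocked after:  %ld kB\n", read_meminfo_kb("Mlocked:"));

	munlock(buf, len);
	free(buf);
	return 0;
}

Because the count is conservative (pages are accounted only when they are actually found and moved off the LRU), the delta printed after mlock() may briefly trail the size of the locked region.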
......@@ -71,7 +71,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
"Node %d Active(file): %8lu kB\n"
"Node %d Inactive(file): %8lu kB\n"
#ifdef CONFIG_UNEVICTABLE_LRU
"Node %d Noreclaim: %8lu kB\n"
"Node %d Unevictable: %8lu kB\n"
"Node %d Mlocked: %8lu kB\n"
#endif
#ifdef CONFIG_HIGHMEM
"Node %d HighTotal: %8lu kB\n"
......@@ -104,6 +105,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
#ifdef CONFIG_UNEVICTABLE_LRU
nid, K(node_page_state(nid, NR_UNEVICTABLE)),
+ nid, K(node_page_state(nid, NR_MLOCK)),
#endif
#ifdef CONFIG_HIGHMEM
nid, K(i.totalhigh),
......
......@@ -176,6 +176,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
"Inactive(file): %8lu kB\n"
#ifdef CONFIG_UNEVICTABLE_LRU
"Unevictable: %8lu kB\n"
"Mlocked: %8lu kB\n"
#endif
#ifdef CONFIG_HIGHMEM
"HighTotal: %8lu kB\n"
......@@ -217,6 +218,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
K(pages[LRU_INACTIVE_FILE]),
#ifdef CONFIG_UNEVICTABLE_LRU
K(pages[LRU_UNEVICTABLE]),
+ K(global_page_state(NR_MLOCK)),
#endif
#ifdef CONFIG_HIGHMEM
K(i.totalhigh),
......
......@@ -88,8 +88,10 @@ enum zone_stat_item {
NR_ACTIVE_FILE, /* " " " " " */
#ifdef CONFIG_UNEVICTABLE_LRU
NR_UNEVICTABLE, /* " " " " " */
+ NR_MLOCK, /* mlock()ed pages found and moved off LRU */
#else
NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */
+ NR_MLOCK = NR_ACTIVE_FILE,
#endif
NR_ANON_PAGES, /* Mapped anonymous pages */
NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
......
......@@ -45,6 +45,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
UNEVICTABLE_PGCULLED, /* culled to noreclaim list */
UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */
UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */
+ UNEVICTABLE_PGMLOCKED,
+ UNEVICTABLE_PGMUNLOCKED,
+ UNEVICTABLE_PGCLEARED, /* on COW, page truncate */
+ UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
#endif
NR_VM_EVENT_ITEMS
};
......
......@@ -101,7 +101,10 @@ static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
return 0;
- SetPageMlocked(page);
+ if (!TestSetPageMlocked(page)) {
+ inc_zone_page_state(page, NR_MLOCK);
+ count_vm_event(UNEVICTABLE_PGMLOCKED);
+ }
return 1;
}
......@@ -128,12 +131,19 @@ static inline void clear_page_mlock(struct page *page)
/*
* mlock_migrate_page - called only from migrate_page_copy() to
- * migrate the Mlocked page flag
+ * migrate the Mlocked page flag; update statistics.
*/
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
- if (TestClearPageMlocked(page))
+ if (TestClearPageMlocked(page)) {
+ unsigned long flags;
+ local_irq_save(flags);
+ __dec_zone_page_state(page, NR_MLOCK);
SetPageMlocked(newpage);
+ __inc_zone_page_state(newpage, NR_MLOCK);
+ local_irq_restore(flags);
+ }
}
......
......@@ -60,6 +60,8 @@ void __clear_page_mlock(struct page *page)
return;
}
+ dec_zone_page_state(page, NR_MLOCK);
+ count_vm_event(UNEVICTABLE_PGCLEARED);
if (!isolate_lru_page(page)) {
putback_lru_page(page);
} else {
......@@ -69,6 +71,9 @@ void __clear_page_mlock(struct page *page)
lru_add_drain_all();
if (!isolate_lru_page(page))
putback_lru_page(page);
+ else if (PageUnevictable(page))
+ count_vm_event(UNEVICTABLE_PGSTRANDED);
}
}
......@@ -80,8 +85,12 @@ void mlock_vma_page(struct page *page)
{
BUG_ON(!PageLocked(page));
- if (!TestSetPageMlocked(page) && !isolate_lru_page(page))
- putback_lru_page(page);
+ if (!TestSetPageMlocked(page)) {
+ inc_zone_page_state(page, NR_MLOCK);
+ count_vm_event(UNEVICTABLE_PGMLOCKED);
+ if (!isolate_lru_page(page))
+ putback_lru_page(page);
+ }
}
/*
......@@ -106,9 +115,31 @@ static void munlock_vma_page(struct page *page)
{
BUG_ON(!PageLocked(page));
- if (TestClearPageMlocked(page) && !isolate_lru_page(page)) {
- try_to_munlock(page);
- putback_lru_page(page);
+ if (TestClearPageMlocked(page)) {
+ dec_zone_page_state(page, NR_MLOCK);
+ if (!isolate_lru_page(page)) {
+ int ret = try_to_munlock(page);
+ /*
+ * did try_to_unlock() succeed or punt?
+ */
+ if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
+ count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+ putback_lru_page(page);
+ } else {
+ /*
+ * We lost the race. let try_to_unmap() deal
+ * with it. At least we get the page state and
+ * mlock stats right. However, page is still on
+ * the noreclaim list. We'll fix that up when
+ * the page is eventually freed or we scan the
+ * noreclaim list.
+ */
+ if (PageUnevictable(page))
+ count_vm_event(UNEVICTABLE_PGSTRANDED);
+ else
+ count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+ }
+ }
}
}
......
......@@ -625,6 +625,7 @@ static const char * const vmstat_text[] = {
"nr_active_file",
#ifdef CONFIG_UNEVICTABLE_LRU
"nr_unevictable",
"nr_mlock",
#endif
"nr_anon_pages",
"nr_mapped",
......@@ -684,6 +685,10 @@ static const char * const vmstat_text[] = {
"unevictable_pgs_culled",
"unevictable_pgs_scanned",
"unevictable_pgs_rescued",
"unevictable_pgs_mlocked",
"unevictable_pgs_munlocked",
"unevictable_pgs_cleared",
"unevictable_pgs_stranded",
#endif
#endif
};
......