提交 79134171 编写于 作者: Andrea Arcangeli 提交者: Linus Torvalds

thp: transparent hugepage vmstat

Add hugepage stat information to /proc/vmstat and /proc/meminfo.
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 b9bbfbe3
@@ -100,6 +100,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		"VmallocChunk:   %8lu kB\n"
 #ifdef CONFIG_MEMORY_FAILURE
 		"HardwareCorrupted: %5lu kB\n"
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		"AnonHugePages:  %8lu kB\n"
 #endif
 		,
 		K(i.totalram),
@@ -128,7 +131,12 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		K(i.freeswap),
 		K(global_page_state(NR_FILE_DIRTY)),
 		K(global_page_state(NR_WRITEBACK)),
-		K(global_page_state(NR_ANON_PAGES)),
+		K(global_page_state(NR_ANON_PAGES)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		  + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
+		  HPAGE_PMD_NR
+#endif
+		  ),
 		K(global_page_state(NR_FILE_MAPPED)),
 		K(global_page_state(NR_SHMEM)),
 		K(global_page_state(NR_SLAB_RECLAIMABLE) +
@@ -150,6 +158,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		vmi.largest_chunk >> 10
 #ifdef CONFIG_MEMORY_FAILURE
 		,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
+		   HPAGE_PMD_NR)
 #endif
 		);
......
@@ -114,6 +114,7 @@ enum zone_stat_item {
 	NUMA_LOCAL,		/* allocation from local node */
 	NUMA_OTHER,		/* allocation from other node */
 #endif
+	NR_ANON_TRANSPARENT_HUGEPAGES,
 	NR_VM_ZONE_STAT_ITEMS };

 /*
......
@@ -751,6 +751,9 @@ static void __split_huge_page_refcount(struct page *page)
 		lru_add_page_tail(zone, page, page_tail);
 	}

+	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
+
 	ClearPageCompound(page);
 	compound_unlock(page);
 	spin_unlock_irq(&zone->lru_lock);
......
@@ -882,8 +882,13 @@ void do_page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
 	int first = atomic_inc_and_test(&page->_mapcount);
-	if (first)
+	if (first) {
+		if (!PageTransHuge(page))
 		__inc_zone_page_state(page, NR_ANON_PAGES);
+		else
+			__inc_zone_page_state(page,
+					      NR_ANON_TRANSPARENT_HUGEPAGES);
+	}
 	if (unlikely(PageKsm(page)))
 		return;
@@ -911,7 +916,10 @@ void page_add_new_anon_rmap(struct page *page,
 	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	SetPageSwapBacked(page);
 	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
+	if (!PageTransHuge(page))
 		__inc_zone_page_state(page, NR_ANON_PAGES);
+	else
+		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
 	__page_set_anon_rmap(page, vma, address, 1);
 	if (page_evictable(page, vma))
 		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
@@ -964,7 +972,11 @@ void page_remove_rmap(struct page *page)
 		return;
 	if (PageAnon(page)) {
 		mem_cgroup_uncharge_page(page);
+		if (!PageTransHuge(page))
 		__dec_zone_page_state(page, NR_ANON_PAGES);
+		else
+			__dec_zone_page_state(page,
+					      NR_ANON_TRANSPARENT_HUGEPAGES);
 	} else {
 		__dec_zone_page_state(page, NR_FILE_MAPPED);
 		mem_cgroup_update_file_mapped(page, -1);
......
@@ -880,6 +880,7 @@ static const char * const vmstat_text[] = {
 	"numa_local",
 	"numa_other",
 #endif
+	"nr_anon_transparent_hugepages",
 	"nr_dirty_threshold",
 	"nr_dirty_background_threshold",
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册