Commit a731286d authored by KOSAKI Motohiro, committed by Linus Torvalds

mm: vmstat: add isolate pages

If the system is running a heavy load of processes, then concurrent reclaim
can isolate a large number of pages from the LRU. /proc/vmstat and the
output generated for an OOM do not show how many pages were isolated.

This has been observed during process fork bomb testing (mstctl11 in LTP).

This patch exposes the number of isolated anon and file pages, both in
/proc/vmstat and in the OOM report.

Reproduced via:

-----------------------
% ./hackbench 140 process 1000
   => OOM occurs

active_anon:146 inactive_anon:0 isolated_anon:49245
 active_file:79 inactive_file:18 isolated_file:113
 unevictable:0 dirty:0 writeback:0 unstable:0 buffer:39
 free:370 slab_reclaimable:309 slab_unreclaimable:5492
 mapped:53 shmem:15 pagetables:28140 bounce:0
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Wu Fengguang <fengguang.wu@intel.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent b35ea17b
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -100,6 +100,8 @@ enum zone_stat_item {
 	NR_BOUNCE,
 	NR_VMSCAN_WRITE,
 	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
+	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
+	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
 	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
 #ifdef CONFIG_NUMA
 	NUMA_HIT,		/* allocated in intended node */
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -67,6 +67,8 @@ int putback_lru_pages(struct list_head *l)
 	list_for_each_entry_safe(page, page2, l, lru) {
 		list_del(&page->lru);
+		dec_zone_page_state(page, NR_ISOLATED_ANON +
+				!!page_is_file_cache(page));
 		putback_lru_page(page);
 		count++;
 	}
@@ -698,6 +700,8 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 		 * restored.
 		 */
 		list_del(&page->lru);
+		dec_zone_page_state(page, NR_ISOLATED_ANON +
+				!!page_is_file_cache(page));
 		putback_lru_page(page);
 	}
@@ -742,6 +746,13 @@ int migrate_pages(struct list_head *from,
 	struct page *page2;
 	int swapwrite = current->flags & PF_SWAPWRITE;
 	int rc;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	list_for_each_entry(page, from, lru)
+		__inc_zone_page_state(page, NR_ISOLATED_ANON +
+				!!page_is_file_cache(page));
+	local_irq_restore(flags);
 
 	if (!swapwrite)
 		current->flags |= PF_SWAPWRITE;
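A note on the accounting calls above: NR_ISOLATED_FILE is declared immediately after NR_ISOLATED_ANON in enum zone_stat_item, so NR_ISOLATED_ANON + !!page_is_file_cache(page) selects the right counter without a branch. The vmscan.c hunks below use the same trick as NR_ISOLATED_ANON + file. The following is a standalone sketch of that idiom, using a mock enum and a mock predicate rather than the kernel's definitions:

/* Standalone illustration of the "base + !!predicate" counter-selection
 * idiom; mock enum and helpers, not the kernel's definitions. */
#include <stdio.h>

enum stat_item { ISOLATED_ANON, ISOLATED_FILE, NR_ITEMS };

static unsigned long stats[NR_ITEMS];

/* pretend bit 0 of the flags means "file backed" */
static int page_is_file(unsigned int flags)
{
	return flags & 0x1;
}

static void mod_isolated(unsigned int flags, long delta)
{
	/* !! collapses any non-zero value to 1, picking ISOLATED_FILE */
	stats[ISOLATED_ANON + !!page_is_file(flags)] += delta;
}

int main(void)
{
	mod_isolated(0x0, 1);	/* anon page isolated */
	mod_isolated(0x1, 1);	/* file page isolated */
	mod_isolated(0x1, -1);	/* file page put back */
	printf("isolated_anon=%lu isolated_file=%lu\n",
	       stats[ISOLATED_ANON], stats[ISOLATED_FILE]);
	return 0;
}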
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2134,16 +2134,18 @@ void show_free_areas(void)
 		}
 	}
 
-	printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
-		" inactive_file:%lu"
+	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
+		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
 		" unevictable:%lu"
 		" dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n"
 		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
 		global_page_state(NR_ACTIVE_ANON),
-		global_page_state(NR_ACTIVE_FILE),
 		global_page_state(NR_INACTIVE_ANON),
+		global_page_state(NR_ISOLATED_ANON),
+		global_page_state(NR_ACTIVE_FILE),
 		global_page_state(NR_INACTIVE_FILE),
+		global_page_state(NR_ISOLATED_FILE),
 		global_page_state(NR_UNEVICTABLE),
 		global_page_state(NR_FILE_DIRTY),
 		global_page_state(NR_WRITEBACK),
@@ -2171,6 +2173,8 @@ void show_free_areas(void)
 			" active_file:%lukB"
 			" inactive_file:%lukB"
 			" unevictable:%lukB"
+			" isolated(anon):%lukB"
+			" isolated(file):%lukB"
 			" present:%lukB"
 			" mlocked:%lukB"
 			" dirty:%lukB"
@@ -2197,6 +2201,8 @@ void show_free_areas(void)
 			K(zone_page_state(zone, NR_ACTIVE_FILE)),
 			K(zone_page_state(zone, NR_INACTIVE_FILE)),
 			K(zone_page_state(zone, NR_UNEVICTABLE)),
+			K(zone_page_state(zone, NR_ISOLATED_ANON)),
+			K(zone_page_state(zone, NR_ISOLATED_FILE)),
 			K(zone->present_pages),
 			K(zone_page_state(zone, NR_MLOCK)),
 			K(zone_page_state(zone, NR_FILE_DIRTY)),
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1072,6 +1072,8 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		unsigned long nr_active;
 		unsigned int count[NR_LRU_LISTS] = { 0, };
 		int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
+		unsigned long nr_anon;
+		unsigned long nr_file;
 
 		nr_taken = sc->isolate_pages(sc->swap_cluster_max,
 			     &page_list, &nr_scan, sc->order, mode,
@@ -1102,6 +1104,10 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		__mod_zone_page_state(zone, NR_INACTIVE_ANON,
 						-count[LRU_INACTIVE_ANON]);
+		nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
+		nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
+		__mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
+		__mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
 
 		reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
 		reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
@@ -1169,6 +1175,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 				spin_lock_irq(&zone->lru_lock);
 			}
 		}
+
+		__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
+		__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
 	} while (nr_scanned < max_scan);
 done:
@@ -1279,6 +1288,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
 	else
 		__mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
+	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
 	spin_unlock_irq(&zone->lru_lock);
 
 	while (!list_empty(&l_hold)) {
@@ -1329,7 +1339,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 						LRU_ACTIVE + file * LRU_FILE);
 	move_active_pages_to_lru(zone, &l_inactive,
 						LRU_BASE + file * LRU_FILE);
-
+	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
 	spin_unlock_irq(&zone->lru_lock);
 }
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -644,6 +644,8 @@ static const char * const vmstat_text[] = {
 	"nr_bounce",
 	"nr_vmscan_write",
 	"nr_writeback_temp",
+	"nr_isolated_anon",
+	"nr_isolated_file",
 	"nr_shmem",
 #ifdef CONFIG_NUMA
 	"numa_hit",
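With the vmstat_text entries above in place, the new counters show up as nr_isolated_anon and nr_isolated_file in /proc/vmstat once the patch is applied. A minimal user-space sketch for reading them back (illustration only, not part of the patch):

/* Read nr_isolated_anon / nr_isolated_file back from /proc/vmstat.
 * Requires a kernel with this patch applied; illustration only. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *fp = fopen("/proc/vmstat", "r");
	char name[64];
	unsigned long val;

	if (!fp) {
		perror("/proc/vmstat");
		return 1;
	}
	/* /proc/vmstat is a list of "name value" pairs, one per line */
	while (fscanf(fp, "%63s %lu", name, &val) == 2) {
		if (!strcmp(name, "nr_isolated_anon") ||
		    !strcmp(name, "nr_isolated_file"))
			printf("%s %lu\n", name, val);
	}
	fclose(fp);
	return 0;
}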