diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index f25cef84b41db718a0d977c471b9afbc6773378e..0beaea0e5b832c9c77d235f6b05522ecba5962ac 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -29,6 +29,7 @@ struct reclaim_stat {
 	unsigned nr_activate;
 	unsigned nr_ref_keep;
 	unsigned nr_unmap_fail;
+	unsigned nr_lazyfree_fail;
 };
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a8e56368916497b0a308ab8d319cc2649246df88..b4850580770cb4bc563e186cb5d29e816039a8cf 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1128,6 +1128,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	unsigned nr_immediate = 0;
 	unsigned nr_ref_keep = 0;
 	unsigned nr_unmap_fail = 0;
+	unsigned nr_lazyfree_fail = 0;
 
 	cond_resched();
 
@@ -1335,11 +1336,15 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 */
 		if (page_mapped(page)) {
 			enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH;
+			bool was_swapbacked = PageSwapBacked(page);
 
 			if (unlikely(PageTransHuge(page)))
 				flags |= TTU_SPLIT_HUGE_PMD;
+
 			if (!try_to_unmap(page, flags)) {
 				nr_unmap_fail++;
+				if (!was_swapbacked && PageSwapBacked(page))
+					nr_lazyfree_fail++;
 				goto activate_locked;
 			}
 		}
@@ -1518,6 +1523,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		stat->nr_activate = pgactivate;
 		stat->nr_ref_keep = nr_ref_keep;
 		stat->nr_unmap_fail = nr_unmap_fail;
+		stat->nr_lazyfree_fail = nr_lazyfree_fail;
 	}
 	return nr_reclaimed;
 }
@@ -1530,7 +1536,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 		.priority = DEF_PRIORITY,
 		.may_unmap = 1,
 	};
-	unsigned long ret;
+	struct reclaim_stat stat;
+	unsigned long nr_reclaimed;
 	struct page *page, *next;
 	LIST_HEAD(clean_pages);
 
@@ -1542,11 +1549,21 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 		}
 	}
 
-	ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
-			TTU_IGNORE_ACCESS, NULL, true);
+	nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
+			TTU_IGNORE_ACCESS, &stat, true);
 	list_splice(&clean_pages, page_list);
-	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
-	return ret;
+	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -nr_reclaimed);
+	/*
+	 * Since lazyfree pages are isolated from the file LRU from the
+	 * beginning, they rotate back to the anonymous LRU in the end if
+	 * the discard fails, so the isolated counts would be mismatched.
+	 * Compensate the isolated counts for both LRU lists.
+	 */
+	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
+			    stat.nr_lazyfree_fail);
+	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
+			    -stat.nr_lazyfree_fail);
+	return nr_reclaimed;
 }
 
 /*
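
For reviewers, the key trick is the was_swapbacked comparison: a lazyfree (MADV_FREE) page is anonymous but not swap-backed, and when try_to_unmap() finds it has been redirtied it sets PG_swapbacked again and fails the unmap, so only failed lazyfree pages see that flag transition. Below is a minimal standalone model of the accounting, assuming that flag behavior; struct page_model and model_try_to_unmap() are hypothetical stand-ins for illustration only, not kernel APIs.

/*
 * Standalone sketch of the flag-transition check in the patch above.
 * Not kernel code: struct page_model and model_try_to_unmap() are
 * hypothetical stand-ins for struct page and try_to_unmap().
 */
#include <stdbool.h>
#include <stdio.h>

struct page_model {
	bool swapbacked;	/* models PageSwapBacked(page) */
	bool dirty;		/* page was written to after MADV_FREE */
};

/* Models try_to_unmap(): a redirtied lazyfree page cannot be discarded,
 * so it is flipped back to swap-backed and the unmap reports failure. */
static bool model_try_to_unmap(struct page_model *page)
{
	if (!page->swapbacked && page->dirty) {
		page->swapbacked = true;
		return false;
	}
	return true;
}

int main(void)
{
	struct page_model page = { .swapbacked = false, .dirty = true };
	unsigned nr_unmap_fail = 0, nr_lazyfree_fail = 0;
	bool was_swapbacked = page.swapbacked;

	if (!model_try_to_unmap(&page)) {
		nr_unmap_fail++;
		/* same test as the patch: flag flipped during the unmap */
		if (!was_swapbacked && page.swapbacked)
			nr_lazyfree_fail++;
	}
	printf("nr_unmap_fail=%u nr_lazyfree_fail=%u\n",
	       nr_unmap_fail, nr_lazyfree_fail);	/* prints 1 1 */
	return 0;
}

This transition is also why reclaim_clean_pages_from_list() moves stat.nr_lazyfree_fail pages' worth of isolated count from NR_ISOLATED_FILE to NR_ISOLATED_ANON: those pages were isolated as file pages but return to the anonymous LRU.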