diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index 00c915f5836999ba81d2867c07b514c01dd07eca..83ae566fac5fc56b0f96f7e661f9a5d65af73ee2 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -40,6 +40,8 @@ extern bool mem_reliable_status(void);
 extern void page_cache_reliable_lru_add(enum lru_list lru, struct page *page,
 					int val);
 extern void page_cache_prepare_alloc(gfp_t *gfp);
+extern void page_cache_reliable_lru_add_batch(int zid, enum lru_list lru,
+					      int val);
 
 static inline bool mem_reliable_is_enabled(void)
 {
@@ -174,6 +176,8 @@ static inline void page_cache_reliable_lru_add(enum lru_list lru,
 					       struct page *page, int val) {}
 static inline void page_cache_prepare_alloc(gfp_t *gfp) {}
+static inline void page_cache_reliable_lru_add_batch(int zid, enum lru_list lru,
+						     int val) {}
 
 #endif
 
 #endif
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index d02159b0182fa921809fcfc0737dae3f609741b9..8c664e0add91c128a697bd00ca922f18624d638e 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -64,12 +64,25 @@ static bool reliable_and_lru_check(enum lru_list lru, struct page *page)
 	if (!page_reliable(page))
 		return false;
 
-	if (lru != LRU_ACTIVE_FILE && lru != LRU_INACTIVE_FILE)
+	if (!is_file_lru(lru))
 		return false;
 
 	return true;
 }
 
+void page_cache_reliable_lru_add_batch(int zid, enum lru_list lru,
+				       int val)
+{
+	if (!mem_reliable_is_enabled())
+		return;
+
+	if (zid < 0 || zid >= MAX_NR_ZONES)
+		return;
+
+	if (zid < ZONE_MOVABLE && is_file_lru(lru))
+		this_cpu_add(pagecache_reliable_pages, val);
+}
+
 void page_cache_reliable_lru_add(enum lru_list lru, struct page *page, int val)
 {
 	if (!reliable_and_lru_check(lru, page))
@@ -177,7 +190,7 @@ static void show_val_kb(struct seq_file *m, const char *s, unsigned long num)
 void reliable_report_meminfo(struct seq_file *m)
 {
 	bool pagecache_enabled = pagecache_reliable_is_enabled();
-	unsigned long nr_pagecache_pages = 0;
+	long nr_pagecache_pages = 0;
 	long nr_buddy_pages = 0;
 	int cpu;
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 15e5864c51050e377814ffa9867581bf86b93213..994c116306aa2d896aae6de82aa3b968720203db 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1683,6 +1683,8 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
 #ifdef CONFIG_MEMCG
 		mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
 #endif
+		page_cache_reliable_lru_add_batch(zid, lru,
+						 -nr_zone_taken[zid]);
 	}
 }
 
@@ -2099,6 +2101,8 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
 		update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
 		list_move(&page->lru, &lruvec->lists[lru]);
 
+		page_cache_reliable_lru_add(lru, page, nr_pages);
+
 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
 			__ClearPageActive(page);