diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index 83ae566fac5fc56b0f96f7e661f9a5d65af73ee2..3d25770e95cd91113a0f7ab5adb5abeab6030353 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -22,6 +22,8 @@ extern bool shmem_reliable;
 extern struct percpu_counter reliable_shmem_used_nr_page;
 extern bool pagecache_use_reliable_mem;
 DECLARE_PER_CPU(long, nr_reliable_buddy_pages);
+DECLARE_PER_CPU(long, pagecache_reliable_pages);
+DECLARE_PER_CPU(long, anon_reliable_pages);
 extern unsigned long nr_reliable_reserve_pages __read_mostly;
 extern long shmem_reliable_nr_page __read_mostly;
 
@@ -37,10 +39,10 @@ extern void mem_reliable_out_of_memory(gfp_t gfp_mask, unsigned int order,
                                        int preferred_nid, nodemask_t *nodemask);
 extern bool mem_reliable_status(void);
-extern void page_cache_reliable_lru_add(enum lru_list lru, struct page *page,
+extern void reliable_lru_add(enum lru_list lru, struct page *page,
                                         int val);
 extern void page_cache_prepare_alloc(gfp_t *gfp);
-extern void page_cache_reliable_lru_add_batch(int zid, enum lru_list lru,
+extern void reliable_lru_add_batch(int zid, enum lru_list lru,
                                               int val);
 
 static inline bool mem_reliable_is_enabled(void)
@@ -82,8 +84,15 @@ static inline void reliable_page_counter(struct page *page,
 
 static inline bool reliable_mem_limit_check(unsigned long nr_page)
 {
-        return atomic_long_read(&reliable_task_used_nr_page) + nr_page <=
-               task_reliable_limit / PAGE_SIZE;
+        int cpu;
+        long num = 0;
+
+        for_each_possible_cpu(cpu) {
+                num += per_cpu(pagecache_reliable_pages, cpu);
+                num += per_cpu(anon_reliable_pages, cpu);
+        }
+
+        return num + nr_page <= task_reliable_limit / PAGE_SIZE;
 }
 
 static inline bool reliable_allow_fb_enabled(void)
@@ -172,11 +181,11 @@ static inline bool mem_reliable_status(void) { return false; }
 static inline void mem_reliable_buddy_counter(struct page *page, int nr_page) {}
 static inline bool mem_reliable_watermark_ok(int nr_page) { return true; }
 static inline bool mem_reliable_shmem_limit_check(void) { return true; }
-static inline void page_cache_reliable_lru_add(enum lru_list lru,
+static inline void reliable_lru_add(enum lru_list lru,
                                                struct page *page, int val) {}
 static inline void page_cache_prepare_alloc(gfp_t *gfp) {}
-static inline void page_cache_reliable_lru_add_batch(int zid, enum lru_list lru,
+static inline void reliable_lru_add_batch(int zid, enum lru_list lru,
                                                       int val) {}
 
 #endif
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 704a93c8f450659a7ce7b75ecaa0fc2f41134500..079ca455317ae145144c6f4010fe36f3aa0503b2 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -50,7 +50,7 @@ static __always_inline void add_page_to_lru_list(struct page *page,
 {
         update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
         list_add(&page->lru, &lruvec->lists[lru]);
-        page_cache_reliable_lru_add(lru, page, hpage_nr_pages(page));
+        reliable_lru_add(lru, page, hpage_nr_pages(page));
 }
@@ -59,7 +59,7 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
 {
         update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
         list_add_tail(&page->lru, &lruvec->lists[lru]);
-        page_cache_reliable_lru_add(lru, page, hpage_nr_pages(page));
+        reliable_lru_add(lru, page, hpage_nr_pages(page));
 }
 
 static __always_inline void del_page_from_lru_list(struct page *page,
@@ -67,7 +67,7 @@ static __always_inline void del_page_from_lru_list(struct page *page,
 {
         list_del(&page->lru);
         update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
-        page_cache_reliable_lru_add(lru, page, -hpage_nr_pages(page));
+        reliable_lru_add(lru, page, -hpage_nr_pages(page));
 }
 
 /**
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e7d2bca35682e53c47f459e270417d30d652a144..9285d4394e0708d77e5dd4f63ccace8005279490 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -216,6 +216,11 @@ static inline int is_file_lru(enum lru_list lru)
         return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
 }
 
+static inline int is_anon_lru(enum lru_list lru)
+{
+        return (lru == LRU_INACTIVE_ANON || lru == LRU_ACTIVE_ANON);
+}
+
 static inline int is_active_lru(enum lru_list lru)
 {
         return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index 8c664e0add91c128a697bd00ca922f18624d638e..d1d05b32c6ef5faa849cb4631fcad364e221c4c4 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -36,6 +36,7 @@ long shmem_reliable_nr_page = LONG_MAX;
 bool pagecache_use_reliable_mem __read_mostly = true;
 DEFINE_PER_CPU(long, pagecache_reliable_pages);
+DEFINE_PER_CPU(long, anon_reliable_pages);
 
 static unsigned long zero;
 static unsigned long reliable_pagecache_max_bytes = ULONG_MAX;
@@ -59,36 +60,34 @@ bool page_reliable(struct page *page)
         return page_zonenum(page) < ZONE_MOVABLE;
 }
 
-static bool reliable_and_lru_check(enum lru_list lru, struct page *page)
-{
-        if (!page_reliable(page))
-                return false;
-
-        if (!is_file_lru(lru))
-                return false;
-
-        return true;
-}
-
-void page_cache_reliable_lru_add_batch(int zid, enum lru_list lru,
-                                       int val)
+void reliable_lru_add_batch(int zid, enum lru_list lru, int val)
 {
         if (!mem_reliable_is_enabled())
                 return;
 
-        if (zid < 0 || zid >= MAX_NR_ZONES)
-                return;
-
-        if (zid < ZONE_MOVABLE && is_file_lru(lru))
-                this_cpu_add(pagecache_reliable_pages, val);
+        if (zid < ZONE_MOVABLE && zid >= 0) {
+                if (is_file_lru(lru))
+                        this_cpu_add(pagecache_reliable_pages, val);
+                else if (is_anon_lru(lru))
+                        this_cpu_add(anon_reliable_pages, val);
+        }
 }
 
-void page_cache_reliable_lru_add(enum lru_list lru, struct page *page, int val)
+void reliable_lru_add(enum lru_list lru, struct page *page, int val)
 {
-        if (!reliable_and_lru_check(lru, page))
+        if (!page_reliable(page))
                 return;
 
-        this_cpu_add(pagecache_reliable_pages, val);
+        if (is_file_lru(lru))
+                this_cpu_add(pagecache_reliable_pages, val);
+        else if (is_anon_lru(lru))
+                this_cpu_add(anon_reliable_pages, val);
+        else if (lru == LRU_UNEVICTABLE) {
+                if (PageAnon(page))
+                        this_cpu_add(anon_reliable_pages, val);
+                else
+                        this_cpu_add(pagecache_reliable_pages, val);
+        }
 }
 
 static int reliable_mem_notifier(struct notifier_block *nb,
@@ -191,6 +190,7 @@ void reliable_report_meminfo(struct seq_file *m)
 {
         bool pagecache_enabled = pagecache_reliable_is_enabled();
         long nr_pagecache_pages = 0;
+        long nr_anon_pages = 0;
         long nr_buddy_pages = 0;
         int cpu;
 
@@ -199,6 +199,7 @@ void reliable_report_meminfo(struct seq_file *m)
 
         for_each_possible_cpu(cpu) {
                 nr_buddy_pages += per_cpu(nr_reliable_buddy_pages, cpu);
+                nr_anon_pages += per_cpu(anon_reliable_pages, cpu);
                 if (pagecache_enabled)
                         nr_pagecache_pages +=
                                 per_cpu(pagecache_reliable_pages, cpu);
@@ -208,8 +209,7 @@
                     total_reliable_mem_sz() >> PAGE_SHIFT);
         show_val_kb(m, "ReliableUsed: ",
                     used_reliable_mem_sz() >> PAGE_SHIFT);
-        show_val_kb(m, "ReliableTaskUsed: ",
-                    atomic_long_read(&reliable_task_used_nr_page));
+        show_val_kb(m, "ReliableTaskUsed: ", nr_anon_pages + nr_pagecache_pages);
         show_val_kb(m, "ReliableBuddyMem: ", nr_buddy_pages);
 
         if (shmem_reliable_is_enabled()) {
@@ -516,15 +516,21 @@ static void mem_reliable_feature_disable(int idx)
 
 void reliable_show_mem_info(void)
 {
-        if (mem_reliable_is_enabled()) {
-                pr_info("ReliableTotal: %lu kB", total_reliable_mem_sz() >> 10);
-                pr_info("ReliableUsed: %lu kB", used_reliable_mem_sz() >> 10);
-                pr_info("task_reliable_limit: %lu kB",
-                        task_reliable_limit >> 10);
-                pr_info("reliable_user_used: %ld kB",
-                        atomic_long_read(&reliable_task_used_nr_page) <<
-                        (PAGE_SHIFT - 10));
+        int cpu;
+        long num = 0;
+
+        if (!mem_reliable_is_enabled())
+                return;
+
+        for_each_possible_cpu(cpu) {
+                num += per_cpu(anon_reliable_pages, cpu);
+                num += per_cpu(pagecache_reliable_pages, cpu);
         }
+
+        pr_info("ReliableTotal: %lu kB", total_reliable_mem_sz() >> 10);
+        pr_info("ReliableUsed: %lu kB", used_reliable_mem_sz() >> 10);
+        pr_info("task_reliable_limit: %lu kB", task_reliable_limit >> 10);
+        pr_info("reliable_user_used: %ld kB", num << (PAGE_SHIFT - 10));
 }
 
 void mem_reliable_out_of_memory(gfp_t gfp_mask, unsigned int order,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b6afafdef507548a4d36972bea28eb3856021e3f..d59b22a7ba9a2dad5694dbeb3742c420ebf218fa 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1683,8 +1683,7 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
 #ifdef CONFIG_MEMCG
                 mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
 #endif
-                page_cache_reliable_lru_add_batch(zid, lru,
-                                                  -nr_zone_taken[zid]);
+                reliable_lru_add_batch(zid, lru, -nr_zone_taken[zid]);
         }
 }
 
@@ -2099,10 +2098,9 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
 
                 nr_pages = hpage_nr_pages(page);
                 update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
+                reliable_lru_add(lru, page, nr_pages);
                 list_move(&page->lru, &lruvec->lists[lru]);
 
-                page_cache_reliable_lru_add(lru, page, nr_pages);
-
                 if (put_page_testzero(page)) {
                         __ClearPageLRU(page);
                         __ClearPageActive(page);
@@ -4600,7 +4598,7 @@ static int add_page_for_reclaim_swapcache(struct page *page,
         case 0:
                 list_move(&head->lru, pagelist);
                 update_lru_size(lruvec, lru, page_zonenum(head), -hpage_nr_pages(head));
-                page_cache_reliable_lru_add(lru, head, -hpage_nr_pages(head));
+                reliable_lru_add(lru, head, -hpage_nr_pages(head));
                 break;
         case -EBUSY:
                 list_move(&head->lru, src);