diff --git a/mm/filemap.c b/mm/filemap.c
index a89d70097e686cc52d4668480e644755990bcd82..9b6e72e14a04b8dfbc52bb4e83007a283441c703 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -291,6 +291,7 @@ static void unaccount_page_cache_page(struct address_space *mapping,
 	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
 	if (PageSwapBacked(page)) {
 		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
+		shmem_reliable_page_counter(page, -nr);
 		if (PageTransHuge(page))
 			__dec_node_page_state(page, NR_SHMEM_THPS);
 	} else {
@@ -895,8 +896,10 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 		 */
 		if (!PageHuge(new))
 			__inc_node_page_state(new, NR_FILE_PAGES);
-		if (PageSwapBacked(new))
+		if (PageSwapBacked(new)) {
 			__inc_node_page_state(new, NR_SHMEM);
+			shmem_reliable_page_counter(new, 1);
+		}
 		xa_unlock_irqrestore(&mapping->i_pages, flags);
 		mem_cgroup_migrate(old, new);
 		radix_tree_preload_end();
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 301d6aa079d7b83899fb4f1357a50d6d35336db3..2975fc124cb6a404a2cc71f956127288a3c87932 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1559,6 +1559,7 @@ static void collapse_shmem(struct mm_struct *mm,
 			ClearPageActive(page);
 			ClearPageUnevictable(page);
 			unlock_page(page);
+			shmem_reliable_page_counter(page, -1);
 			put_page(page);
 			index++;
 		}
@@ -1573,6 +1574,7 @@ static void collapse_shmem(struct mm_struct *mm,
 		mem_cgroup_commit_charge(new_page, memcg, false, true);
 		count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
 		lru_cache_add_anon(new_page);
+		shmem_reliable_page_counter(new_page, 1 << HPAGE_PMD_ORDER);
 
 		/*
 		 * Remove pte page tables, so we can re-fault the page as huge.
diff --git a/mm/migrate.c b/mm/migrate.c
index f7721d0aece5f923237f9b4713a6da3415c6c48a..ecfa8829acfda1e73e095f8348eab70e078620f8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -548,6 +548,11 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	xa_unlock(&mapping->i_pages);
 	/* Leave irq disabled to prevent preemption while updating stats */
 
+	if (PageSwapBacked(page) && !PageSwapCache(page)) {
+		shmem_reliable_page_counter(page, -nr);
+		shmem_reliable_page_counter(newpage, nr);
+	}
+
 	/*
 	 * If moved to a different zone then also account
 	 * the page for that zone. Other VM counters will be
diff --git a/mm/shmem.c b/mm/shmem.c
index 4363dbc8d57e59745bf060f670422bc3714e42f9..8915a5b9ad0a5f5800bb63d32cadd414c8ea1f46 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -733,6 +733,7 @@ static int shmem_add_to_page_cache(struct page *page,
 			__inc_node_page_state(page, NR_SHMEM_THPS);
 		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
 		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
+		shmem_reliable_page_counter(page, nr);
 		xa_unlock_irq(&mapping->i_pages);
 	} else {
 		page->mapping = NULL;
@@ -758,6 +759,7 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
 	mapping->nrpages--;
 	__dec_node_page_state(page, NR_FILE_PAGES);
 	__dec_node_page_state(page, NR_SHMEM);
+	shmem_reliable_page_counter(page, -1);
 	xa_unlock_irq(&mapping->i_pages);
 	put_page(page);
 	BUG_ON(error);
@@ -962,8 +964,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 					truncate_inode_page(mapping, page);
 				}
 			}
-			shmem_reliable_page_counter(
-				page, -(1 << compound_order(page)));
 			unlock_page(page);
 		}
 		pagevec_remove_exceptionals(&pvec);
@@ -1074,8 +1074,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 					break;
 				}
 			}
-			shmem_reliable_page_counter(
-				page, -(1 << compound_order(page)));
 			unlock_page(page);
 		}
 		pagevec_remove_exceptionals(&pvec);
@@ -1981,7 +1979,6 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
 		shmem_recalc_inode(inode);
 		spin_unlock_irq(&info->lock);
-		shmem_reliable_page_counter(page, 1 << compound_order(page));
 		alloced = true;
 
 		if (PageTransHuge(page) &&
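
Note: shmem_reliable_page_counter() itself is defined outside this diff. As a rough sketch of the assumed semantics only — the counter name reliable_shmem_used_nr_page, the shmem_reliable_is_enabled() gate, and the page_reliable() check below are illustrative assumptions, not taken from this patch — the helper is expected to adjust a count of reliable-memory shmem pages by the given number of base pages at each page-cache add/delete site touched above:

	#include <linux/mm.h>
	#include <linux/percpu_counter.h>

	/* Illustrative only: counter of shmem pages backed by reliable memory. */
	extern struct percpu_counter reliable_shmem_used_nr_page;

	/* Hypothetical helpers assumed by this sketch. */
	extern bool shmem_reliable_is_enabled(void);
	extern bool page_reliable(struct page *page);

	/*
	 * Sketch: bump (or drop) the reliable-shmem usage count by nr_page
	 * base pages when a shmem page enters or leaves the page cache.
	 */
	static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
	{
		if (!shmem_reliable_is_enabled())
			return;
		if (page_reliable(page))
			percpu_counter_add(&reliable_shmem_used_nr_page, nr_page);
	}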