Commit fde6ae61 authored by Ma Wupeng, committed by Laibin Qiu

mm: Count reliable shmem used based on NR_SHMEM

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

------------------------------------------

With this patch, the reliable memory counter is updated whenever NR_SHMEM
is updated. Previously the shmem reliable memory counter was inaccurate
when swap was enabled.

NR_SHMEM updates in the memcg scenario are ignored because they have
nothing to do with the global counter. If shmem pages are migrated or
collapsed from one region to another, the reliable memory counter needs
to be updated because the reliable status of these pages may differ.
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Laibin Qiu <qiulaibin@huawei.com>
Parent 9b6c51cd
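The shmem_reliable_page_counter() helper itself is defined outside this diff. As a rough, illustrative sketch only (the counter name, the enable flag, and the ZONE_MOVABLE test below are assumptions, not taken from this patch), a helper of this shape could look like:

#include <linux/atomic.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

/* Illustrative global counter of shmem pages backed by reliable memory. */
static atomic_long_t reliable_shmem_used_nr_page;
static bool shmem_reliable = true;      /* assumed enable switch */

static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
{
        if (!shmem_reliable)
                return;

        /* Assumption: only pages in non-movable (reliable) zones are counted. */
        if (page_zonenum(page) < ZONE_MOVABLE)
                atomic_long_add(nr_page, &reliable_shmem_used_nr_page);
}

Callers pass the number of base pages by which NR_SHMEM changed, positive when pages are added to the page cache and negative when they are removed, so the counter tracks NR_SHMEM exactly for reliable pages.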
@@ -291,6 +291,7 @@ static void unaccount_page_cache_page(struct address_space *mapping,
 	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
 	if (PageSwapBacked(page)) {
 		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
+		shmem_reliable_page_counter(page, -nr);
 		if (PageTransHuge(page))
 			__dec_node_page_state(page, NR_SHMEM_THPS);
 	} else {
@@ -895,8 +896,10 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 	 */
 	if (!PageHuge(new))
 		__inc_node_page_state(new, NR_FILE_PAGES);
-	if (PageSwapBacked(new))
+	if (PageSwapBacked(new)) {
 		__inc_node_page_state(new, NR_SHMEM);
+		shmem_reliable_page_counter(new, 1);
+	}
 	xa_unlock_irqrestore(&mapping->i_pages, flags);
 	mem_cgroup_migrate(old, new);
 	radix_tree_preload_end();
@@ -1559,6 +1559,7 @@ static void collapse_shmem(struct mm_struct *mm,
 			ClearPageActive(page);
 			ClearPageUnevictable(page);
 			unlock_page(page);
+			shmem_reliable_page_counter(page, -1);
 			put_page(page);
 			index++;
 		}
@@ -1573,6 +1574,7 @@ static void collapse_shmem(struct mm_struct *mm,
 		mem_cgroup_commit_charge(new_page, memcg, false, true);
 		count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
 		lru_cache_add_anon(new_page);
+		shmem_reliable_page_counter(new_page, 1 << HPAGE_PMD_ORDER);

 		/*
 		 * Remove pte page tables, so we can re-fault the page as huge.
@@ -548,6 +548,11 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	xa_unlock(&mapping->i_pages);
 	/* Leave irq disabled to prevent preemption while updating stats */

+	if (PageSwapBacked(page) && !PageSwapCache(page)) {
+		shmem_reliable_page_counter(page, -nr);
+		shmem_reliable_page_counter(newpage, nr);
+	}
+
 	/*
 	 * If moved to a different zone then also account
 	 * the page for that zone. Other VM counters will be
@@ -733,6 +733,7 @@ static int shmem_add_to_page_cache(struct page *page,
 			__inc_node_page_state(page, NR_SHMEM_THPS);
 		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
 		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
+		shmem_reliable_page_counter(page, nr);
 		xa_unlock_irq(&mapping->i_pages);
 	} else {
 		page->mapping = NULL;
@@ -758,6 +759,7 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
 	mapping->nrpages--;
 	__dec_node_page_state(page, NR_FILE_PAGES);
 	__dec_node_page_state(page, NR_SHMEM);
+	shmem_reliable_page_counter(page, -1);
 	xa_unlock_irq(&mapping->i_pages);
 	put_page(page);
 	BUG_ON(error);
@@ -962,8 +964,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 					truncate_inode_page(mapping, page);
 				}
 			}
-			shmem_reliable_page_counter(
-				page, -(1 << compound_order(page)));
 			unlock_page(page);
 		}
 		pagevec_remove_exceptionals(&pvec);
@@ -1074,8 +1074,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 					break;
 				}
 			}
-			shmem_reliable_page_counter(
-				page, -(1 << compound_order(page)));
 			unlock_page(page);
 		}
 		pagevec_remove_exceptionals(&pvec);
@@ -1981,7 +1979,6 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
 		shmem_recalc_inode(inode);
 		spin_unlock_irq(&info->lock);
-		shmem_reliable_page_counter(page, 1 << compound_order(page));
 		alloced = true;
 		if (PageTransHuge(page) &&