提交 f4834f6b 编写于 作者: M Matthew Wilcox (Oracle) 提交者: Zheng Zengkai

mm: stop accounting shadow entries

mainline inclusion
from mainline-v5.13-rc1
commit 46be67b4
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I3ZE5V
CVE: NA

-------------------------------------------------

We no longer need to keep track of how many shadow entries are present in
a mapping.  This saves a few writes to the inode and memory barriers.

Link: https://lkml.kernel.org/r/20201026151849.24232-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Vishal Verma <vishal.l.verma@intel.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Tong Tiangen <tongtiangen@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
上级 2856f2ee
...@@ -140,17 +140,6 @@ static void page_cache_delete(struct address_space *mapping, ...@@ -140,17 +140,6 @@ static void page_cache_delete(struct address_space *mapping,
page->mapping = NULL; page->mapping = NULL;
/* Leave page->index set: truncation lookup relies upon it */ /* Leave page->index set: truncation lookup relies upon it */
if (shadow) {
mapping->nrexceptional += nr;
/*
* Make sure the nrexceptional update is committed before
* the nrpages update so that final truncate racing
* with reclaim does not see both counters 0 at the
* same time and miss a shadow entry.
*/
smp_wmb();
}
mapping->nrpages -= nr; mapping->nrpages -= nr;
} }
...@@ -885,8 +874,6 @@ noinline int __add_to_page_cache_locked(struct page *page, ...@@ -885,8 +874,6 @@ noinline int __add_to_page_cache_locked(struct page *page,
if (xas_error(&xas)) if (xas_error(&xas))
goto unlock; goto unlock;
if (old)
mapping->nrexceptional--;
mapping->nrpages++; mapping->nrpages++;
/* hugetlb pages do not participate in page cache accounting */ /* hugetlb pages do not participate in page cache accounting */
......
...@@ -158,7 +158,6 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, ...@@ -158,7 +158,6 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry,
xas_store(&xas, page); xas_store(&xas, page);
xas_next(&xas); xas_next(&xas);
} }
address_space->nrexceptional -= nr_shadows;
address_space->nrpages += nr; address_space->nrpages += nr;
__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr); __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
ADD_CACHE_INFO(add_total, nr); ADD_CACHE_INFO(add_total, nr);
...@@ -197,8 +196,6 @@ void __delete_from_swap_cache(struct page *page, ...@@ -197,8 +196,6 @@ void __delete_from_swap_cache(struct page *page,
xas_next(&xas); xas_next(&xas);
} }
ClearPageSwapCache(page); ClearPageSwapCache(page);
if (shadow)
address_space->nrexceptional += nr;
address_space->nrpages -= nr; address_space->nrpages -= nr;
__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr); __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
ADD_CACHE_INFO(del_total, nr); ADD_CACHE_INFO(del_total, nr);
...@@ -299,7 +296,6 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin, ...@@ -299,7 +296,6 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin,
xas_store(&xas, NULL); xas_store(&xas, NULL);
nr_shadows++; nr_shadows++;
} }
address_space->nrexceptional -= nr_shadows;
xa_unlock_irq(&address_space->i_pages); xa_unlock_irq(&address_space->i_pages);
/* search the next swapcache until we meet end */ /* search the next swapcache until we meet end */
......
...@@ -40,7 +40,6 @@ static inline void __clear_shadow_entry(struct address_space *mapping, ...@@ -40,7 +40,6 @@ static inline void __clear_shadow_entry(struct address_space *mapping,
if (xas_load(&xas) != entry) if (xas_load(&xas) != entry)
return; return;
xas_store(&xas, NULL); xas_store(&xas, NULL);
mapping->nrexceptional--;
} }
static void clear_shadow_entry(struct address_space *mapping, pgoff_t index, static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
......
...@@ -557,7 +557,6 @@ static enum lru_status shadow_lru_isolate(struct list_head *item, ...@@ -557,7 +557,6 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
goto out_invalid; goto out_invalid;
if (WARN_ON_ONCE(node->count != node->nr_values)) if (WARN_ON_ONCE(node->count != node->nr_values))
goto out_invalid; goto out_invalid;
mapping->nrexceptional -= node->nr_values;
xa_delete_node(node, workingset_update_node); xa_delete_node(node, workingset_update_node);
__inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM); __inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册