Commit 7467c391 authored by Muchun Song, committed by Linus Torvalds

mm: memcontrol: rename lruvec_holds_page_lru_lock to page_matches_lruvec

lruvec_holds_page_lru_lock() doesn't check anything about locking and is
used to check whether the page belongs to the lruvec.  So rename it to
page_matches_lruvec().

Link: https://lkml.kernel.org/r/20210417043538.9793-6-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: f2e4d28d
@@ -1492,8 +1492,8 @@ static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
 	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
 }
 
-static inline bool lruvec_holds_page_lru_lock(struct page *page,
-					      struct lruvec *lruvec)
+/* Test requires a stable page->memcg binding, see page_memcg() */
+static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
 {
 	return lruvec_pgdat(lruvec) == page_pgdat(page) &&
 	       lruvec_memcg(lruvec) == page_memcg(page);
@@ -1504,7 +1504,7 @@ static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
 		struct lruvec *locked_lruvec)
 {
 	if (locked_lruvec) {
-		if (lruvec_holds_page_lru_lock(page, locked_lruvec))
+		if (page_matches_lruvec(page, locked_lruvec))
 			return locked_lruvec;
 
 		unlock_page_lruvec_irq(locked_lruvec);
@@ -1518,7 +1518,7 @@ static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
 		struct lruvec *locked_lruvec, unsigned long *flags)
 {
 	if (locked_lruvec) {
-		if (lruvec_holds_page_lru_lock(page, locked_lruvec))
+		if (page_matches_lruvec(page, locked_lruvec))
 			return locked_lruvec;
 
 		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
...
@@ -2063,7 +2063,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 		 * All pages were isolated from the same lruvec (and isolation
 		 * inhibits memcg migration).
 		 */
-		VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
+		VM_BUG_ON_PAGE(!page_matches_lruvec(page, lruvec), page);
 		add_page_to_lru_list(page, lruvec);
 		nr_pages = thp_nr_pages(page);
 		nr_moved += nr_pages;
...
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册