Commit 6ac57a66 authored by Hugh Dickins, committed by Yang Yingliang

mlock: fix unevictable_pgs event counts on THP

mainline inclusion
from mainline-v5.9-rc6
commit 0964730b
category: bugfix
bugzilla: 36232
CVE: NA

-------------------------------------------------

5.8 commit 5d91f31f ("mm: swap: fix vmstats for huge page") has
established that vm_events should count every subpage of a THP, including
unevictable_pgs_culled and unevictable_pgs_rescued; but
lru_cache_add_inactive_or_unevictable() was not doing so for
unevictable_pgs_mlocked, and mm/mlock.c was not doing so for
unevictable_pgs mlocked, munlocked, cleared and stranded.

Fix them; but THPs don't go the pagevec way in mlock.c, so no fixes needed
on that path.
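
For context, a minimal userspace sketch (not part of the patch) of what the fix
changes in observable terms: with 4KB base pages a 2MB THP has 512 subpages, so
after this fix mlocking a THP-backed range should bump unevictable_pgs_mlocked
in /proc/vmstat by 512 rather than 1.  The program below is a hypothetical
check; it assumes x86_64, THP enabled, and a sufficient RLIMIT_MEMLOCK, and the
kernel may still decline to back the region with a huge page.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#define LEN (2UL << 20)		/* 2MB: one THP, i.e. 512 subpages of 4KB */

/* Return the value of one /proc/vmstat counter, or 0 if not found. */
static unsigned long read_vmstat(const char *key)
{
	char name[64];
	unsigned long val, ret = 0;
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 0;
	while (fscanf(f, "%63s %lu", name, &val) == 2) {
		if (!strcmp(name, key)) {
			ret = val;
			break;
		}
	}
	fclose(f);
	return ret;
}

int main(void)
{
	unsigned long before, after;
	/* A 2MB-aligned allocation makes a THP-backed mapping more likely. */
	char *p = aligned_alloc(LEN, LEN);

	if (!p)
		return 1;
	madvise(p, LEN, MADV_HUGEPAGE);
	memset(p, 1, LEN);		/* fault the memory in */

	before = read_vmstat("unevictable_pgs_mlocked");
	if (mlock(p, LEN)) {	/* may need root or a raised RLIMIT_MEMLOCK */
		perror("mlock");
		return 1;
	}
	after = read_vmstat("unevictable_pgs_mlocked");

	/* Expected delta with the fix: 512 if a THP backs the region. */
	printf("unevictable_pgs_mlocked delta: %lu\n", after - before);
	return 0;
}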

Fixes: 5d91f31f ("mm: swap: fix vmstats for huge page")
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Yang Shi <shy828301@gmail.com>
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Qian Cai <cai@lca.pw>
Link: http://lkml.kernel.org/r/alpine.LSU.2.11.2008301408230.5954@eggly.anvils
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Parent a3b20702
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -58,12 +58,14 @@ EXPORT_SYMBOL(can_do_mlock);
  */
 void clear_page_mlock(struct page *page)
 {
+	int nr_pages;
+
 	if (!TestClearPageMlocked(page))
 		return;
 
-	mod_zone_page_state(page_zone(page), NR_MLOCK,
-			    -hpage_nr_pages(page));
-	count_vm_event(UNEVICTABLE_PGCLEARED);
+	nr_pages = hpage_nr_pages(page);
+	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+	count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
 	/*
 	 * The previous TestClearPageMlocked() corresponds to the smp_mb()
 	 * in __pagevec_lru_add_fn().
@@ -77,7 +79,7 @@ void clear_page_mlock(struct page *page)
 		 * We lost the race. the page already moved to evictable list.
 		 */
 		if (PageUnevictable(page))
-			count_vm_event(UNEVICTABLE_PGSTRANDED);
+			count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
 	}
 }
 
@@ -94,9 +96,10 @@ void mlock_vma_page(struct page *page)
 	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
 
 	if (!TestSetPageMlocked(page)) {
-		mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    hpage_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
+		int nr_pages = hpage_nr_pages(page);
+
+		mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
+		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
 		if (!isolate_lru_page(page))
 			putback_lru_page(page);
 	}
@@ -139,7 +142,7 @@ static void __munlock_isolated_page(struct page *page)
 
 	/* Did try_to_unlock() succeed or punt? */
 	if (!PageMlocked(page))
-		count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+		count_vm_events(UNEVICTABLE_PGMUNLOCKED, hpage_nr_pages(page));
 
 	putback_lru_page(page);
 }
@@ -155,10 +158,12 @@ static void __munlock_isolated_page(struct page *page)
  */
 static void __munlock_isolation_failed(struct page *page)
 {
+	int nr_pages = hpage_nr_pages(page);
+
 	if (PageUnevictable(page))
-		__count_vm_event(UNEVICTABLE_PGSTRANDED);
+		__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
 	else
-		__count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+		__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
 }
 
 /**
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -464,14 +464,14 @@ void lru_cache_add_active_or_unevictable(struct page *page,
 	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
 		SetPageActive(page);
 	else if (!TestSetPageMlocked(page)) {
+		int nr_pages = hpage_nr_pages(page);
 		/*
 		 * We use the irq-unsafe __mod_zone_page_stat because this
 		 * counter is not modified from interrupt context, and the pte
 		 * lock is held(spinlock), which implies preemption disabled.
 		 */
-		__mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    hpage_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
+		__mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
+		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
 	}
 	lru_cache_add(page);
 }