commit 77fe7f13 authored by Mel Gorman, committed by Linus Torvalds

mm/page_alloc: check high-order pages for corruption during PCP operations

Eric Dumazet pointed out that commit 44042b44 ("mm/page_alloc: allow
high-order pages to be stored on the per-cpu lists") only checks the
head page during PCP refill and allocation operations.  This was an
oversight and all pages should be checked.  This will incur a small
performance penalty but it's necessary for correctness.
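
The change is small but worth spelling out: a high-order allocation covers
1 << order contiguous pages, and validating only page[0] lets corruption in
any tail page slip through. A minimal userspace sketch of the idea follows;
struct page and page_is_bad() here are toy stand-ins for illustration, not
the kernel's types:

#include <stdbool.h>
#include <stdio.h>

struct page { int corrupted; };		/* toy stand-in for the kernel's struct page */

/* hypothetical stand-in for check_new_page(): true means the page is bad */
static bool page_is_bad(const struct page *p)
{
	return p->corrupted != 0;
}

/* pre-fix behaviour: only the head page of the block is validated */
static bool check_head_only(const struct page *block, unsigned int order)
{
	(void)order;			/* tail pages are never looked at */
	return page_is_bad(&block[0]);
}

/* post-fix behaviour, same shape as check_new_pages(): walk all 1 << order pages */
static bool check_all_pages(const struct page *block, unsigned int order)
{
	for (unsigned int i = 0; i < (1u << order); i++)
		if (page_is_bad(&block[i]))
			return true;
	return false;
}

int main(void)
{
	struct page block[8] = {0};	/* an order-3 block: 8 contiguous pages */

	block[5].corrupted = 1;		/* corrupt one tail page */

	printf("head-only check: %s\n", check_head_only(block, 3) ? "bad" : "ok");
	printf("whole-block check: %s\n", check_all_pages(block, 3) ? "bad" : "ok");
	return 0;
}

Run against an order-3 block with a corrupted tail page, the head-only check
reports "ok" while the whole-block check catches the bad page; the latter is
what check_new_pages() now does on every PCP refill and allocation.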

Link: https://lkml.kernel.org/r/20220310092456.GJ15701@techsingularity.net
Fixes: 44042b44 ("mm/page_alloc: allow high-order pages to be stored on the per-cpu lists")
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reported-by: Eric Dumazet <edumazet@google.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Wei Xu <weixugc@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3313204c
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2291,23 +2291,36 @@ static inline int check_new_page(struct page *page)
 	return 1;
 }
 
+static bool check_new_pages(struct page *page, unsigned int order)
+{
+	int i;
+	for (i = 0; i < (1 << order); i++) {
+		struct page *p = page + i;
+
+		if (unlikely(check_new_page(p)))
+			return true;
+	}
+
+	return false;
+}
+
 #ifdef CONFIG_DEBUG_VM
 /*
  * With DEBUG_VM enabled, order-0 pages are checked for expected state when
  * being allocated from pcp lists. With debug_pagealloc also enabled, they are
  * also checked when pcp lists are refilled from the free lists.
  */
-static inline bool check_pcp_refill(struct page *page)
+static inline bool check_pcp_refill(struct page *page, unsigned int order)
 {
 	if (debug_pagealloc_enabled_static())
-		return check_new_page(page);
+		return check_new_pages(page, order);
 	else
 		return false;
 }
 
-static inline bool check_new_pcp(struct page *page)
+static inline bool check_new_pcp(struct page *page, unsigned int order)
 {
-	return check_new_page(page);
+	return check_new_pages(page, order);
 }
 #else
 /*
@@ -2315,32 +2328,19 @@ static inline bool check_new_pcp(struct page *page)
  * when pcp lists are being refilled from the free lists. With debug_pagealloc
  * enabled, they are also checked when being allocated from the pcp lists.
  */
-static inline bool check_pcp_refill(struct page *page)
+static inline bool check_pcp_refill(struct page *page, unsigned int order)
 {
-	return check_new_page(page);
+	return check_new_pages(page, order);
 }
 
-static inline bool check_new_pcp(struct page *page)
+static inline bool check_new_pcp(struct page *page, unsigned int order)
 {
 	if (debug_pagealloc_enabled_static())
-		return check_new_page(page);
+		return check_new_pages(page, order);
 	else
 		return false;
 }
 #endif /* CONFIG_DEBUG_VM */
 
-static bool check_new_pages(struct page *page, unsigned int order)
-{
-	int i;
-	for (i = 0; i < (1 << order); i++) {
-		struct page *p = page + i;
-
-		if (unlikely(check_new_page(p)))
-			return true;
-	}
-
-	return false;
-}
-
 inline void post_alloc_hook(struct page *page, unsigned int order,
 				gfp_t gfp_flags)
 {
@@ -2982,7 +2982,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		if (unlikely(page == NULL))
			break;
 
-		if (unlikely(check_pcp_refill(page)))
+		if (unlikely(check_pcp_refill(page, order)))
			continue;
 
		/*
@@ -3600,7 +3600,7 @@ struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
 		page = list_first_entry(list, struct page, lru);
 		list_del(&page->lru);
 		pcp->count -= 1 << order;
-	} while (check_new_pcp(page));
+	} while (check_new_pcp(page, order));
 
 	return page;
 }