Commit 7b649150 authored by Mike Kravetz, committed by Zheng Zengkai

hugetlb: create remove_hugetlb_page() to separate functionality

mainline inclusion
from mainline-v5.13-rc1
commit 6eb4e88a
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I3ZCW9
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=6eb4e88a6d27022ea8aff424d47a0a5dfc9fcb34

-------------------------------------------------

The new remove_hugetlb_page() routine is designed to remove a hugetlb
page from hugetlbfs processing.  It will remove the page from the active
or free list, update global counters and set the compound page
destructor to NULL so that PageHuge() will return false for the 'page'.
After this call, the 'page' can be treated as a normal compound page or
a collection of base size pages.

update_and_free_page no longer decrements h->nr_huge_pages{_node} as
this is performed in remove_hugetlb_page.  The only functionality
performed by update_and_free_page is to free the base pages to the lower
level allocators.

update_and_free_page is typically called after remove_hugetlb_page.

remove_hugetlb_page is to be called with the hugetlb_lock held.
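
For illustration, a minimal sketch of the caller pattern this patch establishes
(not a hunk from the diff below; the explicit spin_lock()/spin_unlock() pair
stands in for the locking already present at the call sites touched here):

	spin_lock(&hugetlb_lock);
	/* Unlink the page and adjust free/surplus/nr_huge_pages counters. */
	remove_hugetlb_page(h, page, false);
	/* Return the base pages to the lower level allocators. */
	update_and_free_page(h, page);
	spin_unlock(&hugetlb_lock);

In this commit both calls still run with hugetlb_lock held; later patches in
the series move update_and_free_page() outside the lock.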

Creating this routine and separating functionality is in preparation for
restructuring code to reduce lock hold times.  This commit should not
introduce any changes to functionality.

Link: https://lkml.kernel.org/r/20210409205254.242291-5-mike.kravetz@oracle.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.ibm.com>
Cc: Barry Song <song.bao.hua@hisilicon.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: HORIGUCHI NAOYA <naoya.horiguchi@nec.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Chen Huang <chenhuang5@huawei.com>
Signed-off-by: Nanyong Sun <sunnanyong@huawei.com>
Reviewed-by: Tong Tiangen <tongtiangen@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 999737ef
@@ -1322,6 +1322,41 @@ static inline void destroy_compound_gigantic_page(struct page *page,
 						unsigned int order) { }
 #endif
 
+/*
+ * Remove hugetlb page from lists, and update dtor so that page appears
+ * as just a compound page.  A reference is held on the page.
+ *
+ * Must be called with hugetlb lock held.
+ */
+static void remove_hugetlb_page(struct hstate *h, struct page *page,
+							bool adjust_surplus)
+{
+	int nid = page_to_nid(page);
+
+	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
+	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
+
+	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
+		return;
+
+	list_del(&page->lru);
+
+	if (HPageFreed(page)) {
+		h->free_huge_pages--;
+		h->free_huge_pages_node[nid]--;
+	}
+	if (adjust_surplus) {
+		h->surplus_huge_pages--;
+		h->surplus_huge_pages_node[nid]--;
+	}
+
+	set_page_refcounted(page);
+	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
+
+	h->nr_huge_pages--;
+	h->nr_huge_pages_node[nid]--;
+}
+
 static void update_and_free_page(struct hstate *h, struct page *page)
 {
 	int i;
@@ -1330,8 +1365,6 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
 		return;
 
-	h->nr_huge_pages--;
-	h->nr_huge_pages_node[page_to_nid(page)]--;
 	for (i = 0; i < pages_per_huge_page(h);
 	     i++, subpage = mem_map_next(subpage, page, i)) {
 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
@@ -1339,10 +1372,6 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 				1 << PG_active | 1 << PG_private |
 				1 << PG_writeback);
 	}
-	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
-	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
-	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
-	set_page_refcounted(page);
 	if (hstate_is_gigantic(h)) {
 		destroy_compound_gigantic_page(page, huge_page_order(h));
 		free_gigantic_page(page, huge_page_order(h));
@@ -1410,15 +1439,12 @@ static void __free_huge_page(struct page *page)
 		h->resv_huge_pages++;
 
 	if (HPageTemporary(page)) {
-		list_del(&page->lru);
-		ClearHPageTemporary(page);
+		remove_hugetlb_page(h, page, false);
 		update_and_free_page(h, page);
 	} else if (h->surplus_huge_pages_node[nid]) {
 		/* remove the page from active list */
-		list_del(&page->lru);
+		remove_hugetlb_page(h, page, true);
 		update_and_free_page(h, page);
-		h->surplus_huge_pages--;
-		h->surplus_huge_pages_node[nid]--;
 	} else {
 		arch_clear_hugepage_flags(page);
 		enqueue_huge_page(h, page);
@@ -1704,13 +1730,7 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 			struct page *page =
 				list_entry(h->hugepage_freelists[node].next,
 					  struct page, lru);
-			list_del(&page->lru);
-			h->free_huge_pages--;
-			h->free_huge_pages_node[node]--;
-			if (acct_surplus) {
-				h->surplus_huge_pages--;
-				h->surplus_huge_pages_node[node]--;
-			}
+			remove_hugetlb_page(h, page, acct_surplus);
 			update_and_free_page(h, page);
 			ret = 1;
 			break;
@@ -1748,7 +1768,6 @@ int dissolve_free_huge_page(struct page *page)
 	if (!page_count(page)) {
 		struct page *head = compound_head(page);
 		struct hstate *h = page_hstate(head);
-		int nid = page_to_nid(head);
 
 		if (h->free_huge_pages - h->resv_huge_pages == 0)
 			goto out;
@@ -1779,9 +1798,7 @@ int dissolve_free_huge_page(struct page *page)
 			SetPageHWPoison(page);
 			ClearPageHWPoison(head);
 		}
-		list_del(&head->lru);
-		h->free_huge_pages--;
-		h->free_huge_pages_node[nid]--;
+		remove_hugetlb_page(h, page, false);
 		h->max_huge_pages--;
 		update_and_free_page(h, head);
 		rc = 0;
@@ -2546,10 +2563,8 @@ static void try_to_free_low(struct hstate *h, unsigned long count,
 				return;
 			if (PageHighMem(page))
 				continue;
-			list_del(&page->lru);
+			remove_hugetlb_page(h, page, false);
 			update_and_free_page(h, page);
-			h->free_huge_pages--;
-			h->free_huge_pages_node[page_to_nid(page)]--;
 		}
 	}
 }