Commit 94ae8ba7 authored by Aneesh Kumar K.V, committed by Linus Torvalds

hugetlb/cgroup: assign the page hugetlb cgroup when we move the page to active list.

A page's hugetlb cgroup assignment and movement to the active list should
occur with hugetlb_lock held.  Otherwise when we remove the hugetlb cgroup
we will iterate the active list and find pages with NULL hugetlb cgroup
values.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 79dbb236
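The rule the patch enforces -- the cgroup assignment and the move to the active list must happen in a single hugetlb_lock critical section -- can be modeled in a few lines of userspace C. This is a sketch only: the mutex, the toy list, and the field names below are illustrative stand-ins for hugetlb_lock, set_hugetlb_cgroup() and h->hugepage_activelist, not kernel APIs.

/*
 * Userspace sketch of the locking rule this patch enforces -- NOT
 * kernel code.  "cgroup" and "activelist" stand in for the hugetlb
 * cgroup pointer and the hstate active list; the mutex stands in
 * for hugetlb_lock.
 */
#include <assert.h>
#include <pthread.h>
#include <stddef.h>

struct page {
	struct page *next;   /* toy list linkage */
	void *cgroup;        /* stand-in for the hugetlb cgroup pointer */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct page *activelist; /* toy active list, protected by lock */

/* The fixed pattern: assign the cgroup and put the page on the active
 * list in one critical section, so both become visible together. */
static void commit_and_activate(struct page *page, void *h_cg)
{
	pthread_mutex_lock(&lock);
	page->cgroup = h_cg;        /* set_hugetlb_cgroup() */
	page->next = activelist;    /* list_move() onto the active list */
	activelist = page;
	pthread_mutex_unlock(&lock);
}

/* The reader from the commit message: anyone walking the active list
 * under the lock must never observe a NULL cgroup. */
static void walk_activelist(void)
{
	pthread_mutex_lock(&lock);
	for (struct page *p = activelist; p != NULL; p = p->next)
		assert(p->cgroup != NULL);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	static struct page pg;
	int cg;

	commit_and_activate(&pg, &cg);
	walk_activelist();
	return 0;
}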
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -928,14 +928,8 @@ struct page *alloc_huge_page_node(struct hstate *h, int nid)
 	page = dequeue_huge_page_node(h, nid);
 	spin_unlock(&hugetlb_lock);
 
-	if (!page) {
+	if (!page)
 		page = alloc_buddy_huge_page(h, nid);
-		if (page) {
-			spin_lock(&hugetlb_lock);
-			list_move(&page->lru, &h->hugepage_activelist);
-			spin_unlock(&hugetlb_lock);
-		}
-	}
 
 	return page;
 }
@@ -1150,9 +1144,13 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	}
 	spin_lock(&hugetlb_lock);
 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
-	spin_unlock(&hugetlb_lock);
-
-	if (!page) {
+	if (page) {
+		/* update page cgroup details */
+		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+					     h_cg, page);
+		spin_unlock(&hugetlb_lock);
+	} else {
+		spin_unlock(&hugetlb_lock);
 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
 		if (!page) {
 			hugetlb_cgroup_uncharge_cgroup(idx,
@@ -1162,6 +1160,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 			return ERR_PTR(-ENOSPC);
 		}
 		spin_lock(&hugetlb_lock);
+		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+					     h_cg, page);
 		list_move(&page->lru, &h->hugepage_activelist);
 		spin_unlock(&hugetlb_lock);
 	}
@@ -1169,8 +1169,6 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 
 	set_page_private(page, (unsigned long)spool);
 
 	vma_commit_reservation(h, vma, addr);
-	/* update page cgroup details */
-	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
 	return page;
 }
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -213,6 +213,7 @@ int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
 	return ret;
 }
 
+/* Should be called with hugetlb_lock held */
 void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 				  struct hugetlb_cgroup *h_cg,
 				  struct page *page)
@@ -220,9 +221,7 @@ void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 	if (hugetlb_cgroup_disabled() || !h_cg)
 		return;
 
-	spin_lock(&hugetlb_lock);
 	set_hugetlb_cgroup(page, h_cg);
-	spin_unlock(&hugetlb_lock);
 	return;
 }
 
@@ -389,6 +388,7 @@ int __init hugetlb_cgroup_file_init(int idx)
 void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 {
 	struct hugetlb_cgroup *h_cg;
+	struct hstate *h = page_hstate(oldhpage);
 
 	if (hugetlb_cgroup_disabled())
 		return;
@@ -401,6 +401,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 
 	/* move the h_cg details to new cgroup */
 	set_hugetlb_cgroup(newhpage, h_cg);
+	list_move(&newhpage->lru, &h->hugepage_activelist);
 	spin_unlock(&hugetlb_lock);
 	cgroup_release_and_wakeup_rmdir(&h_cg->css);
 	return;
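Aside, not part of this commit: the new comment on hugetlb_cgroup_commit_charge() documents a locking precondition rather than enforcing one. On lockdep-enabled kernels the same rule could also be checked at runtime with lockdep_assert_held(), roughly as in this sketch:

/* Sketch only, not in this commit: turn the "Should be called with
 * hugetlb_lock held" comment into a runtime assertion under lockdep. */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	lockdep_assert_held(&hugetlb_lock);

	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	set_hugetlb_cgroup(page, h_cg);
}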