Commit 26234f36 authored by Xiao Guangrong, committed by Linus Torvalds

thp: introduce khugepaged_prealloc_page and khugepaged_alloc_page

They are used to abstract the difference between NUMA-enabled and
NUMA-disabled builds and make the code more readable.
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 420256ef
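The gain is easiest to see from the caller's side: after this patch, khugepaged_do_scan() and collapse_huge_page() call khugepaged_prealloc_page() and khugepaged_alloc_page() with no #ifdef CONFIG_NUMA of their own; the two helper definitions carry the whole config difference. Below is a minimal, self-contained userspace sketch of that pattern. The names mirror the kernel's, but struct page and the helper bodies are simplified stand-ins (malloc/free instead of the kernel allocators, no error-pointer handling), not the kernel implementation.

/* Illustrative userspace sketch only; compile with or without
 * -DCONFIG_NUMA and note that main() is identical either way. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page { int nid; };	/* stand-in, not the kernel's struct page */

#ifdef CONFIG_NUMA
/* NUMA: defer allocation until the target node is known, so the
 * "prealloc" step only cleans up a leftover page from the last round. */
static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	(void)wait;		/* sleep/retry policy elided in this sketch */
	if (*hpage) {
		free(*hpage);
		*hpage = NULL;
	}
	return true;
}

static struct page *khugepaged_alloc_page(struct page **hpage, int node)
{
	*hpage = malloc(sizeof(**hpage));
	if (*hpage)
		(*hpage)->nid = node;
	return *hpage;
}
#else
/* !NUMA: allocate eagerly up front; the "alloc" step just hands the
 * preallocated page back. */
static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	(void)wait;
	if (!*hpage)
		*hpage = malloc(sizeof(**hpage));
	return *hpage != NULL;
}

static struct page *khugepaged_alloc_page(struct page **hpage, int node)
{
	(void)node;		/* node choice is irrelevant without NUMA */
	return *hpage;		/* already allocated by the prealloc step */
}
#endif

int main(void)
{
	struct page *hpage = NULL;
	bool wait = true;

	/* The caller never mentions CONFIG_NUMA. */
	if (!khugepaged_prealloc_page(&hpage, &wait))
		return 1;
	struct page *new_page = khugepaged_alloc_page(&hpage, 0);
	printf("got page %p\n", (void *)new_page);
	free(new_page);
	return 0;
}

This mirrors the commit's readability point: the #ifdef lives once, next to the helper definitions, instead of being repeated inside every caller.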
@@ -1827,28 +1827,34 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 	}
 }
 
-static void collapse_huge_page(struct mm_struct *mm,
-			       unsigned long address,
-			       struct page **hpage,
-			       struct vm_area_struct *vma,
-			       int node)
+static void khugepaged_alloc_sleep(void)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd, _pmd;
-	pte_t *pte;
-	pgtable_t pgtable;
-	struct page *new_page;
-	spinlock_t *ptl;
-	int isolated;
-	unsigned long hstart, hend;
+	wait_event_freezable_timeout(khugepaged_wait, false,
+			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+}
 
-	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-#ifndef CONFIG_NUMA
-	up_read(&mm->mmap_sem);
-	VM_BUG_ON(!*hpage);
-	new_page = *hpage;
-#else
+#ifdef CONFIG_NUMA
+static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+{
+	if (IS_ERR(*hpage)) {
+		if (!*wait)
+			return false;
+
+		*wait = false;
+		khugepaged_alloc_sleep();
+	} else if (*hpage) {
+		put_page(*hpage);
+		*hpage = NULL;
+	}
+
+	return true;
+}
+
+static struct page
+*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+		       struct vm_area_struct *vma, unsigned long address,
+		       int node)
+{
 	VM_BUG_ON(*hpage);
 	/*
 	 * Allocate the page while the vma is still valid and under
@@ -1860,7 +1866,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * mmap_sem in read mode is good idea also to allow greater
 	 * scalability.
 	 */
-	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
+	*hpage = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
 				      node, __GFP_OTHER_NODE);
 
 	/*
@@ -1868,15 +1874,81 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * preparation for taking it in write mode.
 	 */
 	up_read(&mm->mmap_sem);
-	if (unlikely(!new_page)) {
+	if (unlikely(!*hpage)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
-		return;
+		return NULL;
 	}
-	*hpage = new_page;
+
 	count_vm_event(THP_COLLAPSE_ALLOC);
-#endif
+	return *hpage;
+}
+#else
+static struct page *khugepaged_alloc_hugepage(bool *wait)
+{
+	struct page *hpage;
+
+	do {
+		hpage = alloc_hugepage(khugepaged_defrag());
+		if (!hpage) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+			if (!*wait)
+				return NULL;
+
+			*wait = false;
+			khugepaged_alloc_sleep();
+		} else
+			count_vm_event(THP_COLLAPSE_ALLOC);
+	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
+
+	return hpage;
+}
+
+static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+{
+	if (!*hpage)
+		*hpage = khugepaged_alloc_hugepage(wait);
+
+	if (unlikely(!*hpage))
+		return false;
+
+	return true;
+}
+
+static struct page
+*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+		       struct vm_area_struct *vma, unsigned long address,
+		       int node)
+{
+	up_read(&mm->mmap_sem);
+	VM_BUG_ON(!*hpage);
+	return *hpage;
+}
+#endif
+
+static void collapse_huge_page(struct mm_struct *mm,
+			       unsigned long address,
+			       struct page **hpage,
+			       struct vm_area_struct *vma,
+			       int node)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd, _pmd;
+	pte_t *pte;
+	pgtable_t pgtable;
+	struct page *new_page;
+	spinlock_t *ptl;
+	int isolated;
+	unsigned long hstart, hend;
+
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+	/* release the mmap_sem read lock. */
+	new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
+	if (!new_page)
+		return;
 
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
 		return;
@@ -2215,34 +2287,6 @@ static int khugepaged_wait_event(void)
 		kthread_should_stop();
 }
 
-static void khugepaged_alloc_sleep(void)
-{
-	wait_event_freezable_timeout(khugepaged_wait, false,
-			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
-}
-
-#ifndef CONFIG_NUMA
-static struct page *khugepaged_alloc_hugepage(bool *wait)
-{
-	struct page *hpage;
-
-	do {
-		hpage = alloc_hugepage(khugepaged_defrag());
-		if (!hpage) {
-			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-			if (!*wait)
-				return NULL;
-
-			*wait = false;
-			khugepaged_alloc_sleep();
-		} else
-			count_vm_event(THP_COLLAPSE_ALLOC);
-	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
-
-	return hpage;
-}
-#endif
-
 static void khugepaged_do_scan(void)
 {
 	struct page *hpage = NULL;
@@ -2253,23 +2297,9 @@ static void khugepaged_do_scan(void)
 	barrier(); /* write khugepaged_pages_to_scan to local stack */
 
 	while (progress < pages) {
-#ifndef CONFIG_NUMA
-		if (!hpage)
-			hpage = khugepaged_alloc_hugepage(&wait);
-		if (unlikely(!hpage))
+		if (!khugepaged_prealloc_page(&hpage, &wait))
 			break;
-#else
-		if (IS_ERR(hpage)) {
-			if (!wait)
-				break;
-			wait = false;
-			khugepaged_alloc_sleep();
-		} else if (hpage) {
-			put_page(hpage);
-			hpage = NULL;
-		}
-#endif
 
 		cond_resched();
 
 		if (unlikely(kthread_should_stop() || freezing(current)))