提交 52629506 编写于 作者: J Joonsoo Kim 提交者: Linus Torvalds

mm/rmap: use rmap_walk() in try_to_unmap()

Now we have an infrastructure in rmap_walk() to handle the differences
among the variants of the rmap traversing functions.

So, just use it in try_to_unmap().

In this patch, I change the following things.

1. enable rmap_walk() if !CONFIG_MIGRATION.
2. mechanical change to use rmap_walk() in try_to_unmap().
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 0dd1c7bb
...@@ -190,7 +190,7 @@ int page_referenced_one(struct page *, struct vm_area_struct *, ...@@ -190,7 +190,7 @@ int page_referenced_one(struct page *, struct vm_area_struct *,
int try_to_unmap(struct page *, enum ttu_flags flags); int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *, int try_to_unmap_one(struct page *, struct vm_area_struct *,
unsigned long address, enum ttu_flags flags); unsigned long address, void *arg);
/* /*
* Called from mm/filemap_xip.c to unmap empty zero page * Called from mm/filemap_xip.c to unmap empty zero page
...@@ -256,9 +256,6 @@ struct rmap_walk_control { ...@@ -256,9 +256,6 @@ struct rmap_walk_control {
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
}; };
/*
* Called by migrate.c to remove migration ptes, but might be used more later.
*/
int rmap_walk(struct page *page, struct rmap_walk_control *rwc); int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
#else /* !CONFIG_MMU */ #else /* !CONFIG_MMU */
......
...@@ -1982,7 +1982,7 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) ...@@ -1982,7 +1982,7 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
continue; continue;
ret = try_to_unmap_one(page, vma, ret = try_to_unmap_one(page, vma,
rmap_item->address, flags); rmap_item->address, (void *)flags);
if (ret != SWAP_AGAIN || !page_mapped(page)) { if (ret != SWAP_AGAIN || !page_mapped(page)) {
anon_vma_unlock_read(anon_vma); anon_vma_unlock_read(anon_vma);
goto out; goto out;
...@@ -1996,7 +1996,6 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) ...@@ -1996,7 +1996,6 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
return ret; return ret;
} }
#ifdef CONFIG_MIGRATION
int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
{ {
struct stable_node *stable_node; struct stable_node *stable_node;
...@@ -2054,6 +2053,7 @@ int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) ...@@ -2054,6 +2053,7 @@ int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
return ret; return ret;
} }
#ifdef CONFIG_MIGRATION
void ksm_migrate_page(struct page *newpage, struct page *oldpage) void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{ {
struct stable_node *stable_node; struct stable_node *stable_node;
......
...@@ -1179,15 +1179,18 @@ void page_remove_rmap(struct page *page) ...@@ -1179,15 +1179,18 @@ void page_remove_rmap(struct page *page)
/* /*
* Subfunctions of try_to_unmap: try_to_unmap_one called * Subfunctions of try_to_unmap: try_to_unmap_one called
* repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file. * repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file.
*
* @arg: enum ttu_flags will be passed to this argument
*/ */
int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, enum ttu_flags flags) unsigned long address, void *arg)
{ {
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
pte_t *pte; pte_t *pte;
pte_t pteval; pte_t pteval;
spinlock_t *ptl; spinlock_t *ptl;
int ret = SWAP_AGAIN; int ret = SWAP_AGAIN;
enum ttu_flags flags = (enum ttu_flags)arg;
pte = page_check_address(page, mm, address, &ptl, 0); pte = page_check_address(page, mm, address, &ptl, 0);
if (!pte) if (!pte)
...@@ -1513,6 +1516,11 @@ bool is_vma_temporary_stack(struct vm_area_struct *vma) ...@@ -1513,6 +1516,11 @@ bool is_vma_temporary_stack(struct vm_area_struct *vma)
return false; return false;
} }
static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
return is_vma_temporary_stack(vma);
}
/** /**
* try_to_unmap_anon - unmap or unlock anonymous page using the object-based * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
* rmap method * rmap method
...@@ -1558,7 +1566,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags) ...@@ -1558,7 +1566,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
continue; continue;
address = vma_address(page, vma); address = vma_address(page, vma);
ret = try_to_unmap_one(page, vma, address, flags); ret = try_to_unmap_one(page, vma, address, (void *)flags);
if (ret != SWAP_AGAIN || !page_mapped(page)) if (ret != SWAP_AGAIN || !page_mapped(page))
break; break;
} }
...@@ -1592,7 +1600,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) ...@@ -1592,7 +1600,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
mutex_lock(&mapping->i_mmap_mutex); mutex_lock(&mapping->i_mmap_mutex);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
unsigned long address = vma_address(page, vma); unsigned long address = vma_address(page, vma);
ret = try_to_unmap_one(page, vma, address, flags); ret = try_to_unmap_one(page, vma, address, (void *)flags);
if (ret != SWAP_AGAIN || !page_mapped(page)) if (ret != SWAP_AGAIN || !page_mapped(page))
goto out; goto out;
} }
...@@ -1614,6 +1622,11 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) ...@@ -1614,6 +1622,11 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
return ret; return ret;
} }
static int page_not_mapped(struct page *page)
{
return !page_mapped(page);
};
/** /**
* try_to_unmap - try to remove all page table mappings to a page * try_to_unmap - try to remove all page table mappings to a page
* @page: the page to get unmapped * @page: the page to get unmapped
...@@ -1631,16 +1644,29 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) ...@@ -1631,16 +1644,29 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
int try_to_unmap(struct page *page, enum ttu_flags flags) int try_to_unmap(struct page *page, enum ttu_flags flags)
{ {
int ret; int ret;
struct rmap_walk_control rwc = {
.rmap_one = try_to_unmap_one,
.arg = (void *)flags,
.done = page_not_mapped,
.file_nonlinear = try_to_unmap_nonlinear,
.anon_lock = page_lock_anon_vma_read,
};
BUG_ON(!PageLocked(page));
VM_BUG_ON(!PageHuge(page) && PageTransHuge(page)); VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
if (unlikely(PageKsm(page))) /*
ret = try_to_unmap_ksm(page, flags); * During exec, a temporary VMA is setup and later moved.
else if (PageAnon(page)) * The VMA is moved under the anon_vma lock but not the
ret = try_to_unmap_anon(page, flags); * page tables leading to a race where migration cannot
else * find the migration ptes. Rather than increasing the
ret = try_to_unmap_file(page, flags); * locking requirements of exec(), migration skips
* temporary VMAs until after exec() completes.
*/
if (flags & TTU_MIGRATION && !PageKsm(page) && PageAnon(page))
rwc.invalid_vma = invalid_migration_vma;
ret = rmap_walk(page, &rwc);
if (ret != SWAP_MLOCK && !page_mapped(page)) if (ret != SWAP_MLOCK && !page_mapped(page))
ret = SWAP_SUCCESS; ret = SWAP_SUCCESS;
return ret; return ret;
...@@ -1683,7 +1709,6 @@ void __put_anon_vma(struct anon_vma *anon_vma) ...@@ -1683,7 +1709,6 @@ void __put_anon_vma(struct anon_vma *anon_vma)
anon_vma_free(anon_vma); anon_vma_free(anon_vma);
} }
#ifdef CONFIG_MIGRATION
static struct anon_vma *rmap_walk_anon_lock(struct page *page, static struct anon_vma *rmap_walk_anon_lock(struct page *page,
struct rmap_walk_control *rwc) struct rmap_walk_control *rwc)
{ {
...@@ -1785,7 +1810,6 @@ int rmap_walk(struct page *page, struct rmap_walk_control *rwc) ...@@ -1785,7 +1810,6 @@ int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
else else
return rmap_walk_file(page, rwc); return rmap_walk_file(page, rwc);
} }
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE
/* /*
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册