Commit a3351e52 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] Direct Migration V9: remove_from_swap() to remove swap ptes

Add remove_from_swap

remove_from_swap() allows the restoration, for anonymous pages, of the pte
entries that existed before page migration, by walking the reverse maps.
This reduces swap use and establishes regular ptes without the need for
page faults.  (A sketch of the resulting call chain follows the commit
metadata below.)
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent a48d07af
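
For orientation, here is the call chain this patch introduces, reconstructed
purely from the hunks below (the hunk contexts indicate declarations in
rmap.h and swap.h, with the implementations landing next to
page_lock_anon_vma() and unuse_mm(), i.e. the anon rmap and swapfile code):

	migrate_page(newpage, page)
	  remove_from_swap(newpage)              /* new */
	    page_lock_anon_vma(newpage)          /* takes anon_vma->lock */
	    list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
	      remove_vma_swap(vma, newpage)      /* new */
	        unuse_vma(vma, entry, newpage)   /* existing swapoff helper */
	    spin_unlock(&anon_vma->lock)
	    delete_from_swap_cache(newpage)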
@@ -92,6 +92,7 @@ static inline void page_dup_rmap(struct page *page)
  */
 int page_referenced(struct page *, int is_locked);
 int try_to_unmap(struct page *, int ignore_refs);
+void remove_from_swap(struct page *page);
 
 /*
  * Called from mm/filemap_xip.c to unmap empty zero page
...
@@ -248,6 +248,7 @@ extern int remove_exclusive_swap_page(struct page *);
 struct backing_dev_info;
 extern spinlock_t swap_lock;
+extern int remove_vma_swap(struct vm_area_struct *vma, struct page *page);
 
 /* linux/mm/thrash.c */
 extern struct mm_struct * swap_token_mm;
...
@@ -206,6 +206,35 @@ static struct anon_vma *page_lock_anon_vma(struct page *page)
 	return anon_vma;
 }
 
+#ifdef CONFIG_MIGRATION
+/*
+ * Remove an anonymous page from swap replacing the swap pte's
+ * through real pte's pointing to valid pages and then releasing
+ * the page from the swap cache.
+ *
+ * Must hold page lock on page.
+ */
+void remove_from_swap(struct page *page)
+{
+	struct anon_vma *anon_vma;
+	struct vm_area_struct *vma;
+
+	if (!PageAnon(page) || !PageSwapCache(page))
+		return;
+
+	anon_vma = page_lock_anon_vma(page);
+	if (!anon_vma)
+		return;
+
+	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
+		remove_vma_swap(vma, page);
+
+	spin_unlock(&anon_vma->lock);
+	delete_from_swap_cache(page);
+}
+#endif
+
 /*
  * At what user virtual address is page expected in vma?
  */
...
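A minimal caller-side sketch of the contract stated in the comment above:
remove_from_swap() requires the page lock and quietly does nothing unless
the page is anonymous and still in the swap cache. The lock/unlock placement
here is illustrative only (in the migration path the page is locked much
earlier), not part of this patch:

	lock_page(page);	/* must be held across the rmap walk */
	remove_from_swap(page);	/* swap ptes -> real ptes, then the
				   page leaves the swap cache */
	unlock_page(page);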
@@ -554,6 +554,15 @@ static int unuse_mm(struct mm_struct *mm,
 	return 0;
 }
 
+#ifdef CONFIG_MIGRATION
+int remove_vma_swap(struct vm_area_struct *vma, struct page *page)
+{
+	swp_entry_t entry = { .val = page_private(page) };
+
+	return unuse_vma(vma, entry, page);
+}
+#endif
+
 /*
  * Scan swap_map from current position to next entry still in use.
  * Recycle to start on reaching the end, returning 0 when empty.
...
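remove_vma_swap() rebuilds the swap entry from page_private(): for a page in
the swap cache, page->private holds the swp_entry_t value identifying its
swap slot. A hedged illustration of what that entry encodes, using the
standard swp_type()/swp_offset() helpers from <linux/swapops.h> (not part of
this diff):

	swp_entry_t entry = { .val = page_private(page) };
	unsigned long type = swp_type(entry);	/* which swap area */
	unsigned long offset = swp_offset(entry); /* slot within that area */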
@@ -804,6 +804,15 @@ int migrate_page(struct page *newpage, struct page *page)
 	migrate_page_copy(newpage, page);
 
+	/*
+	 * Remove auxiliary swap entries and replace
+	 * them with real ptes.
+	 *
+	 * Note that a real pte entry will allow processes that are not
+	 * waiting on the page lock to use the new page via the page tables
+	 * before the new page is unlocked.
+	 */
+	remove_from_swap(newpage);
+
 	return 0;
 }
...
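The comment's caveat, restated as the ordering a migration caller observes
(sketch; step 3 happens later in the caller and is not shown in this diff):

	/*
	 * 1. migrate_page_copy(newpage, page)  -- data and state copied over
	 * 2. remove_from_swap(newpage)         -- real ptes now map newpage,
	 *                                         so tasks that never slept on
	 *                                         the page lock can already
	 *                                         use it via their page tables
	 * 3. unlock_page(newpage)              -- waiters on the page lock
	 *                                         resume
	 */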