Commit e24f0b8f authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] page migration: simplify migrate_pages()

Currently migrate_pages() is a mess with lots of gotos.  Extract two functions
from migrate_pages() and get rid of the gotos.
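
In outline, the resulting driver loop looks like the sketch below (a condensed
paraphrase of the migrate_pages() hunk further down, not a drop-in
replacement):

	/*
	 * Up to 10 passes; each pass retries only the pages whose
	 * migration attempt came back with -EAGAIN.
	 */
	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;
		list_for_each_entry_safe(page, page2, from, lru) {
			if (list_empty(to))
				break;
			cond_resched();
			rc = unmap_and_move(lru_to_page(to), page, pass > 2);
			switch (rc) {
			case -EAGAIN:
				retry++;	/* transient, try next pass */
				break;
			case 0:
				list_move(&page->lru, moved);
				break;
			default:
				/* permanent failure */
				list_move(&page->lru, failed);
				nr_failed++;
				break;
			}
		}
	}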

Plus we can just unconditionally set the locked bit on the new page since we
are the only one holding a reference.  Locking is to stop others from
accessing the page once we establish references to the new page.
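
Concretely, move_to_new_page() (added below) takes that lock with a trylock
that must never fail; the BUG() is the debug check mentioned in the note
below:

	/*
	 * Nobody else holds a reference to newpage yet, so the trylock
	 * must succeed; anything else indicates a refcounting bug.
	 */
	if (TestSetPageLocked(newpage))
		BUG();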

Remove the list_del from move_to_lru in order to have finer control over list
processing.
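
Callers that put pages back now perform the removal themselves, as in the
putback_lru_pages() hunk below:

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);	/* caller controls list removal */
		move_to_lru(page);	/* no longer touches page->lru */
		count++;
	}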

[akpm@osdl.org: add debug check]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Jes Sorensen <jes@trained-monkey.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 8f9de51a
@@ -84,7 +84,6 @@ int migrate_prep(void)
 
 static inline void move_to_lru(struct page *page)
 {
-	list_del(&page->lru);
 	if (PageActive(page)) {
 		/*
 		 * lru_cache_add_active checks that
@@ -110,6 +109,7 @@ int putback_lru_pages(struct list_head *l)
 	int count = 0;
 
 	list_for_each_entry_safe(page, page2, l, lru) {
+		list_del(&page->lru);
 		move_to_lru(page);
 		count++;
 	}
@@ -533,12 +533,109 @@ static int fallback_migrate_page(struct address_space *mapping,
 		return migrate_page(mapping, newpage, page);
 }
 
+/*
+ * Move a page to a newly allocated page
+ * The page is locked and all ptes have been successfully removed.
+ *
+ * The new page will have replaced the old page if this function
+ * is successful.
+ */
+static int move_to_new_page(struct page *newpage, struct page *page)
+{
+	struct address_space *mapping;
+	int rc;
+
+	/*
+	 * Block others from accessing the page when we get around to
+	 * establishing additional references. We are the only one
+	 * holding a reference to the new page at this point.
+	 */
+	if (TestSetPageLocked(newpage))
+		BUG();
+
+	/* Prepare mapping for the new page.*/
+	newpage->index = page->index;
+	newpage->mapping = page->mapping;
+
+	mapping = page_mapping(page);
+	if (!mapping)
+		rc = migrate_page(mapping, newpage, page);
+	else if (mapping->a_ops->migratepage)
+		/*
+		 * Most pages have a mapping and most filesystems
+		 * should provide a migration function. Anonymous
+		 * pages are part of swap space which also has its
+		 * own migration function. This is the most common
+		 * path for page migration.
+		 */
+		rc = mapping->a_ops->migratepage(mapping,
+						newpage, page);
+	else
+		rc = fallback_migrate_page(mapping, newpage, page);
+
+	if (!rc)
+		remove_migration_ptes(page, newpage);
+	else
+		newpage->mapping = NULL;
+
+	unlock_page(newpage);
+
+	return rc;
+}
+
+/*
+ * Obtain the lock on page, remove all ptes and migrate the page
+ * to the newly allocated page in newpage.
+ */
+static int unmap_and_move(struct page *newpage, struct page *page, int force)
+{
+	int rc = 0;
+
+	if (page_count(page) == 1)
+		/* page was freed from under us. So we are done. */
+		goto ret;
+
+	rc = -EAGAIN;
+	if (TestSetPageLocked(page)) {
+		if (!force)
+			goto ret;
+		lock_page(page);
+	}
+
+	if (PageWriteback(page)) {
+		if (!force)
+			goto unlock;
+		wait_on_page_writeback(page);
+	}
+
+	/*
+	 * Establish migration ptes or remove ptes
+	 */
+	if (try_to_unmap(page, 1) != SWAP_FAIL) {
+		if (!page_mapped(page))
+			rc = move_to_new_page(newpage, page);
+	} else
+		/* A vma has VM_LOCKED set -> permanent failure */
+		rc = -EPERM;
+
+	if (rc)
+		remove_migration_ptes(page, page);
+unlock:
+	unlock_page(page);
+ret:
+	if (rc != -EAGAIN) {
+		list_del(&newpage->lru);
+		move_to_lru(newpage);
+	}
+	return rc;
+}
+
 /*
  * migrate_pages
  *
  * Two lists are passed to this function. The first list
  * contains the pages isolated from the LRU to be migrated.
- * The second list contains new pages that the pages isolated
+ * The second list contains new pages that the isolated pages
  * can be moved to.
  *
  * The function returns after 10 attempts or if no pages
@@ -550,7 +647,7 @@ static int fallback_migrate_page(struct address_space *mapping,
 int migrate_pages(struct list_head *from, struct list_head *to,
 		struct list_head *moved, struct list_head *failed)
 {
-	int retry;
+	int retry = 1;
 	int nr_failed = 0;
 	int pass = 0;
 	struct page *page;
@@ -561,118 +658,33 @@ int migrate_pages(struct list_head *from, struct list_head *to,
 	if (!swapwrite)
 		current->flags |= PF_SWAPWRITE;
 
-redo:
-	retry = 0;
-
-	list_for_each_entry_safe(page, page2, from, lru) {
-		struct page *newpage = NULL;
-		struct address_space *mapping;
-
-		cond_resched();
-
-		rc = 0;
-		if (page_count(page) == 1)
-			/* page was freed from under us. So we are done. */
-			goto next;
-
-		if (to && list_empty(to))
-			break;
-
-		/*
-		 * Skip locked pages during the first two passes to give the
-		 * functions holding the lock time to release the page. Later we
-		 * use lock_page() to have a higher chance of acquiring the
-		 * lock.
-		 */
-		rc = -EAGAIN;
-		if (pass > 2)
-			lock_page(page);
-		else
-			if (TestSetPageLocked(page))
-				goto next;
-
-		/*
-		 * Only wait on writeback if we have already done a pass where
-		 * we we may have triggered writeouts for lots of pages.
-		 */
-		if (pass > 0)
-			wait_on_page_writeback(page);
-		else
-			if (PageWriteback(page))
-				goto unlock_page;
-
-		/*
-		 * Establish migration ptes or remove ptes
-		 */
-		rc = -EPERM;
-		if (try_to_unmap(page, 1) == SWAP_FAIL)
-			/* A vma has VM_LOCKED set -> permanent failure */
-			goto unlock_page;
-
-		rc = -EAGAIN;
-		if (page_mapped(page))
-			goto unlock_page;
-
-		newpage = lru_to_page(to);
-		lock_page(newpage);
-
-		/* Prepare mapping for the new page.*/
-		newpage->index = page->index;
-		newpage->mapping = page->mapping;
-
-		/*
-		 * Pages are properly locked and writeback is complete.
-		 * Try to migrate the page.
-		 */
-		mapping = page_mapping(page);
-		if (!mapping)
-			rc = migrate_page(mapping, newpage, page);
-		else if (mapping->a_ops->migratepage)
-			/*
-			 * Most pages have a mapping and most filesystems
-			 * should provide a migration function. Anonymous
-			 * pages are part of swap space which also has its
-			 * own migration function. This is the most common
-			 * path for page migration.
-			 */
-			rc = mapping->a_ops->migratepage(mapping,
-							newpage, page);
-		else
-			rc = fallback_migrate_page(mapping, newpage, page);
-
-		if (!rc)
-			remove_migration_ptes(page, newpage);
-
-		unlock_page(newpage);
-
-unlock_page:
-		if (rc)
-			remove_migration_ptes(page, page);
-
-		unlock_page(page);
-
-next:
-		if (rc) {
-			if (newpage)
-				newpage->mapping = NULL;
-
-			if (rc == -EAGAIN)
-				retry++;
-			else {
-				/* Permanent failure */
-				list_move(&page->lru, failed);
-				nr_failed++;
-			}
-		} else {
-			if (newpage) {
-				/* Successful migration. Return page to LRU */
-				move_to_lru(newpage);
-			}
-			list_move(&page->lru, moved);
-		}
-	}
-	if (retry && pass++ < 10)
-		goto redo;
+	for(pass = 0; pass < 10 && retry; pass++) {
+		retry = 0;
+
+		list_for_each_entry_safe(page, page2, from, lru) {
+
+			if (list_empty(to))
+				break;
+
+			cond_resched();
+
+			rc = unmap_and_move(lru_to_page(to), page, pass > 2);
+
+			switch(rc) {
+			case -EAGAIN:
+				retry++;
+				break;
+			case 0:
+				list_move(&page->lru, moved);
+				break;
+			default:
+				/* Permanent failure */
+				list_move(&page->lru, failed);
+				nr_failed++;
+				break;
+			}
+		}
+	}
 
 	if (!swapwrite)
 		current->flags &= ~PF_SWAPWRITE;