Commit b16664e4 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] Direct Migration V9: PageSwapCache checks

Check for PageSwapCache after looking up and locking a swap page.

The page migration code may change a swap pte to point to a different page
under lock_page().

If that happens then the vm must retry the lookup operation in the swap space
to find the correct page number.  There are a couple of locations in the VM
where a lock_page() is done on a swap page.  In these locations we need to
check afterwards if the page was migrated.  If the page was migrated then the
old page that was looked up before was freed and no longer has the
PageSwapCache bit set.
Signed-off-by: Hirokazu Takahashi <taka@valinux.co.jp>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Christoph Lameter <clameter@@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 2a16e3f4
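
The retry pattern is identical at every call site touched by the hunks below (do_swap_page(), shmem_getpage() and try_to_unuse()). The following condensed sketch is illustration only; swap_lookup_locked() is a hypothetical helper, not part of this patch:

	/*
	 * Illustration only: look up a swap-cache page and lock it, retrying
	 * if page migration replaced the page while we slept in lock_page().
	 */
	static struct page *swap_lookup_locked(swp_entry_t entry)
	{
		struct page *page;
	again:
		page = lookup_swap_cache(entry);
		if (!page)
			return NULL;		/* caller falls back to swap-in */
		lock_page(page);
		if (!PageSwapCache(page)) {
			/*
			 * Page migration has occurred: the old page was freed
			 * and no longer has PageSwapCache set, so drop the
			 * reference and repeat the lookup.
			 */
			unlock_page(page);
			page_cache_release(page);
			goto again;
		}
		return page;			/* locked and still in the swap cache */
	}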
@@ -1871,6 +1871,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out;
 	entry = pte_to_swp_entry(orig_pte);
+again:
 	page = lookup_swap_cache(entry);
 	if (!page) {
 		swapin_readahead(entry, address, vma);
@@ -1894,6 +1895,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	mark_page_accessed(page);
 	lock_page(page);
+	if (!PageSwapCache(page)) {
+		/* Page migration has occured */
+		unlock_page(page);
+		page_cache_release(page);
+		goto again;
+	}
 	/*
 	 * Back out if somebody else already faulted in this pte.
...
@@ -1028,6 +1028,14 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 				page_cache_release(swappage);
 				goto repeat;
 			}
+			if (!PageSwapCache(swappage)) {
+				/* Page migration has occured */
+				shmem_swp_unmap(entry);
+				spin_unlock(&info->lock);
+				unlock_page(swappage);
+				page_cache_release(swappage);
+				goto repeat;
+			}
 			if (PageWriteback(swappage)) {
 				shmem_swp_unmap(entry);
 				spin_unlock(&info->lock);
...
@@ -646,6 +646,7 @@ static int try_to_unuse(unsigned int type)
 		 */
 		swap_map = &si->swap_map[i];
 		entry = swp_entry(type, i);
+again:
 		page = read_swap_cache_async(entry, NULL, 0);
 		if (!page) {
 			/*
@@ -680,6 +681,12 @@ static int try_to_unuse(unsigned int type)
 		wait_on_page_locked(page);
 		wait_on_page_writeback(page);
 		lock_page(page);
+		if (!PageSwapCache(page)) {
+			/* Page migration has occured */
+			unlock_page(page);
+			page_cache_release(page);
+			goto again;
+		}
 		wait_on_page_writeback(page);
 		/*
...