Commit 2e0e26c7, authored by Hugh Dickins, committed by Linus Torvalds

tmpfs: open a window in shmem_unuse_inode

There are a couple of reasons (patches follow) why it would be good to open a
window for sleep in shmem_unuse_inode, between its search for a matching swap
entry, and its handling of the entry found.

shmem_unuse_inode must then use igrab to hold the inode against deletion in
that window, and its corresponding iput might result in deletion: so it had
better unlock_page before the iput, and might as well release the page too.
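
A condensed sketch of the ordering this implies (it mirrors the new shmem_unuse_inode code in the diff below, with the swaplist bookkeeping and the swap-entry re-lookup elided):

	inode = igrab(&info->vfs_inode);	/* pin the inode before dropping locks */
	spin_unlock(&info->lock);
	mutex_unlock(&shmem_swaplist_mutex);	/* now free to sleep in this window */

	error = 1;
	if (!inode)
		goto out;

	/* ... sleepable work against inode->i_mapping goes here ... */

out:
	unlock_page(page);		/* drop the page lock and reference first, */
	page_cache_release(page);	/* since the iput() below may turn into    */
	iput(inode);			/* final deletion of the inode; NULL is ok */
	return error;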

Nor is there any need to hold on to shmem_swaplist_mutex once we know we'll
leave the loop.  So this unwinding moves from try_to_unuse and shmem_unuse
into shmem_unuse_inode, in the case when it finds a match.

Let try_to_unuse break on error in the shmem_unuse case, as it does in the
unuse_mm case: though at this point in the series, no error to break on.
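
In other words, shmem_unuse() now returns 0 when no matching inode is found, 1 when the entry was handled (the page has already been unlocked and released), or a negative errno once later patches introduce one. A sketch of the caller-side handling in try_to_unuse (it mirrors the mm/swapfile.c hunk below; the call-site line is paraphrased):

	shmem = shmem_unuse(entry, page);
	...
	if (shmem) {
		/* page has already been unlocked and released */
		if (shmem > 0)
			continue;	/* handled: move on to the next swap entry */
		retval = shmem;		/* negative errno: give up */
		break;
	}
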
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent cb5f7b9a
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -838,10 +838,8 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
 	if (size > SHMEM_NR_DIRECT)
 		size = SHMEM_NR_DIRECT;
 	offset = shmem_find_swp(entry, ptr, ptr+size);
-	if (offset >= 0) {
-		shmem_swp_balance_unmap();
+	if (offset >= 0)
 		goto found;
-	}
 	if (!info->i_indirect)
 		goto lost2;
 
@@ -879,11 +877,11 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
 			if (size > ENTRIES_PER_PAGE)
 				size = ENTRIES_PER_PAGE;
 			offset = shmem_find_swp(entry, ptr, ptr+size);
+			shmem_swp_unmap(ptr);
 			if (offset >= 0) {
 				shmem_dir_unmap(dir);
 				goto found;
 			}
-			shmem_swp_unmap(ptr);
 		}
 	}
 lost1:
@@ -893,10 +891,25 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
 	return 0;
 found:
 	idx += offset;
-	inode = &info->vfs_inode;
-	error = add_to_page_cache(page, inode->i_mapping, idx, GFP_ATOMIC);
+	inode = igrab(&info->vfs_inode);
+	spin_unlock(&info->lock);
+
+	/* move head to start search for next from here */
+	list_move_tail(&shmem_swaplist, &info->swaplist);
+	mutex_unlock(&shmem_swaplist_mutex);
+
+	error = 1;
+	if (!inode)
+		goto out;
+
+	spin_lock(&info->lock);
+	ptr = shmem_swp_entry(info, idx, NULL);
+	if (ptr && ptr->val == entry.val)
+		error = add_to_page_cache(page, inode->i_mapping,
+						idx, GFP_ATOMIC);
 	if (error == -EEXIST) {
 		struct page *filepage = find_get_page(inode->i_mapping, idx);
+		error = 1;
 		if (filepage) {
 			/*
 			 * There might be a more uptodate page coming down
@@ -911,16 +924,18 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
 		delete_from_swap_cache(page);
 		set_page_dirty(page);
 		info->flags |= SHMEM_PAGEIN;
-		shmem_swp_set(info, ptr + offset, 0);
+		shmem_swp_set(info, ptr, 0);
+		swap_free(entry);
+		error = 1;	/* not an error, but entry was found */
 	}
-	shmem_swp_unmap(ptr);
+	if (ptr)
+		shmem_swp_unmap(ptr);
 	spin_unlock(&info->lock);
-	/*
-	 * Decrement swap count even when the entry is left behind:
-	 * try_to_unuse will skip over mms, then reincrement count.
-	 */
-	swap_free(entry);
-	return 1;
+out:
+	unlock_page(page);
+	page_cache_release(page);
+	iput(inode);		/* allows for NULL */
+	return error;
 }
 
 /*
@@ -935,18 +950,16 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 	mutex_lock(&shmem_swaplist_mutex);
 	list_for_each_safe(p, next, &shmem_swaplist) {
 		info = list_entry(p, struct shmem_inode_info, swaplist);
-		if (!info->swapped)
+		if (info->swapped)
+			found = shmem_unuse_inode(info, entry, page);
+		else
 			list_del_init(&info->swaplist);
-		else if (shmem_unuse_inode(info, entry, page)) {
-			/* move head to start search for next from here */
-			list_move_tail(&shmem_swaplist, &info->swaplist);
-			found = 1;
-			break;
-		}
 		cond_resched();
+		if (found)
+			goto out;
 	}
 	mutex_unlock(&shmem_swaplist_mutex);
-	return found;
+out:	return found;	/* 0 or 1 or -ENOMEM */
 }
 
 /*
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -814,7 +814,7 @@ static int try_to_unuse(unsigned int type)
 			atomic_inc(&new_start_mm->mm_users);
 			atomic_inc(&prev_mm->mm_users);
 			spin_lock(&mmlist_lock);
-			while (*swap_map > 1 && !retval &&
+			while (*swap_map > 1 && !retval && !shmem &&
 					(p = p->next) != &start_mm->mmlist) {
 				mm = list_entry(p, struct mm_struct, mmlist);
 				if (!atomic_inc_not_zero(&mm->mm_users))
@@ -846,6 +846,13 @@ static int try_to_unuse(unsigned int type)
 			mmput(start_mm);
 			start_mm = new_start_mm;
 		}
+		if (shmem) {
+			/* page has already been unlocked and released */
+			if (shmem > 0)
+				continue;
+			retval = shmem;
+			break;
+		}
 		if (retval) {
 			unlock_page(page);
 			page_cache_release(page);
@@ -884,12 +891,6 @@ static int try_to_unuse(unsigned int type)
 		 * read from disk into another page. Splitting into two
 		 * pages would be incorrect if swap supported "shared
		 * private" pages, but they are handled by tmpfs files.
-		 *
-		 * Note shmem_unuse already deleted a swappage from
-		 * the swap cache, unless the move to filepage failed:
-		 * in which case it left swappage in cache, lowered its
-		 * swap count to pass quickly through the loops above,
-		 * and now we must reincrement count to try again later.
 		 */
 		if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
 			struct writeback_control wbc = {
@@ -900,12 +901,8 @@ static int try_to_unuse(unsigned int type)
 			lock_page(page);
 			wait_on_page_writeback(page);
 		}
-		if (PageSwapCache(page)) {
-			if (shmem)
-				swap_duplicate(entry);
-			else
-				delete_from_swap_cache(page);
-		}
+		if (PageSwapCache(page))
+			delete_from_swap_cache(page);
 
 		/*
 		 * So we could skip searching mms once swap count went