Commit 529ae9aa authored by Nick Piggin, committed by Linus Torvalds

mm: rename page trylock

Converting the page lock to the new locking bitops requires a change of page flag
operation naming, so we might as well convert it to something nicer
(!TestSetPageLocked => trylock_page, SetPageLocked => set_page_locked).

This also facilitates lockdep annotation of the page lock.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent e9ba9698
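For callers, the conversion is mechanical but the sense of the return value flips: TestSetPageLocked() returned nonzero when the page was *already* locked, while trylock_page() returns nonzero when the lock was *acquired*. The snippet below is a minimal caller-side sketch of the conversion; the helper name process_page_nonblocking and the -EAGAIN convention are illustrative only, not part of this commit, and the pattern simply mirrors the hunks that follow.

#include <linux/pagemap.h>      /* trylock_page(), unlock_page() */

/* Hypothetical helper: try to work on a page without blocking on its lock. */
static int process_page_nonblocking(struct page *page)
{
        /* old spelling: if (TestSetPageLocked(page)) return -EAGAIN; */
        if (!trylock_page(page))        /* nonzero means we now hold PG_locked */
                return -EAGAIN;

        /* ... operate on the locked page ... */

        unlock_page(page);
        return 0;
}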
@@ -1747,7 +1747,7 @@ st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
  */
 flush_dcache_page(pages[i]);
 /* ?? Is locking needed? I don't think so */
-/* if (TestSetPageLocked(pages[i]))
+/* if (!trylock_page(pages[i]))
    goto out_unlock; */
 }
......
@@ -404,7 +404,7 @@ static int afs_write_back_from_locked_page(struct afs_writeback *wb,
 page = pages[loop];
 if (page->index > wb->last)
         break;
-if (TestSetPageLocked(page))
+if (!trylock_page(page))
         break;
 if (!PageDirty(page) ||
     page_private(page) != (unsigned long) wb) {
......
@@ -1280,7 +1280,7 @@ static int cifs_writepages(struct address_space *mapping,
 if (first < 0)
         lock_page(page);
-else if (TestSetPageLocked(page))
+else if (!trylock_page(page))
         break;
 if (unlikely(page->mapping != mapping)) {
......
@@ -63,7 +63,7 @@ static void release_buffer_page(struct buffer_head *bh)
         goto nope;
 /* OK, it's a truncated page */
-if (TestSetPageLocked(page))
+if (!trylock_page(page))
         goto nope;
 page_cache_get(page);
@@ -446,7 +446,7 @@ void journal_commit_transaction(journal_t *journal)
         spin_lock(&journal->j_list_lock);
 }
 if (unlikely(!buffer_uptodate(bh))) {
-        if (TestSetPageLocked(bh->b_page)) {
+        if (!trylock_page(bh->b_page)) {
                 spin_unlock(&journal->j_list_lock);
                 lock_page(bh->b_page);
                 spin_lock(&journal->j_list_lock);
......
@@ -67,7 +67,7 @@ static void release_buffer_page(struct buffer_head *bh)
         goto nope;
 /* OK, it's a truncated page */
-if (TestSetPageLocked(page))
+if (!trylock_page(page))
         goto nope;
 page_cache_get(page);
......
@@ -627,7 +627,7 @@ static int journal_list_still_alive(struct super_block *s,
 static void release_buffer_page(struct buffer_head *bh)
 {
         struct page *page = bh->b_page;
-        if (!page->mapping && !TestSetPageLocked(page)) {
+        if (!page->mapping && trylock_page(page)) {
                 page_cache_get(page);
                 put_bh(bh);
                 if (!page->mapping)
......
@@ -371,7 +371,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
  * for an in-flight io page
  */
 if (flags & SPLICE_F_NONBLOCK) {
-        if (TestSetPageLocked(page)) {
+        if (!trylock_page(page)) {
                 error = -EAGAIN;
                 break;
         }
......
@@ -675,7 +675,7 @@ xfs_probe_cluster(
 } else
         pg_offset = PAGE_CACHE_SIZE;
-if (page->index == tindex && !TestSetPageLocked(page)) {
+if (page->index == tindex && trylock_page(page)) {
         pg_len = xfs_probe_page(page, pg_offset, mapped);
         unlock_page(page);
 }
@@ -759,7 +759,7 @@ xfs_convert_page(
 if (page->index != tindex)
         goto fail;
-if (TestSetPageLocked(page))
+if (!trylock_page(page))
         goto fail;
 if (PageWriteback(page))
         goto fail_unlock_page;
......
@@ -163,7 +163,7 @@ static inline int Page##uname(struct page *page) \
 struct page;    /* forward declaration */
-PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked)
+TESTPAGEFLAG(Locked, locked)
 PAGEFLAG(Error, error)
 PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
 PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
......
@@ -250,29 +250,6 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
         return read_cache_page(mapping, index, filler, data);
 }
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-                pgoff_t index, gfp_t gfp_mask);
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-                pgoff_t index, gfp_t gfp_mask);
-extern void remove_from_page_cache(struct page *page);
-extern void __remove_from_page_cache(struct page *page);
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run SetPageLocked() against it.
- */
-static inline int add_to_page_cache(struct page *page,
-                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
-{
-        int error;
-        SetPageLocked(page);
-        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
-        if (unlikely(error))
-                ClearPageLocked(page);
-        return error;
-}
 /*
  * Return byte-offset into filesystem object for page.
  */
@@ -294,13 +271,28 @@ extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
 extern void unlock_page(struct page *page);
+static inline void set_page_locked(struct page *page)
+{
+        set_bit(PG_locked, &page->flags);
+}
+static inline void clear_page_locked(struct page *page)
+{
+        clear_bit(PG_locked, &page->flags);
+}
+static inline int trylock_page(struct page *page)
+{
+        return !test_and_set_bit(PG_locked, &page->flags);
+}
 /*
  * lock_page may only be called if we have the page's inode pinned.
  */
 static inline void lock_page(struct page *page)
 {
         might_sleep();
-        if (TestSetPageLocked(page))
+        if (!trylock_page(page))
                 __lock_page(page);
 }
@@ -312,7 +304,7 @@ static inline void lock_page(struct page *page)
 static inline int lock_page_killable(struct page *page)
 {
         might_sleep();
-        if (TestSetPageLocked(page))
+        if (!trylock_page(page))
                 return __lock_page_killable(page);
         return 0;
 }
@@ -324,7 +316,7 @@ static inline int lock_page_killable(struct page *page)
 static inline void lock_page_nosync(struct page *page)
 {
         might_sleep();
-        if (TestSetPageLocked(page))
+        if (!trylock_page(page))
                 __lock_page_nosync(page);
 }
@@ -409,4 +401,27 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
         return ret;
 }
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+                pgoff_t index, gfp_t gfp_mask);
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+                pgoff_t index, gfp_t gfp_mask);
+extern void remove_from_page_cache(struct page *page);
+extern void __remove_from_page_cache(struct page *page);
+/*
+ * Like add_to_page_cache_locked, but used to add newly allocated pages:
+ * the page is new, so we can just run set_page_locked() against it.
+ */
+static inline int add_to_page_cache(struct page *page,
+                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+{
+        int error;
+        set_page_locked(page);
+        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+        if (unlikely(error))
+                clear_page_locked(page);
+        return error;
+}
 #endif /* _LINUX_PAGEMAP_H */
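A usage sketch for the second half of the rename, covering pages that are not yet visible to any other CPU: PG_locked can be set unconditionally with the new set_page_locked() (no test-and-set needed, since nothing can contend) and cleared again with clear_page_locked() on failure. The helper below is hypothetical and simply mirrors the add_to_page_cache() pattern above.

#include <linux/gfp.h>          /* alloc_page() */
#include <linux/pagemap.h>      /* set_page_locked(), add_to_page_cache_locked() */

/* Hypothetical helper: allocate a page and insert it into the page cache, locked. */
static struct page *alloc_locked_cache_page(struct address_space *mapping,
                                            pgoff_t index, gfp_t gfp)
{
        struct page *page = alloc_page(gfp);

        if (!page)
                return NULL;

        set_page_locked(page);                  /* was SetPageLocked(page) */
        if (add_to_page_cache_locked(page, mapping, index, gfp)) {
                clear_page_locked(page);        /* was ClearPageLocked(page) */
                page_cache_release(page);
                return NULL;
        }
        return page;    /* caller unlock_page()s once the page is up to date */
}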
@@ -558,14 +558,14 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
  *
  * The first mb is necessary to safely close the critical section opened by the
- * TestSetPageLocked(), the second mb is necessary to enforce ordering between
- * the clear_bit and the read of the waitqueue (to avoid SMP races with a
- * parallel wait_on_page_locked()).
+ * test_and_set_bit() to lock the page; the second mb is necessary to enforce
+ * ordering between the clear_bit and the read of the waitqueue (to avoid SMP
+ * races with a parallel wait_on_page_locked()).
  */
 void unlock_page(struct page *page)
 {
         smp_mb__before_clear_bit();
-        if (!TestClearPageLocked(page))
+        if (!test_and_clear_bit(PG_locked, &page->flags))
                 BUG();
         smp_mb__after_clear_bit();
         wake_up_page(page, PG_locked);
@@ -931,7 +931,7 @@ grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
         struct page *page = find_get_page(mapping, index);
         if (page) {
-                if (!TestSetPageLocked(page))
+                if (trylock_page(page))
                         return page;
                 page_cache_release(page);
                 return NULL;
@@ -1027,7 +1027,7 @@ static void do_generic_file_read(struct file *filp, loff_t *ppos,
         if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
                         !mapping->a_ops->is_partially_uptodate)
                 goto page_not_up_to_date;
-        if (TestSetPageLocked(page))
+        if (!trylock_page(page))
                 goto page_not_up_to_date;
         if (!mapping->a_ops->is_partially_uptodate(page,
                         desc, offset))
......
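The one non-mechanical change in this commit is the unlock_page() comment rewritten in the mm/filemap.c hunk above. The sketch below only restates that function from the hunk, with the two barriers annotated; wake_up_page() is the filemap.c wait-queue helper the hunk already calls, nothing here is new behaviour.

void unlock_page(struct page *page)
{
        /*
         * Order every store made while PG_locked was held before the bit
         * is cleared: this closes the critical section opened by the
         * test_and_set_bit() inside trylock_page().
         */
        smp_mb__before_clear_bit();
        if (!test_and_clear_bit(PG_locked, &page->flags))
                BUG();          /* unlocking a page that was not locked */
        /*
         * Order the clear before the wait-queue read in wake_up_page(),
         * so a sleeper that queued itself in wait_on_page_locked() and
         * then re-checked PG_locked cannot be missed.
         */
        smp_mb__after_clear_bit();
        wake_up_page(page, PG_locked);
}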
@@ -1789,7 +1789,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
          * not dirty accountable.
          */
         if (PageAnon(old_page)) {
-                if (!TestSetPageLocked(old_page)) {
+                if (trylock_page(old_page)) {
                         reuse = can_share_swap_page(old_page);
                         unlock_page(old_page);
                 }
......
@@ -605,7 +605,7 @@ static int move_to_new_page(struct page *newpage, struct page *page)
          * establishing additional references. We are the only one
          * holding a reference to the new page at this point.
          */
-        if (TestSetPageLocked(newpage))
+        if (!trylock_page(newpage))
                 BUG();
         /* Prepare mapping for the new page.*/
@@ -667,7 +667,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
         BUG_ON(charge);
         rc = -EAGAIN;
-        if (TestSetPageLocked(page)) {
+        if (!trylock_page(page)) {
                 if (!force)
                         goto move_newpage;
                 lock_page(page);
......
@@ -422,7 +422,7 @@ int page_referenced(struct page *page, int is_locked,
                 referenced += page_referenced_anon(page, mem_cont);
         else if (is_locked)
                 referenced += page_referenced_file(page, mem_cont);
-        else if (TestSetPageLocked(page))
+        else if (!trylock_page(page))
                 referenced++;
         else {
                 if (page->mapping)
......
@@ -1265,7 +1265,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
         }
         /* We have to do this with page locked to prevent races */
-        if (TestSetPageLocked(swappage)) {
+        if (!trylock_page(swappage)) {
                 shmem_swp_unmap(entry);
                 spin_unlock(&info->lock);
                 wait_on_page_locked(swappage);
@@ -1329,7 +1329,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
         shmem_swp_unmap(entry);
         filepage = find_get_page(mapping, idx);
         if (filepage &&
-            (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
+            (!PageUptodate(filepage) || !trylock_page(filepage))) {
                 spin_unlock(&info->lock);
                 wait_on_page_locked(filepage);
                 page_cache_release(filepage);
......
@@ -444,7 +444,7 @@ void pagevec_strip(struct pagevec *pvec)
         for (i = 0; i < pagevec_count(pvec); i++) {
                 struct page *page = pvec->pages[i];
-                if (PagePrivate(page) && !TestSetPageLocked(page)) {
+                if (PagePrivate(page) && trylock_page(page)) {
                         if (PagePrivate(page))
                                 try_to_release_page(page, 0);
                         unlock_page(page);
......
@@ -201,7 +201,7 @@ void delete_from_swap_cache(struct page *page)
  */
 static inline void free_swap_cache(struct page *page)
 {
-        if (PageSwapCache(page) && !TestSetPageLocked(page)) {
+        if (PageSwapCache(page) && trylock_page(page)) {
                 remove_exclusive_swap_page(page);
                 unlock_page(page);
         }
@@ -302,9 +302,9 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                  * re-using the just freed swap entry for an existing page.
                  * May fail (-ENOMEM) if radix-tree node allocation failed.
                  */
-                SetPageLocked(new_page);
+                set_page_locked(new_page);
                 err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
-                if (!err) {
+                if (likely(!err)) {
                         /*
                          * Initiate read into locked page and return.
                          */
@@ -312,7 +312,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                         swap_readpage(NULL, new_page);
                         return new_page;
                 }
-                ClearPageLocked(new_page);
+                clear_page_locked(new_page);
                 swap_free(entry);
         } while (err != -ENOMEM);
......
@@ -403,7 +403,7 @@ void free_swap_and_cache(swp_entry_t entry)
         if (p) {
                 if (swap_entry_free(p, swp_offset(entry)) == 1) {
                         page = find_get_page(&swapper_space, entry.val);
-                        if (page && unlikely(TestSetPageLocked(page))) {
+                        if (page && unlikely(!trylock_page(page))) {
                                 page_cache_release(page);
                                 page = NULL;
                         }
......
@@ -187,7 +187,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
         if (page_index > next)
                 next = page_index;
         next++;
-        if (TestSetPageLocked(page))
+        if (!trylock_page(page))
                 continue;
         if (PageWriteback(page)) {
                 unlock_page(page);
@@ -280,7 +280,7 @@ unsigned long __invalidate_mapping_pages(struct address_space *mapping,
         pgoff_t index;
         int lock_failed;
-        lock_failed = TestSetPageLocked(page);
+        lock_failed = !trylock_page(page);
         /*
          * We really shouldn't be looking at the ->index of an
......
@@ -496,7 +496,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
         page = lru_to_page(page_list);
         list_del(&page->lru);
-        if (TestSetPageLocked(page))
+        if (!trylock_page(page))
                 goto keep;
         VM_BUG_ON(PageActive(page));
@@ -582,7 +582,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
          * A synchronous write - probably a ramdisk. Go
          * ahead and try to reclaim the page.
          */
-        if (TestSetPageLocked(page))
+        if (!trylock_page(page))
                 goto keep;
         if (PageDirty(page) || PageWriteback(page))
                 goto keep_locked;
......