Commit 19fd6231 authored by Nick Piggin, committed by Linus Torvalds

mm: spinlock tree_lock

mapping->tree_lock has no read lockers.  Convert the lock from an rwlock
to a spinlock.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Reviewed-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent a60637c8
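Since no read-side users of tree_lock remain (the parent commit above moved page-cache lookups to lockless, refcount-based revalidation), every locking call in this patch converts one-to-one from the rwlock write side to the plain spinlock API. A minimal sketch of the substitution table, using a hypothetical demo_lock and demo_count rather than a real address_space; the primitives themselves are the actual kernel APIs being exchanged:

/*
 * Illustrative sketch only -- demo_lock and demo_count are hypothetical.
 * The one-to-one substitutions this patch applies throughout:
 *   DEFINE_RWLOCK / rwlock_init / __RW_LOCK_UNLOCKED
 *     -> DEFINE_SPINLOCK / spin_lock_init / __SPIN_LOCK_UNLOCKED
 *   write_lock_irq / write_unlock_irq
 *     -> spin_lock_irq / spin_unlock_irq
 *   write_lock_irqsave / write_unlock_irqrestore
 *     -> spin_lock_irqsave / spin_unlock_irqrestore
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* was: DEFINE_RWLOCK(demo_lock) */
static unsigned long demo_count;	/* stand-in for the protected tree */

static void demo_update(void)
{
	spin_lock_irq(&demo_lock);	/* was: write_lock_irq(...) */
	demo_count++;
	spin_unlock_irq(&demo_lock);	/* was: write_unlock_irq(...) */
}

With the rwlock's reader accounting gone, the spinlock is at worst equivalent and usually cheaper on the paths changed below.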
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -706,7 +706,7 @@ static int __set_page_dirty(struct page *page,
 	if (TestSetPageDirty(page))
 		return 0;
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 	if (page->mapping) {	/* Race with truncate? */
 		WARN_ON_ONCE(warn && !PageUptodate(page));
@@ -719,7 +719,7 @@ static int __set_page_dirty(struct page *page,
 		radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 	}
-	write_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irq(&mapping->tree_lock);
 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	return 1;
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -209,7 +209,7 @@ void inode_init_once(struct inode *inode)
 	INIT_LIST_HEAD(&inode->i_dentry);
 	INIT_LIST_HEAD(&inode->i_devices);
 	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
-	rwlock_init(&inode->i_data.tree_lock);
+	spin_lock_init(&inode->i_data.tree_lock);
 	spin_lock_init(&inode->i_data.i_mmap_lock);
 	INIT_LIST_HEAD(&inode->i_data.private_list);
 	spin_lock_init(&inode->i_data.private_lock);
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -424,9 +424,9 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 }
 #define flush_dcache_mmap_lock(mapping) \
-	write_lock_irq(&(mapping)->tree_lock)
+	spin_lock_irq(&(mapping)->tree_lock)
 #define flush_dcache_mmap_unlock(mapping) \
-	write_unlock_irq(&(mapping)->tree_lock)
+	spin_unlock_irq(&(mapping)->tree_lock)
 #define flush_icache_user_range(vma,page,addr,len) \
 	flush_dcache_page(page)
--- a/include/asm-parisc/cacheflush.h
+++ b/include/asm-parisc/cacheflush.h
@@ -45,9 +45,9 @@ void flush_cache_mm(struct mm_struct *mm);
 extern void flush_dcache_page(struct page *page);
 #define flush_dcache_mmap_lock(mapping) \
-	write_lock_irq(&(mapping)->tree_lock)
+	spin_lock_irq(&(mapping)->tree_lock)
 #define flush_dcache_mmap_unlock(mapping) \
-	write_unlock_irq(&(mapping)->tree_lock)
+	spin_unlock_irq(&(mapping)->tree_lock)
 #define flush_icache_page(vma,page)	do {	\
 	flush_kernel_dcache_page(page);	\
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -499,7 +499,7 @@ struct backing_dev_info;
 struct address_space {
 	struct inode		*host;		/* owner: inode, block_device */
 	struct radix_tree_root	page_tree;	/* radix tree of all pages */
-	rwlock_t		tree_lock;	/* and rwlock protecting it */
+	spinlock_t		tree_lock;	/* and lock protecting it */
 	unsigned int		i_mmap_writable;/* count VM_SHARED mappings */
 	struct prio_tree_root	i_mmap;		/* tree of private and shared mappings */
 	struct list_head	i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -109,7 +109,7 @@
 /*
  * Remove a page from the page cache and free it. Caller has to make
  * sure the page is locked and that nobody else uses it - or that usage
- * is safe.  The caller must hold a write_lock on the mapping's tree_lock.
+ * is safe.  The caller must hold the mapping's tree_lock.
  */
 void __remove_from_page_cache(struct page *page)
 {
@@ -141,9 +141,9 @@ void remove_from_page_cache(struct page *page)
 	BUG_ON(!PageLocked(page));
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 	__remove_from_page_cache(page);
-	write_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irq(&mapping->tree_lock);
 }
 static int sync_page(void *word)
@@ -469,7 +469,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 		page->mapping = mapping;
 		page->index = offset;
-		write_lock_irq(&mapping->tree_lock);
+		spin_lock_irq(&mapping->tree_lock);
 		error = radix_tree_insert(&mapping->page_tree, offset, page);
 		if (likely(!error)) {
 			mapping->nrpages++;
@@ -480,7 +480,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 			page_cache_release(page);
 		}
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 		radix_tree_preload_end();
 	} else
 		mem_cgroup_uncharge_cache_page(page);
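One detail worth noting in the add_to_page_cache_locked() hunk above: the radix-tree insertion now runs under a spinlock with interrupts off, so node allocation must happen beforehand, which is what the radix_tree_preload()/radix_tree_preload_end() pair around the hunk provides. A hedged sketch of the pattern, with a hypothetical demo_insert() wrapping the real radix-tree and locking APIs:

#include <linux/fs.h>
#include <linux/radix-tree.h>

static int demo_insert(struct address_space *mapping, pgoff_t offset,
		       struct page *page)
{
	/* Sleeping allocation up front; preload also disables preemption. */
	int error = radix_tree_preload(GFP_KERNEL);

	if (error)
		return error;

	spin_lock_irq(&mapping->tree_lock);
	error = radix_tree_insert(&mapping->page_tree, offset, page);
	spin_unlock_irq(&mapping->tree_lock);
	radix_tree_preload_end();	/* re-enables preemption */

	return error;
}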
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -323,7 +323,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 		return 0;
 	}
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 	pslot = radix_tree_lookup_slot(&mapping->page_tree,
 					page_index(page));
@@ -331,12 +331,12 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	expected_count = 2 + !!PagePrivate(page);
 	if (page_count(page) != expected_count ||
 			(struct page *)radix_tree_deref_slot(pslot) != page) {
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
 	if (!page_freeze_refs(page, expected_count)) {
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
@@ -373,10 +373,9 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	__inc_zone_page_state(newpage, NR_FILE_PAGES);
-	write_unlock_irq(&mapping->tree_lock);
-	if (!PageSwapCache(newpage)) {
+	spin_unlock_irq(&mapping->tree_lock);
+	if (!PageSwapCache(newpage))
 		mem_cgroup_uncharge_cache_page(page);
-	}
 	return 0;
 }
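The page_freeze_refs() call in the hunk above is what makes the plain spinlock safe here: the lockless lookups introduced by the parent commit revalidate pages by reference count instead of read-locking the tree, and freezing atomically drops the count from its expected value to zero so that any concurrent speculative lookup fails and retries. A paraphrased sketch of the helper, reconstructed from memory rather than quoted:

/*
 * Hedged reconstruction of the 2.6.26-era helper, not verbatim source:
 * succeed only if exactly `count` references exist, leaving the page
 * with a zero refcount that lockless lookups refuse to raise.
 */
static inline int demo_freeze_refs(struct page *page, int count)
{
	return atomic_cmpxchg(&page->_count, count, 0) == count;
}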
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1088,7 +1088,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 		if (!mapping)
 			return 1;
-		write_lock_irq(&mapping->tree_lock);
+		spin_lock_irq(&mapping->tree_lock);
 		mapping2 = page_mapping(page);
 		if (mapping2) { /* Race with truncate? */
 			BUG_ON(mapping2 != mapping);
@@ -1102,7 +1102,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 			radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 		}
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 		if (mapping->host) {
 			/* !PageAnon && !swapper_space */
 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@ -1258,7 +1258,7 @@ int test_clear_page_writeback(struct page *page)
 		struct backing_dev_info *bdi = mapping->backing_dev_info;
 		unsigned long flags;
-		write_lock_irqsave(&mapping->tree_lock, flags);
+		spin_lock_irqsave(&mapping->tree_lock, flags);
 		ret = TestClearPageWriteback(page);
 		if (ret) {
 			radix_tree_tag_clear(&mapping->page_tree,
@@ -1269,7 +1269,7 @@ int test_clear_page_writeback(struct page *page)
 				__bdi_writeout_inc(bdi);
 			}
 		}
-		write_unlock_irqrestore(&mapping->tree_lock, flags);
+		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	} else {
 		ret = TestClearPageWriteback(page);
 	}
@@ -1287,7 +1287,7 @@ int test_set_page_writeback(struct page *page)
 		struct backing_dev_info *bdi = mapping->backing_dev_info;
 		unsigned long flags;
-		write_lock_irqsave(&mapping->tree_lock, flags);
+		spin_lock_irqsave(&mapping->tree_lock, flags);
 		ret = TestSetPageWriteback(page);
 		if (!ret) {
 			radix_tree_tag_set(&mapping->page_tree,
@@ -1300,7 +1300,7 @@ int test_set_page_writeback(struct page *page)
 			radix_tree_tag_clear(&mapping->page_tree,
 						page_index(page),
 						PAGECACHE_TAG_DIRTY);
-		write_unlock_irqrestore(&mapping->tree_lock, flags);
+		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	} else {
 		ret = TestSetPageWriteback(page);
 	}
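Note that the two writeback helpers above keep the irqsave/irqrestore flavour while the rest of the patch uses the plain _irq form. The usual reason applies: these helpers can be reached from I/O completion paths where interrupts may already be disabled, so the previous interrupt state has to be restored rather than unconditionally re-enabled. A minimal sketch with a hypothetical demo_lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_touch_from_any_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* saves IRQ state */
	/* ... tag bookkeeping would go here ... */
	spin_unlock_irqrestore(&demo_lock, flags);	/* restores it exactly */
}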
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -39,7 +39,7 @@ static struct backing_dev_info swap_backing_dev_info = {
 struct address_space swapper_space = {
 	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
-	.tree_lock	= __RW_LOCK_UNLOCKED(swapper_space.tree_lock),
+	.tree_lock	= __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
 	.a_ops		= &swap_aops,
 	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
 	.backing_dev_info = &swap_backing_dev_info,
@@ -80,7 +80,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 		SetPageSwapCache(page);
 		set_page_private(page, entry.val);
-		write_lock_irq(&swapper_space.tree_lock);
+		spin_lock_irq(&swapper_space.tree_lock);
 		error = radix_tree_insert(&swapper_space.page_tree,
 						entry.val, page);
 		if (likely(!error)) {
@@ -88,7 +88,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 			__inc_zone_page_state(page, NR_FILE_PAGES);
 			INC_CACHE_INFO(add_total);
 		}
-		write_unlock_irq(&swapper_space.tree_lock);
+		spin_unlock_irq(&swapper_space.tree_lock);
 		radix_tree_preload_end();
 		if (unlikely(error)) {
@@ -182,9 +182,9 @@ void delete_from_swap_cache(struct page *page)
 	entry.val = page_private(page);
-	write_lock_irq(&swapper_space.tree_lock);
+	spin_lock_irq(&swapper_space.tree_lock);
 	__delete_from_swap_cache(page);
-	write_unlock_irq(&swapper_space.tree_lock);
+	spin_unlock_irq(&swapper_space.tree_lock);
 	swap_free(entry);
 	page_cache_release(page);
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -369,13 +369,13 @@ int remove_exclusive_swap_page(struct page *page)
 	retval = 0;
 	if (p->swap_map[swp_offset(entry)] == 1) {
 		/* Recheck the page count with the swapcache lock held.. */
-		write_lock_irq(&swapper_space.tree_lock);
+		spin_lock_irq(&swapper_space.tree_lock);
 		if ((page_count(page) == 2) && !PageWriteback(page)) {
 			__delete_from_swap_cache(page);
 			SetPageDirty(page);
 			retval = 1;
 		}
-		write_unlock_irq(&swapper_space.tree_lock);
+		spin_unlock_irq(&swapper_space.tree_lock);
 	}
 	spin_unlock(&swap_lock);
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -349,18 +349,18 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
 		return 0;
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 	if (PageDirty(page))
 		goto failed;
 	BUG_ON(PagePrivate(page));
 	__remove_from_page_cache(page);
-	write_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irq(&mapping->tree_lock);
 	ClearPageUptodate(page);
 	page_cache_release(page);	/* pagecache ref */
 	return 1;
 failed:
-	write_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irq(&mapping->tree_lock);
 	return 0;
 }
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -399,7 +399,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
 	BUG_ON(!PageLocked(page));
 	BUG_ON(mapping != page_mapping(page));
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 	/*
 	 * The non racy check for a busy page.
 	 *
@@ -436,17 +436,17 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
 	if (PageSwapCache(page)) {
 		swp_entry_t swap = { .val = page_private(page) };
 		__delete_from_swap_cache(page);
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 		swap_free(swap);
 	} else {
 		__remove_from_page_cache(page);
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 	}
 	return 1;
 cannot_free:
-	write_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irq(&mapping->tree_lock);
 	return 0;
 }