提交 55fa6091 编写于 作者: D Dave Chinner 提交者: Al Viro

fs: move i_sb_list out from under inode_lock

Protect the per-sb inode list with a new global lock
inode_sb_list_lock and use it to protect the list manipulations and
traversals. This lock replaces the inode_lock as the inodes on the
list can be validity checked while holding the inode->i_lock and
hence the inode_lock is no longer needed to protect the list.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
上级 f283c86a
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <linux/writeback.h> #include <linux/writeback.h>
#include <linux/sysctl.h> #include <linux/sysctl.h>
#include <linux/gfp.h> #include <linux/gfp.h>
#include "internal.h"
/* A global variable is a bit ugly, but it keeps the code simple */ /* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches; int sysctl_drop_caches;
...@@ -16,7 +17,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused) ...@@ -16,7 +17,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
{ {
struct inode *inode, *toput_inode = NULL; struct inode *inode, *toput_inode = NULL;
spin_lock(&inode_lock); spin_lock(&inode_sb_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
spin_lock(&inode->i_lock); spin_lock(&inode->i_lock);
if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
...@@ -26,13 +27,13 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused) ...@@ -26,13 +27,13 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
} }
__iget(inode); __iget(inode);
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
spin_unlock(&inode_lock); spin_unlock(&inode_sb_list_lock);
invalidate_mapping_pages(inode->i_mapping, 0, -1); invalidate_mapping_pages(inode->i_mapping, 0, -1);
iput(toput_inode); iput(toput_inode);
toput_inode = inode; toput_inode = inode;
spin_lock(&inode_lock); spin_lock(&inode_sb_list_lock);
} }
spin_unlock(&inode_lock); spin_unlock(&inode_sb_list_lock);
iput(toput_inode); iput(toput_inode);
} }
......
...@@ -1123,7 +1123,7 @@ static void wait_sb_inodes(struct super_block *sb) ...@@ -1123,7 +1123,7 @@ static void wait_sb_inodes(struct super_block *sb)
*/ */
WARN_ON(!rwsem_is_locked(&sb->s_umount)); WARN_ON(!rwsem_is_locked(&sb->s_umount));
spin_lock(&inode_lock); spin_lock(&inode_sb_list_lock);
/* /*
* Data integrity sync. Must wait for all pages under writeback, * Data integrity sync. Must wait for all pages under writeback,
...@@ -1143,14 +1143,15 @@ static void wait_sb_inodes(struct super_block *sb) ...@@ -1143,14 +1143,15 @@ static void wait_sb_inodes(struct super_block *sb)
} }
__iget(inode); __iget(inode);
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
spin_unlock(&inode_lock); spin_unlock(&inode_sb_list_lock);
/* /*
* We hold a reference to 'inode' so it couldn't have * We hold a reference to 'inode' so it couldn't have been
* been removed from s_inodes list while we dropped the * removed from s_inodes list while we dropped the
* inode_lock. We cannot iput the inode now as we can * inode_sb_list_lock. We cannot iput the inode now as we can
* be holding the last reference and we cannot iput it * be holding the last reference and we cannot iput it under
* under inode_lock. So we keep the reference and iput * inode_sb_list_lock. So we keep the reference and iput it
* it later. * later.
*/ */
iput(old_inode); iput(old_inode);
old_inode = inode; old_inode = inode;
...@@ -1159,9 +1160,9 @@ static void wait_sb_inodes(struct super_block *sb) ...@@ -1159,9 +1160,9 @@ static void wait_sb_inodes(struct super_block *sb)
cond_resched(); cond_resched();
spin_lock(&inode_lock); spin_lock(&inode_sb_list_lock);
} }
spin_unlock(&inode_lock); spin_unlock(&inode_sb_list_lock);
iput(old_inode); iput(old_inode);
} }
......
...@@ -34,10 +34,15 @@ ...@@ -34,10 +34,15 @@
* inode->i_state, inode->i_hash, __iget() * inode->i_state, inode->i_hash, __iget()
* inode_lru_lock protects: * inode_lru_lock protects:
* inode_lru, inode->i_lru * inode_lru, inode->i_lru
* inode_sb_list_lock protects:
* sb->s_inodes, inode->i_sb_list
* *
* Lock ordering: * Lock ordering:
* inode_lock * inode_lock
* inode->i_lock * inode->i_lock
*
* inode_sb_list_lock
* inode->i_lock
* inode_lru_lock * inode_lru_lock
*/ */
...@@ -99,6 +104,8 @@ static struct hlist_head *inode_hashtable __read_mostly; ...@@ -99,6 +104,8 @@ static struct hlist_head *inode_hashtable __read_mostly;
*/ */
DEFINE_SPINLOCK(inode_lock); DEFINE_SPINLOCK(inode_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
/* /*
* iprune_sem provides exclusion between the icache shrinking and the * iprune_sem provides exclusion between the icache shrinking and the
* umount path. * umount path.
...@@ -378,26 +385,23 @@ static void inode_lru_list_del(struct inode *inode) ...@@ -378,26 +385,23 @@ static void inode_lru_list_del(struct inode *inode)
spin_unlock(&inode_lru_lock); spin_unlock(&inode_lru_lock);
} }
static inline void __inode_sb_list_add(struct inode *inode)
{
list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
}
/** /**
* inode_sb_list_add - add inode to the superblock list of inodes * inode_sb_list_add - add inode to the superblock list of inodes
* @inode: inode to add * @inode: inode to add
*/ */
void inode_sb_list_add(struct inode *inode) void inode_sb_list_add(struct inode *inode)
{ {
spin_lock(&inode_lock); spin_lock(&inode_sb_list_lock);
__inode_sb_list_add(inode); list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
spin_unlock(&inode_lock); spin_unlock(&inode_sb_list_lock);
} }
EXPORT_SYMBOL_GPL(inode_sb_list_add); EXPORT_SYMBOL_GPL(inode_sb_list_add);
static inline void __inode_sb_list_del(struct inode *inode) static inline void inode_sb_list_del(struct inode *inode)
{ {
spin_lock(&inode_sb_list_lock);
list_del_init(&inode->i_sb_list); list_del_init(&inode->i_sb_list);
spin_unlock(&inode_sb_list_lock);
} }
static unsigned long hash(struct super_block *sb, unsigned long hashval) static unsigned long hash(struct super_block *sb, unsigned long hashval)
...@@ -481,9 +485,10 @@ static void evict(struct inode *inode) ...@@ -481,9 +485,10 @@ static void evict(struct inode *inode)
spin_lock(&inode_lock); spin_lock(&inode_lock);
list_del_init(&inode->i_wb_list); list_del_init(&inode->i_wb_list);
__inode_sb_list_del(inode);
spin_unlock(&inode_lock); spin_unlock(&inode_lock);
inode_sb_list_del(inode);
if (op->evict_inode) { if (op->evict_inode) {
op->evict_inode(inode); op->evict_inode(inode);
} else { } else {
...@@ -539,7 +544,7 @@ void evict_inodes(struct super_block *sb) ...@@ -539,7 +544,7 @@ void evict_inodes(struct super_block *sb)
struct inode *inode, *next; struct inode *inode, *next;
LIST_HEAD(dispose); LIST_HEAD(dispose);
spin_lock(&inode_lock); spin_lock(&inode_sb_list_lock);
list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
if (atomic_read(&inode->i_count)) if (atomic_read(&inode->i_count))
continue; continue;
...@@ -555,7 +560,7 @@ void evict_inodes(struct super_block *sb) ...@@ -555,7 +560,7 @@ void evict_inodes(struct super_block *sb)
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
list_add(&inode->i_lru, &dispose); list_add(&inode->i_lru, &dispose);
} }
spin_unlock(&inode_lock); spin_unlock(&inode_sb_list_lock);
dispose_list(&dispose); dispose_list(&dispose);
...@@ -584,7 +589,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty) ...@@ -584,7 +589,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
struct inode *inode, *next; struct inode *inode, *next;
LIST_HEAD(dispose); LIST_HEAD(dispose);
spin_lock(&inode_lock); spin_lock(&inode_sb_list_lock);
list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
spin_lock(&inode->i_lock); spin_lock(&inode->i_lock);
if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
...@@ -607,7 +612,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty) ...@@ -607,7 +612,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
list_add(&inode->i_lru, &dispose); list_add(&inode->i_lru, &dispose);
} }
spin_unlock(&inode_lock); spin_unlock(&inode_sb_list_lock);
dispose_list(&dispose); dispose_list(&dispose);
...@@ -867,16 +872,14 @@ struct inode *new_inode(struct super_block *sb) ...@@ -867,16 +872,14 @@ struct inode *new_inode(struct super_block *sb)
{ {
struct inode *inode; struct inode *inode;
spin_lock_prefetch(&inode_lock); spin_lock_prefetch(&inode_sb_list_lock);
inode = alloc_inode(sb); inode = alloc_inode(sb);
if (inode) { if (inode) {
spin_lock(&inode_lock);
spin_lock(&inode->i_lock); spin_lock(&inode->i_lock);
inode->i_state = 0; inode->i_state = 0;
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
__inode_sb_list_add(inode); inode_sb_list_add(inode);
spin_unlock(&inode_lock);
} }
return inode; return inode;
} }
...@@ -945,7 +948,7 @@ static struct inode *get_new_inode(struct super_block *sb, ...@@ -945,7 +948,7 @@ static struct inode *get_new_inode(struct super_block *sb,
inode->i_state = I_NEW; inode->i_state = I_NEW;
hlist_add_head(&inode->i_hash, head); hlist_add_head(&inode->i_hash, head);
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
__inode_sb_list_add(inode); inode_sb_list_add(inode);
spin_unlock(&inode_lock); spin_unlock(&inode_lock);
/* Return the locked inode with I_NEW set, the /* Return the locked inode with I_NEW set, the
...@@ -994,7 +997,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb, ...@@ -994,7 +997,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
inode->i_state = I_NEW; inode->i_state = I_NEW;
hlist_add_head(&inode->i_hash, head); hlist_add_head(&inode->i_hash, head);
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
__inode_sb_list_add(inode); inode_sb_list_add(inode);
spin_unlock(&inode_lock); spin_unlock(&inode_lock);
/* Return the locked inode with I_NEW set, the /* Return the locked inode with I_NEW set, the
......
...@@ -125,6 +125,8 @@ extern long do_handle_open(int mountdirfd, ...@@ -125,6 +125,8 @@ extern long do_handle_open(int mountdirfd,
/* /*
* inode.c * inode.c
*/ */
extern spinlock_t inode_sb_list_lock;
extern int get_nr_dirty_inodes(void); extern int get_nr_dirty_inodes(void);
extern void evict_inodes(struct super_block *); extern void evict_inodes(struct super_block *);
extern int invalidate_inodes(struct super_block *, bool); extern int invalidate_inodes(struct super_block *, bool);
...@@ -29,6 +29,8 @@ ...@@ -29,6 +29,8 @@
#include <linux/fsnotify_backend.h> #include <linux/fsnotify_backend.h>
#include "fsnotify.h" #include "fsnotify.h"
#include "../internal.h"
/* /*
* Recalculate the mask of events relevant to a given inode locked. * Recalculate the mask of events relevant to a given inode locked.
*/ */
...@@ -237,15 +239,14 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark, ...@@ -237,15 +239,14 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
* fsnotify_unmount_inodes - an sb is unmounting. handle any watched inodes. * fsnotify_unmount_inodes - an sb is unmounting. handle any watched inodes.
* @list: list of inodes being unmounted (sb->s_inodes) * @list: list of inodes being unmounted (sb->s_inodes)
* *
* Called with inode_lock held, protecting the unmounting super block's list * Called during unmount with no locks held, so needs to be safe against
* of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay. * concurrent modifiers. We temporarily drop inode_sb_list_lock and CAN block.
* We temporarily drop inode_lock, however, and CAN block.
*/ */
void fsnotify_unmount_inodes(struct list_head *list) void fsnotify_unmount_inodes(struct list_head *list)
{ {
struct inode *inode, *next_i, *need_iput = NULL; struct inode *inode, *next_i, *need_iput = NULL;
spin_lock(&inode_lock); spin_lock(&inode_sb_list_lock);
list_for_each_entry_safe(inode, next_i, list, i_sb_list) { list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
struct inode *need_iput_tmp; struct inode *need_iput_tmp;
...@@ -293,12 +294,11 @@ void fsnotify_unmount_inodes(struct list_head *list) ...@@ -293,12 +294,11 @@ void fsnotify_unmount_inodes(struct list_head *list)
} }
/* /*
* We can safely drop inode_lock here because we hold * We can safely drop inode_sb_list_lock here because we hold
* references on both inode and next_i. Also no new inodes * references on both inode and next_i. Also no new inodes
* will be added since the umount has begun. Finally, * will be added since the umount has begun.
* iprune_mutex keeps shrink_icache_memory() away.
*/ */
spin_unlock(&inode_lock); spin_unlock(&inode_sb_list_lock);
if (need_iput_tmp) if (need_iput_tmp)
iput(need_iput_tmp); iput(need_iput_tmp);
...@@ -310,7 +310,7 @@ void fsnotify_unmount_inodes(struct list_head *list) ...@@ -310,7 +310,7 @@ void fsnotify_unmount_inodes(struct list_head *list)
iput(inode); iput(inode);
spin_lock(&inode_lock); spin_lock(&inode_sb_list_lock);
} }
spin_unlock(&inode_lock); spin_unlock(&inode_sb_list_lock);
} }
...@@ -76,7 +76,7 @@ ...@@ -76,7 +76,7 @@
#include <linux/buffer_head.h> #include <linux/buffer_head.h>
#include <linux/capability.h> #include <linux/capability.h>
#include <linux/quotaops.h> #include <linux/quotaops.h>
#include <linux/writeback.h> /* for inode_lock, oddly enough.. */ #include "../internal.h" /* ugh */
#include <asm/uaccess.h> #include <asm/uaccess.h>
...@@ -900,7 +900,7 @@ static void add_dquot_ref(struct super_block *sb, int type) ...@@ -900,7 +900,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
int reserved = 0; int reserved = 0;
#endif #endif
spin_lock(&inode_lock); spin_lock(&inode_sb_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
spin_lock(&inode->i_lock); spin_lock(&inode->i_lock);
if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
...@@ -915,19 +915,23 @@ static void add_dquot_ref(struct super_block *sb, int type) ...@@ -915,19 +915,23 @@ static void add_dquot_ref(struct super_block *sb, int type)
#endif #endif
__iget(inode); __iget(inode);
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
spin_unlock(&inode_lock); spin_unlock(&inode_sb_list_lock);
iput(old_inode); iput(old_inode);
__dquot_initialize(inode, type); __dquot_initialize(inode, type);
/* We hold a reference to 'inode' so it couldn't have been
* removed from s_inodes list while we dropped the inode_lock. /*
* We cannot iput the inode now as we can be holding the last * We hold a reference to 'inode' so it couldn't have been
* reference and we cannot iput it under inode_lock. So we * removed from s_inodes list while we dropped the
 * keep the reference and iput it later. */			 * inode_sb_list_lock. We cannot iput the inode now as we can be
* holding the last reference and we cannot iput it under
* inode_sb_list_lock. So we keep the reference and iput it
* later.
*/
old_inode = inode; old_inode = inode;
spin_lock(&inode_lock); spin_lock(&inode_sb_list_lock);
} }
spin_unlock(&inode_lock); spin_unlock(&inode_sb_list_lock);
iput(old_inode); iput(old_inode);
#ifdef CONFIG_QUOTA_DEBUG #ifdef CONFIG_QUOTA_DEBUG
...@@ -1008,7 +1012,7 @@ static void remove_dquot_ref(struct super_block *sb, int type, ...@@ -1008,7 +1012,7 @@ static void remove_dquot_ref(struct super_block *sb, int type,
struct inode *inode; struct inode *inode;
int reserved = 0; int reserved = 0;
spin_lock(&inode_lock); spin_lock(&inode_sb_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
/* /*
* We have to scan also I_NEW inodes because they can already * We have to scan also I_NEW inodes because they can already
...@@ -1022,7 +1026,7 @@ static void remove_dquot_ref(struct super_block *sb, int type, ...@@ -1022,7 +1026,7 @@ static void remove_dquot_ref(struct super_block *sb, int type,
remove_inode_dquot_ref(inode, type, tofree_head); remove_inode_dquot_ref(inode, type, tofree_head);
} }
} }
spin_unlock(&inode_lock); spin_unlock(&inode_sb_list_lock);
#ifdef CONFIG_QUOTA_DEBUG #ifdef CONFIG_QUOTA_DEBUG
if (reserved) { if (reserved) {
printk(KERN_WARNING "VFS (%s): Writes happened after quota" printk(KERN_WARNING "VFS (%s): Writes happened after quota"
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册