Commit 19156840 authored by Dave Chinner, committed by Al Viro

dentry: move to per-sb LRU locks

With the dentry LRUs being per-sb structures, there is no real need for
a global dentry_lru_lock. The locking can be made more fine-grained by
moving to a per-sb LRU lock, isolating the LRU operations of different
filesystems completely from each other. The need for this is independent
of any performance consideration that may arise: in the interest of
abstracting the LRU operations away, each LRU must operate under its own
lock rather than under a single global lock shared by all of them.
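
For illustration only (an editor's sketch, not code from the patch): every LRU
helper now takes the lock hanging off dentry->d_sb instead of a global lock, as
in the hypothetical helper below. It mirrors the patched dentry_lru_add() hunk
further down and assumes the caller holds dentry->d_lock, matching the updated
Ordering comment in the diff; example_lru_add() is an invented name, not a
symbol introduced by this commit.

/* Sketch only: per-sb LRU locking as introduced by this patch. */
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static void example_lru_add(struct dentry *dentry)
{
        struct super_block *sb = dentry->d_sb;

        /* Caller holds dentry->d_lock, so d_flags updates are safe. */
        spin_lock(&sb->s_dentry_lru_lock);
        if (!(dentry->d_flags & DCACHE_LRU_LIST)) {
                dentry->d_flags |= DCACHE_LRU_LIST;
                list_add(&dentry->d_lru, &sb->s_dentry_lru);
                sb->s_nr_dentry_unused++;
        }
        /* Contention is now confined to this one filesystem. */
        spin_unlock(&sb->s_dentry_lru_lock);
}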

[glommer@openvz.org: updated changelog]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Parent 62d36c77
@@ -48,7 +48,7 @@
  * - the dcache hash table
  * s_anon bl list spinlock protects:
  * - the s_anon list (see __d_drop)
- * dcache_lru_lock protects:
+ * dentry->d_sb->s_dentry_lru_lock protects:
  * - the dcache lru lists and counters
  * d_lock protects:
  * - d_flags
@@ -63,7 +63,7 @@
  * Ordering:
  * dentry->d_inode->i_lock
  * dentry->d_lock
- * dcache_lru_lock
+ * dentry->d_sb->s_dentry_lru_lock
  * dcache_hash_bucket lock
  * s_anon lock
  *
@@ -81,7 +81,6 @@
 int sysctl_vfs_cache_pressure __read_mostly = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 EXPORT_SYMBOL(rename_lock);
@@ -362,12 +361,12 @@ static void dentry_unlink_inode(struct dentry * dentry)
 static void dentry_lru_add(struct dentry *dentry)
 {
         if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) {
-                spin_lock(&dcache_lru_lock);
+                spin_lock(&dentry->d_sb->s_dentry_lru_lock);
                 dentry->d_flags |= DCACHE_LRU_LIST;
                 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
                 dentry->d_sb->s_nr_dentry_unused++;
                 this_cpu_inc(nr_dentry_unused);
-                spin_unlock(&dcache_lru_lock);
+                spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
         }
 }
@@ -385,15 +384,15 @@ static void __dentry_lru_del(struct dentry *dentry)
 static void dentry_lru_del(struct dentry *dentry)
 {
         if (!list_empty(&dentry->d_lru)) {
-                spin_lock(&dcache_lru_lock);
+                spin_lock(&dentry->d_sb->s_dentry_lru_lock);
                 __dentry_lru_del(dentry);
-                spin_unlock(&dcache_lru_lock);
+                spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
         }
 }
 
 static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
 {
-        spin_lock(&dcache_lru_lock);
+        spin_lock(&dentry->d_sb->s_dentry_lru_lock);
         if (list_empty(&dentry->d_lru)) {
                 dentry->d_flags |= DCACHE_LRU_LIST;
                 list_add_tail(&dentry->d_lru, list);
@@ -402,7 +401,7 @@ static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
         } else {
                 list_move_tail(&dentry->d_lru, list);
         }
-        spin_unlock(&dcache_lru_lock);
+        spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
 }
 
 /**
@@ -895,14 +894,14 @@ void prune_dcache_sb(struct super_block *sb, int count)
         LIST_HEAD(tmp);
 
 relock:
-        spin_lock(&dcache_lru_lock);
+        spin_lock(&sb->s_dentry_lru_lock);
         while (!list_empty(&sb->s_dentry_lru)) {
                 dentry = list_entry(sb->s_dentry_lru.prev,
                                 struct dentry, d_lru);
                 BUG_ON(dentry->d_sb != sb);
                 if (!spin_trylock(&dentry->d_lock)) {
-                        spin_unlock(&dcache_lru_lock);
+                        spin_unlock(&sb->s_dentry_lru_lock);
                         cpu_relax();
                         goto relock;
                 }
@@ -918,11 +917,11 @@ void prune_dcache_sb(struct super_block *sb, int count)
                         if (!--count)
                                 break;
                 }
-                cond_resched_lock(&dcache_lru_lock);
+                cond_resched_lock(&sb->s_dentry_lru_lock);
         }
         if (!list_empty(&referenced))
                 list_splice(&referenced, &sb->s_dentry_lru);
-        spin_unlock(&dcache_lru_lock);
+        spin_unlock(&sb->s_dentry_lru_lock);
 
         shrink_dentry_list(&tmp);
 }
@@ -938,14 +937,14 @@ void shrink_dcache_sb(struct super_block *sb)
 {
         LIST_HEAD(tmp);
 
-        spin_lock(&dcache_lru_lock);
+        spin_lock(&sb->s_dentry_lru_lock);
         while (!list_empty(&sb->s_dentry_lru)) {
                 list_splice_init(&sb->s_dentry_lru, &tmp);
-                spin_unlock(&dcache_lru_lock);
+                spin_unlock(&sb->s_dentry_lru_lock);
                 shrink_dentry_list(&tmp);
-                spin_lock(&dcache_lru_lock);
+                spin_lock(&sb->s_dentry_lru_lock);
         }
-        spin_unlock(&dcache_lru_lock);
+        spin_unlock(&sb->s_dentry_lru_lock);
 }
 EXPORT_SYMBOL(shrink_dcache_sb);
@@ -176,6 +176,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
         INIT_HLIST_BL_HEAD(&s->s_anon);
         INIT_LIST_HEAD(&s->s_inodes);
         INIT_LIST_HEAD(&s->s_dentry_lru);
+        spin_lock_init(&s->s_dentry_lru_lock);
         INIT_LIST_HEAD(&s->s_inode_lru);
         spin_lock_init(&s->s_inode_lru_lock);
         INIT_LIST_HEAD(&s->s_mounts);
@@ -1269,7 +1269,9 @@ struct super_block {
         struct list_head s_files;
 #endif
         struct list_head s_mounts;      /* list of mounts; _not_ for fs use */
-        /* s_dentry_lru, s_nr_dentry_unused protected by dcache.c lru locks */
+        /* s_dentry_lru_lock protects s_dentry_lru and s_nr_dentry_unused */
+        spinlock_t s_dentry_lru_lock ____cacheline_aligned_in_smp;
         struct list_head s_dentry_lru;  /* unused dentry lru */
         long s_nr_dentry_unused;        /* # of dentry on lru */