Commit 24264434 authored by Steven Whitehouse

[GFS2] Rewrite of examine_bucket()

The existing implementation of this function in glock.c was not
very efficient, as it relied upon keeping a cursor element on the
hash chain in question and moving it along. This new version improves
upon this by using the current element as a cursor. This is possible
since we only look at the "next" element in the list after we've
taken the read_lock() subsequent to calling the examiner function.
Obviously we have to eventually drop the ref count that we are then
left with, and we cannot do that while holding the read_lock, so we
do that the next time we drop the lock. That means either just before
we examine another glock, or when the loop has terminated.

The new implementation has several advantages: it uses only a
read_lock() rather than a write_lock(), so it can run simultaneously
with other code; and since it doesn't need a "plug" element, it removes
a test not only from this list iterator, but from all the other glock
list iterators too. So it makes things faster and smaller.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Parent 94610610
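
Editor's note: to make the locking/refcount dance described in the message concrete, here is a small user-space model of the pattern (not part of the commit, and not the GFS2 API). A toy refcounted node list protected by a pthread rwlock stands in for the glock hash bucket, and examine_all() plays the role of the rewritten examine_bucket(); all identifiers in the sketch (node, bucket_lock, node_get/node_put, examine_all) are invented for illustration.

/*
 * Minimal user-space sketch of the "current element as cursor" pattern:
 * hold a reference on the current element, drop the read lock while the
 * examiner runs, and release the previous cursor the next time the lock
 * is not held. None of these names are GFS2 identifiers.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next, *prev;   /* circular list; "head" is a sentinel */
        atomic_int refcount;        /* node stays linked while refcount > 0 */
        int id;
};

static struct node head = { &head, &head, 0, -1 };
static pthread_rwlock_t bucket_lock = PTHREAD_RWLOCK_INITIALIZER;

static void node_get(struct node *n)
{
        atomic_fetch_add(&n->refcount, 1);
}

/* Modeled on gfs2_glock_put(): the final put unlinks under the write lock. */
static void node_put(struct node *n)
{
        pthread_rwlock_wrlock(&bucket_lock);
        if (atomic_fetch_sub(&n->refcount, 1) == 1) {
                n->prev->next = n->next;
                n->next->prev = n->prev;
                pthread_rwlock_unlock(&bucket_lock);
                free(n);
                return;
        }
        pthread_rwlock_unlock(&bucket_lock);
}

typedef void (*examiner_t)(struct node *n);

/*
 * The iteration pattern of the rewritten examine_bucket(): pin the current
 * element, drop the read lock for the examiner, put the previous cursor
 * while no lock is held, and only read "next" after re-taking the read lock.
 */
static int examine_all(examiner_t examiner)
{
        struct node *n, *prev = NULL;
        int has_entries = 0;

        pthread_rwlock_rdlock(&bucket_lock);
        for (n = head.next; n != &head; n = n->next) {
                has_entries = 1;
                node_get(n);                          /* pin the cursor */
                pthread_rwlock_unlock(&bucket_lock);
                if (prev)
                        node_put(prev);               /* lock not held here */
                prev = n;
                examiner(n);                          /* may block; list may change */
                pthread_rwlock_rdlock(&bucket_lock);  /* re-lock before reading n->next */
        }
        pthread_rwlock_unlock(&bucket_lock);
        if (prev)
                node_put(prev);
        return has_entries;
}

static void print_node(struct node *n)
{
        printf("examining node %d\n", n->id);
}

int main(void)
{
        /* Build a tiny bucket; the list references are never dropped in
         * this toy, so no node is actually freed before exit. */
        for (int i = 0; i < 3; i++) {
                struct node *n = calloc(1, sizeof(*n));
                n->id = i;
                atomic_init(&n->refcount, 1);  /* reference held by the list */
                n->next = head.next;
                n->prev = &head;
                head.next->prev = n;
                head.next = n;
        }
        return !examine_all(print_node);
}

The property the commit relies on is visible in the sketch: because the cursor still holds a reference, the final put (which unlinks under the write lock) cannot run while the element is in use, so reading "next" under the re-taken read lock is always safe.
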
@@ -18,6 +18,7 @@
 #include <linux/kref.h>
 #include <linux/kallsyms.h>
 #include <linux/gfs2_ondisk.h>
+#include <linux/list.h>
 #include <asm/uaccess.h>
 
 #include "gfs2.h"
@@ -33,12 +34,6 @@
 #include "super.h"
 #include "util.h"
 
-/* Must be kept in sync with the beginning of struct gfs2_glock */
-struct glock_plug {
-        struct list_head gl_list;
-        unsigned long gl_flags;
-};
-
 struct greedy {
         struct gfs2_holder gr_gh;
         struct work_struct gr_work;
@@ -52,6 +47,7 @@ typedef void (*glock_examiner) (struct gfs2_glock * gl);
 
 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
 static int dump_glock(struct gfs2_glock *gl);
+static int dump_inode(struct gfs2_inode *ip);
 
 #define GFS2_GL_HASH_SHIFT 13
 #define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)
@@ -214,7 +210,7 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 
         write_lock(gl_lock_addr(gl->gl_hash));
         if (kref_put(&gl->gl_ref, kill_glock)) {
-                list_del_init(&gl_hash_table[gl->gl_hash].hb_list);
+                list_del_init(&gl->gl_list);
                 write_unlock(gl_lock_addr(gl->gl_hash));
                 BUG_ON(spin_is_locked(&gl->gl_spin));
                 glock_free(gl);
@@ -265,8 +261,6 @@ static struct gfs2_glock *search_bucket(unsigned int hash,
         struct gfs2_glock *gl;
 
         list_for_each_entry(gl, &gl_hash_table[hash].hb_list, gl_list) {
-                if (test_bit(GLF_PLUG, &gl->gl_flags))
-                        continue;
                 if (!lm_name_equal(&gl->gl_name, name))
                         continue;
                 if (gl->gl_sbd != sdp)
@@ -1899,51 +1893,33 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
 static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                           unsigned int hash)
 {
-        struct glock_plug plug;
-        struct list_head *tmp;
-        struct gfs2_glock *gl;
-        int entries;
-
-        /* Add "plug" to end of bucket list, work back up list from there */
-        memset(&plug.gl_flags, 0, sizeof(unsigned long));
-        set_bit(GLF_PLUG, &plug.gl_flags);
-
-        write_lock(gl_lock_addr(hash));
-        list_add(&plug.gl_list, &gl_hash_table[hash].hb_list);
-        write_unlock(gl_lock_addr(hash));
-
-        for (;;) {
-                write_lock(gl_lock_addr(hash));
-
-                for (;;) {
-                        tmp = plug.gl_list.next;
-
-                        if (tmp == &gl_hash_table[hash].hb_list) {
-                                list_del(&plug.gl_list);
-                                entries = !list_empty(&gl_hash_table[hash].hb_list);
-                                write_unlock(gl_lock_addr(hash));
-                                return entries;
-                        }
-                        gl = list_entry(tmp, struct gfs2_glock, gl_list);
-
-                        /* Move plug up list */
-                        list_move(&plug.gl_list, &gl->gl_list);
-
-                        if (test_bit(GLF_PLUG, &gl->gl_flags))
-                                continue;
-                        if (gl->gl_sbd != sdp)
-                                continue;
-
-                        /* examiner() must glock_put() */
+        struct gfs2_glock *gl, *prev = NULL;
+        int has_entries = 0;
+        struct list_head *head = &gl_hash_table[hash].hb_list;
+
+        read_lock(gl_lock_addr(hash));
+        /* Can't use list_for_each_entry - don't want prefetch here */
+        if (list_empty(head))
+                goto out;
+        has_entries = 1;
+        gl = list_entry(head->next, struct gfs2_glock, gl_list);
+        while(&gl->gl_list != head) {
+                if (gl->gl_sbd == sdp) {
                         gfs2_glock_hold(gl);
-
-                        break;
+                        read_unlock(gl_lock_addr(hash));
+                        if (prev)
+                                gfs2_glock_put(prev);
+                        prev = gl;
+                        examiner(gl);
+                        read_lock(gl_lock_addr(hash));
                 }
-
-                write_unlock(gl_lock_addr(hash));
-
-                examiner(gl);
+                gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
         }
+out:
+        read_unlock(gl_lock_addr(hash));
+        if (prev)
+                gfs2_glock_put(prev);
+        return has_entries;
 }
 
 /**
@@ -1955,23 +1931,19 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
 static void scan_glock(struct gfs2_glock *gl)
 {
         if (gl->gl_ops == &gfs2_inode_glops)
-                goto out;
+                return;
 
         if (gfs2_glmutex_trylock(gl)) {
                 if (queue_empty(gl, &gl->gl_holders) &&
-                    gl->gl_state != LM_ST_UNLOCKED &&
-                    demote_ok(gl))
+                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                         goto out_schedule;
                 gfs2_glmutex_unlock(gl);
         }
-out:
-        gfs2_glock_put(gl);
         return;
 
 out_schedule:
         gfs2_glmutex_unlock(gl);
         gfs2_glock_schedule_for_reclaim(gl);
-        gfs2_glock_put(gl);
 }
 
 /**
@@ -2014,11 +1986,8 @@ static void clear_glock(struct gfs2_glock *gl)
                 if (queue_empty(gl, &gl->gl_holders) &&
                     gl->gl_state != LM_ST_UNLOCKED)
                         handle_callback(gl, LM_ST_UNLOCKED);
-
                 gfs2_glmutex_unlock(gl);
         }
-
-        gfs2_glock_put(gl);
 }
 
 /**
@@ -2040,10 +2009,10 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
 
         for (;;) {
                 cont = 0;
-
-                for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
-                        if (examine_bucket(clear_glock, sdp, x))
+                for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
+                        if (examine_bucket(clear_glock, sdp, x))
                                 cont = 1;
+                }
 
                 if (!wait || !cont)
                         break;
@@ -2234,8 +2203,6 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
                 read_lock(gl_lock_addr(x));
 
                 list_for_each_entry(gl, &gl_hash_table[x].hb_list, gl_list) {
-                        if (test_bit(GLF_PLUG, &gl->gl_flags))
-                                continue;
                         if (gl->gl_sbd != sdp)
                                 continue;
 
@@ -152,7 +152,6 @@ struct gfs2_holder {
 };
 
 enum {
-        GLF_PLUG        = 0,
         GLF_LOCK        = 1,
         GLF_STICKY      = 2,
         GLF_PREFETCH    = 3,