Commit 320dd101 authored by: Steven Whitehouse

[GFS2] glock debugging and inode cache changes

This adds some extra debugging to glock.c and changes
inode.c's deallocation code to call the debugging code
at a suitable moment. I'm currently chasing down a
particular bug to do with deallocation, and the debugging
code can be removed again once that bug is fixed.

This also includes the first part of some changes to unify
the Linux struct inode and GFS2's struct gfs2_inode. This
transformation will happen in small steps over a short
period.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Parent 3a8a9a10
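The debugging half of the patch is easier to follow with the intent in mind: whenever the per-glock mutex is taken, the glock now records which task took it (gl_owner) and the caller's return address (gl_ip); dump_glock() later prints both, using print_symbol() to resolve gl_ip back into a function name. Below is a minimal userspace sketch of that idea, assuming pthreads as a stand-in for the kernel primitives; the demo_* names are illustrative and not part of the patch, while __builtin_return_address(0) is the same GCC builtin the patch itself uses.

/*
 * A minimal userspace sketch (not the kernel code) of the new debugging
 * fields: record who holds the lock and the return address of the caller
 * that acquired it, then print both when dumping state.  The demo_* names
 * are illustrative only.
 */
#include <stdio.h>
#include <pthread.h>

struct demo_glock {
	pthread_mutex_t lock;
	unsigned long owner;	/* analogue of gl->gl_owner (task pointer) */
	unsigned long ip;	/* analogue of gl->gl_ip */
};

static void demo_glmutex_lock(struct demo_glock *gl)
{
	pthread_mutex_lock(&gl->lock);
	gl->owner = (unsigned long)pthread_self();
	gl->ip = (unsigned long)__builtin_return_address(0);
}

static void demo_glmutex_unlock(struct demo_glock *gl)
{
	gl->owner = 0;
	gl->ip = 0;
	pthread_mutex_unlock(&gl->lock);
}

static void demo_dump_glock(const struct demo_glock *gl)
{
	/* the kernel uses print_symbol() to turn gl_ip into a symbol name */
	printf("gl_owner = %#lx, gl_ip = %#lx\n", gl->owner, gl->ip);
}

int main(void)
{
	struct demo_glock gl = { .lock = PTHREAD_MUTEX_INITIALIZER };

	demo_glmutex_lock(&gl);
	demo_dump_glock(&gl);
	demo_glmutex_unlock(&gl);
	return 0;
}

The new GL_DUMP holder flag ties this together: when a TRY lock fails during inode deallocation (the LM_FLAG_TRY_1CB|GL_DUMP caller in inode.c below), gfs2_glock_nq() dumps the glock's state, including the new gl_owner and gl_ip fields.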
......@@ -47,6 +47,7 @@ struct greedy {
typedef void (*glock_examiner) (struct gfs2_glock * gl);
static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct gfs2_glock *gl);
/**
* relaxed_state_ok - is a requested lock compatible with the current lock mode?
......@@ -290,6 +291,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
spin_lock_init(&gl->gl_spin);
gl->gl_state = LM_ST_UNLOCKED;
gl->gl_owner = NULL;
gl->gl_ip = 0;
INIT_LIST_HEAD(&gl->gl_holders);
INIT_LIST_HEAD(&gl->gl_waiters1);
INIT_LIST_HEAD(&gl->gl_waiters2);
......@@ -661,8 +664,11 @@ void gfs2_glmutex_lock(struct gfs2_glock *gl)
spin_lock(&gl->gl_spin);
if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
list_add_tail(&gh.gh_list, &gl->gl_waiters1);
else
else {
gl->gl_owner = current;
gl->gl_ip = (unsigned long)__builtin_return_address(0);
complete(&gh.gh_wait);
}
spin_unlock(&gl->gl_spin);
wait_for_completion(&gh.gh_wait);
......@@ -683,6 +689,10 @@ static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
spin_lock(&gl->gl_spin);
if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
acquired = 0;
else {
gl->gl_owner = current;
gl->gl_ip = (unsigned long)__builtin_return_address(0);
}
spin_unlock(&gl->gl_spin);
return acquired;
......@@ -698,6 +708,8 @@ void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
spin_lock(&gl->gl_spin);
clear_bit(GLF_LOCK, &gl->gl_flags);
gl->gl_owner = NULL;
gl->gl_ip = 0;
run_queue(gl);
BUG_ON(!spin_is_locked(&gl->gl_spin));
spin_unlock(&gl->gl_spin);
......@@ -1173,7 +1185,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
struct gfs2_sbd *sdp = gl->gl_sbd;
int error = 0;
restart:
restart:
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
set_bit(HIF_ABORTED, &gh->gh_iflags);
return -EIO;
......@@ -1196,6 +1208,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
clear_bit(GLF_PREFETCH, &gl->gl_flags);
if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
dump_glock(gl);
return error;
}
......@@ -2212,9 +2227,8 @@ static int dump_glock(struct gfs2_glock *gl)
spin_lock(&gl->gl_spin);
printk(KERN_INFO "Glock (%u, %llu)\n",
gl->gl_name.ln_type,
gl->gl_name.ln_number);
printk(KERN_INFO "Glock (%u, %llu)\n", gl->gl_name.ln_type,
gl->gl_name.ln_number);
printk(KERN_INFO " gl_flags =");
for (x = 0; x < 32; x++)
if (test_bit(x, &gl->gl_flags))
......@@ -2222,6 +2236,8 @@ static int dump_glock(struct gfs2_glock *gl)
printk(" \n");
printk(KERN_INFO " gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
printk(KERN_INFO " gl_state = %u\n", gl->gl_state);
printk(KERN_INFO " gl_owner = %s\n", gl->gl_owner->comm);
print_symbol(KERN_INFO " gl_ip = %s\n", gl->gl_ip);
printk(KERN_INFO " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
printk(KERN_INFO " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
printk(KERN_INFO " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
......
......@@ -27,6 +27,7 @@
#define GL_SYNC 0x00000800
#define GL_NOCANCEL 0x00001000
#define GL_AOP 0x00004000
#define GL_DUMP 0x00008000
#define GLR_TRYFAILED 13
#define GLR_CANCELED 14
......
......@@ -183,6 +183,8 @@ struct gfs2_glock {
spinlock_t gl_spin;
unsigned int gl_state;
struct task_struct *gl_owner;
unsigned long gl_ip;
struct list_head gl_holders;
struct list_head gl_waiters1; /* HIF_MUTEX */
struct list_head gl_waiters2; /* HIF_DEMOTE, HIF_GREEDY */
......@@ -244,6 +246,7 @@ enum {
};
struct gfs2_inode {
struct inode i_inode;
struct gfs2_inum i_num;
atomic_t i_count;
......@@ -270,6 +273,11 @@ struct gfs2_inode {
struct buffer_head *i_cache[GFS2_MAX_META_HEIGHT];
};
static inline struct gfs2_inode *GFS2_I(struct inode *inode)
{
return container_of(inode, struct gfs2_inode, i_inode);
}
enum {
GFF_DID_DIRECT_ALLOC = 0,
};
......
......@@ -504,7 +504,7 @@ static int inode_dealloc(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul,
error = gfs2_glock_nq_num(sdp, ul->ul_ut.ut_inum.no_addr,
&gfs2_inode_glops, LM_ST_EXCLUSIVE,
LM_FLAG_TRY_1CB, &i_gh);
LM_FLAG_TRY_1CB|GL_DUMP, &i_gh);
switch(error) {
case 0:
break;
......@@ -724,9 +724,8 @@ struct inode *gfs2_lookupi(struct inode *dir, struct qstr *name, int is_root,
if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
(name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
dir == sb->s_root->d_inode)) {
gfs2_inode_hold(dip);
ipp = dip;
goto done;
igrab(dir);
return dir;
}
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
......@@ -734,7 +733,7 @@ struct inode *gfs2_lookupi(struct inode *dir, struct qstr *name, int is_root,
return ERR_PTR(error);
if (!is_root) {
error = gfs2_repermission(dip->i_vnode, MAY_EXEC, NULL);
error = gfs2_repermission(dir, MAY_EXEC, NULL);
if (error)
goto out;
}
......@@ -756,7 +755,6 @@ struct inode *gfs2_lookupi(struct inode *dir, struct qstr *name, int is_root,
out:
gfs2_glock_dq_uninit(&d_gh);
done:
if (error == -ENOENT)
return NULL;
if (error == 0) {
......@@ -1058,7 +1056,6 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
int error;
munge_mode_uid_gid(dip, &mode, &uid, &gid);
gfs2_alloc_get(dip);
error = gfs2_quota_lock(dip, uid, gid);
......@@ -1069,19 +1066,14 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
if (error)
goto out_quota;
error = gfs2_trans_begin(sdp, RES_DINODE + RES_UNLINKED +
RES_QUOTA, 0);
error = gfs2_trans_begin(sdp, RES_DINODE + RES_UNLINKED + RES_QUOTA, 0);
if (error)
goto out_quota;
ul->ul_ut.ut_flags = 0;
error = gfs2_unlinked_ondisk_munge(sdp, ul);
init_dinode(dip, gl, &ul->ul_ut.ut_inum,
mode, uid, gid);
init_dinode(dip, gl, &ul->ul_ut.ut_inum, mode, uid, gid);
gfs2_quota_change(dip, +1, uid, gid);
gfs2_trans_end(sdp);
out_quota:
......@@ -1089,7 +1081,6 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
out:
gfs2_alloc_put(dip);
return error;
}
......@@ -1123,8 +1114,7 @@ static int link_dinode(struct gfs2_inode *dip, struct qstr *name,
if (error)
goto fail_quota_locks;
error = gfs2_trans_begin(sdp,
sdp->sd_max_dirres +
error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
al->al_rgd->rd_ri.ri_length +
2 * RES_DINODE + RES_UNLINKED +
RES_STATFS + RES_QUOTA, 0);
......@@ -1157,19 +1147,18 @@ static int link_dinode(struct gfs2_inode *dip, struct qstr *name,
return 0;
fail_end_trans:
fail_end_trans:
gfs2_trans_end(sdp);
fail_ipreserv:
fail_ipreserv:
if (dip->i_alloc.al_rgd)
gfs2_inplace_release(dip);
fail_quota_locks:
fail_quota_locks:
gfs2_quota_unlock(dip);
fail:
fail:
gfs2_alloc_put(dip);
return error;
}
......@@ -1226,11 +1215,9 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, struct qstr *name,
if (ul->ul_ut.ut_inum.no_addr < dip->i_num.no_addr) {
gfs2_glock_dq(ghs);
error = gfs2_glock_nq_num(sdp,
ul->ul_ut.ut_inum.no_addr,
&gfs2_inode_glops,
LM_ST_EXCLUSIVE, GL_SKIP,
ghs + 1);
error = gfs2_glock_nq_num(sdp, ul->ul_ut.ut_inum.no_addr,
&gfs2_inode_glops, LM_ST_EXCLUSIVE,
GL_SKIP, ghs + 1);
if (error) {
gfs2_unlinked_put(sdp, ul);
return ERR_PTR(error);
......@@ -1248,11 +1235,9 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, struct qstr *name,
if (error)
goto fail_gunlock2;
} else {
error = gfs2_glock_nq_num(sdp,
ul->ul_ut.ut_inum.no_addr,
&gfs2_inode_glops,
LM_ST_EXCLUSIVE, GL_SKIP,
ghs + 1);
error = gfs2_glock_nq_num(sdp, ul->ul_ut.ut_inum.no_addr,
&gfs2_inode_glops, LM_ST_EXCLUSIVE,
GL_SKIP, ghs + 1);
if (error)
goto fail_gunlock;
}
......@@ -1285,18 +1270,17 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, struct qstr *name,
return ERR_PTR(-ENOMEM);
return inode;
fail_iput:
fail_iput:
gfs2_inode_put(ip);
fail_gunlock2:
fail_gunlock2:
gfs2_glock_dq_uninit(ghs + 1);
fail_gunlock:
fail_gunlock:
gfs2_glock_dq(ghs);
fail:
fail:
gfs2_unlinked_put(sdp, ul);
return ERR_PTR(error);
}
......
......@@ -23,6 +23,20 @@
#include "sys.h"
#include "util.h"
static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
struct gfs2_inode *ip = foo;
if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
SLAB_CTOR_CONSTRUCTOR) {
inode_init_once(&ip->i_inode);
atomic_set(&ip->i_count, 0);
ip->i_vnode = &ip->i_inode;
spin_lock_init(&ip->i_spin);
init_rwsem(&ip->i_rw_mutex);
memset(ip->i_cache, 0, sizeof(ip->i_cache));
}
}
/**
* init_gfs2_fs - Register GFS2 as a filesystem
*
......@@ -49,7 +63,9 @@ static int __init init_gfs2_fs(void)
gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
sizeof(struct gfs2_inode),
0, 0, NULL, NULL);
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_PANIC|SLAB_MEM_SPREAD),
gfs2_init_inode_once, NULL);
if (!gfs2_inode_cachep)
goto fail;
......
......@@ -361,7 +361,31 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
return 0;
}
static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_inode *ip;
ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
if (ip) {
ip->i_flags = 0;
ip->i_gl = NULL;
ip->i_sbd = sdp;
ip->i_vnode = &ip->i_inode;
ip->i_greedy = gfs2_tune_get(sdp, gt_greedy_default);
ip->i_last_pfault = jiffies;
}
return &ip->i_inode;
}
static void gfs2_destroy_inode(struct inode *inode)
{
kmem_cache_free(gfs2_inode_cachep, inode);
}
struct super_operations gfs2_super_ops = {
.alloc_inode = gfs2_alloc_inode,
.destroy_inode = gfs2_destroy_inode,
.write_inode = gfs2_write_inode,
.put_super = gfs2_put_super,
.write_super = gfs2_write_super,
......
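The inode-unification half of the patch embeds struct inode (as i_inode) at the head of struct gfs2_inode, adds a GFS2_I() accessor built on container_of(), gives the slab cache a constructor that runs inode_init_once() on the embedded inode, and wires up ->alloc_inode/->destroy_inode so the VFS and GFS2 halves are allocated and freed as one object. The sketch below shows the same idiom in plain userspace C; the names vfs_inode, demo_gfs2_inode, DEMO_I and demo_alloc_inode are illustrative stand-ins for their kernel counterparts, not the kernel definitions.

/*
 * Userspace sketch of the embedded-inode idiom the patch moves towards.
 * The VFS-visible object lives inside the filesystem's own structure, and
 * container_of() recovers the outer structure from a pointer to it.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vfs_inode {			/* stands in for struct inode */
	unsigned long ino;
};

struct demo_gfs2_inode {		/* stands in for struct gfs2_inode */
	struct vfs_inode i_inode;	/* embedded, as in the patch */
	unsigned int flags;
};

/* stands in for GFS2_I(): map the generic inode back to the fs-private one */
static struct demo_gfs2_inode *DEMO_I(struct vfs_inode *inode)
{
	return container_of(inode, struct demo_gfs2_inode, i_inode);
}

/* stands in for ->alloc_inode: one allocation covers both structures */
static struct vfs_inode *demo_alloc_inode(void)
{
	struct demo_gfs2_inode *ip = calloc(1, sizeof(*ip));

	return ip ? &ip->i_inode : NULL;
}

int main(void)
{
	struct vfs_inode *inode = demo_alloc_inode();
	struct demo_gfs2_inode *ip;

	if (!inode)
		return 1;
	ip = DEMO_I(inode);
	ip->i_inode.ino = 42;
	printf("ino = %lu, flags = %u\n", inode->ino, ip->flags);
	free(ip);			/* stands in for ->destroy_inode */
	return 0;
}

Once every gfs2_inode carries its struct inode this way, generic VFS helpers such as igrab() and iput() can replace the private gfs2_inode_hold()/gfs2_inode_put() reference counting, which is what the gfs2_lookupi() hunk above already begins to do.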