Commit 4fd1a579 authored by Andreas Gruenbacher, committed by Bob Peterson

gfs2: Get rid of flush_delayed_work in gfs2_evict_inode

So far, gfs2_evict_inode clears gl->gl_object and then flushes the glock
work queue to make sure that inode glops which dereference gl->gl_object
have finished running before the inode is destroyed.  However, flushing
the work queue may do more work than needed, and in particular, it may
call into DLM, which we want to avoid here.  Use a bit lock
(GIF_GLOP_PENDING) to synchronize between the inode glops and
gfs2_evict_inode instead to get rid of the flushing.
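
To make the handshake concrete, here is a minimal userspace analogue of the pattern (illustrative code only, not part of the patch: a pthread mutex and condition variable stand in for the glock spinlock and the kernel's wait_on_bit()/wake_up_bit() machinery, and every name in it is invented for the sketch, modeled on gfs2_glock2inode(), gfs2_clear_glop_pending(), and gfs2_evict_inode() in the hunks below):

/*
 * Userspace analogue of the GIF_GLOP_PENDING handshake.
 * Build: cc -pthread glop_pending_demo.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct glock {
        pthread_mutex_t lock;   /* role of gl->gl_lockref.lock */
        pthread_cond_t done;    /* role of the GIF_GLOP_PENDING waitqueue */
        void *object;           /* role of gl->gl_object */
        bool glop_pending;      /* role of the GIF_GLOP_PENDING bit */
};

/* Glop side: fetch a stable object pointer and mark an operation in
 * flight, all under the lock (cf. gfs2_glock2inode()). */
static void *glock2object(struct glock *gl)
{
        void *obj;

        pthread_mutex_lock(&gl->lock);
        obj = gl->object;
        if (obj)
                gl->glop_pending = true;
        pthread_mutex_unlock(&gl->lock);
        return obj;
}

/* Glop side: done with the object, wake the evictor
 * (cf. gfs2_clear_glop_pending()). */
static void clear_glop_pending(struct glock *gl, void *obj)
{
        if (!obj)
                return;
        pthread_mutex_lock(&gl->lock);
        gl->glop_pending = false;
        pthread_cond_broadcast(&gl->done);
        pthread_mutex_unlock(&gl->lock);
}

/* Eviction side: detach the object, then wait only for a glop that
 * already saw the old pointer (cf. glock_set_object(..., NULL) followed
 * by wait_on_bit_io()); no work-queue flush needed. */
static void evict(struct glock *gl)
{
        pthread_mutex_lock(&gl->lock);
        gl->object = NULL;
        while (gl->glop_pending)
                pthread_cond_wait(&gl->done, &gl->lock);
        pthread_mutex_unlock(&gl->lock);
}

static struct glock g = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .done = PTHREAD_COND_INITIALIZER,
        .object = &g,           /* any non-NULL object */
};

static void *glop_worker(void *arg)
{
        void *obj = glock2object(&g);

        (void)arg;
        if (obj) {
                usleep(100 * 1000);     /* pretend to sync/invalidate */
                puts("glop: finished, clearing pending bit");
        }
        clear_glop_pending(&g, obj);
        return NULL;
}

int main(void)
{
        pthread_t worker;

        pthread_create(&worker, NULL, glop_worker, NULL);
        usleep(10 * 1000);      /* let the glop pick up the object */
        evict(&g);              /* returns once the pending glop is done */
        puts("evict: no glop pending, safe to tear down the inode");
        pthread_join(&worker, NULL);
        return 0;
}

The effect is the same as in the patch: the evicting side blocks only while an operation that has already fetched the object pointer is still in flight, rather than waiting for everything queued against the glock.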

In addition, flush the work queues of existing glocks before reusing
them for new inodes to get those glocks into a known state: the glock
state engine currently doesn't handle glock re-appropriation correctly.
(We may be able to fix the glock state engine instead later.)

Based on a patch by Steven Whitehouse <swhiteho@redhat.com>.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Parent 722f6f62
fs/gfs2/glock.h
@@ -257,4 +257,11 @@ static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
 	return gh->gh_gl;
 }
 
+static inline void glock_set_object(struct gfs2_glock *gl, void *object)
+{
+	spin_lock(&gl->gl_lockref.lock);
+	gl->gl_object = object;
+	spin_unlock(&gl->gl_lockref.lock);
+}
+
 #endif /* __GLOCK_DOT_H__ */
fs/gfs2/glops.c
@@ -197,6 +197,27 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
 }
 
+static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
+{
+	struct gfs2_inode *ip;
+
+	spin_lock(&gl->gl_lockref.lock);
+	ip = gl->gl_object;
+	if (ip)
+		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
+	spin_unlock(&gl->gl_lockref.lock);
+	return ip;
+}
+
+static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
+{
+	if (!ip)
+		return;
+
+	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
+	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
+}
+
 /**
  * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
  * @gl: the glock protecting the inode
@@ -205,25 +226,24 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 static void inode_go_sync(struct gfs2_glock *gl)
 {
-	struct gfs2_inode *ip = gl->gl_object;
+	struct gfs2_inode *ip = gfs2_glock2inode(gl);
+	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
 	struct address_space *metamapping = gfs2_glock2aspace(gl);
 	int error;
 
-	if (ip && !S_ISREG(ip->i_inode.i_mode))
-		ip = NULL;
-	if (ip) {
+	if (isreg) {
 		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
 			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
 		inode_dio_wait(&ip->i_inode);
 	}
 
 	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
-		return;
+		goto out;
 
 	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
 
 	gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH);
 	filemap_fdatawrite(metamapping);
-	if (ip) {
+	if (isreg) {
 		struct address_space *mapping = ip->i_inode.i_mapping;
 		filemap_fdatawrite(mapping);
 		error = filemap_fdatawait(mapping);
@@ -238,6 +258,9 @@ static void inode_go_sync(struct gfs2_glock *gl)
 	 */
 	smp_mb__before_atomic();
 	clear_bit(GLF_DIRTY, &gl->gl_flags);
+
+out:
+	gfs2_clear_glop_pending(ip);
 }
 
 /**
@@ -253,7 +276,7 @@ static void inode_go_sync(struct gfs2_glock *gl)
 
 static void inode_go_inval(struct gfs2_glock *gl, int flags)
 {
-	struct gfs2_inode *ip = gl->gl_object;
+	struct gfs2_inode *ip = gfs2_glock2inode(gl);
 
 	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
@@ -274,6 +297,8 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
 	}
 	if (ip && S_ISREG(ip->i_inode.i_mode))
 		truncate_inode_pages(ip->i_inode.i_mapping, 0);
+
+	gfs2_clear_glop_pending(ip);
 }
 
 /**
...
fs/gfs2/incore.h
@@ -385,6 +385,7 @@ enum {
 	GIF_SW_PAGED		= 3,
 	GIF_ORDERED		= 4,
 	GIF_FREE_VFS_INODE	= 5,
+	GIF_GLOP_PENDING	= 6,
 };
 
 struct gfs2_inode {
...
fs/gfs2/inode.c
@@ -144,7 +144,8 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
 		if (unlikely(error))
 			goto fail;
-		ip->i_gl->gl_object = ip;
+		flush_delayed_work(&ip->i_gl->gl_work);
+		glock_set_object(ip->i_gl, ip);
 
 		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
 		if (unlikely(error))
@@ -173,8 +174,8 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
 		if (unlikely(error))
 			goto fail_put;
-
-		ip->i_iopen_gh.gh_gl->gl_object = ip;
+		flush_delayed_work(&ip->i_iopen_gh.gh_gl->gl_work);
+		glock_set_object(ip->i_iopen_gh.gh_gl, ip);
 		gfs2_glock_put(io_gl);
 		io_gl = NULL;
...
fs/gfs2/super.c
@@ -1631,8 +1631,8 @@ static void gfs2_evict_inode(struct inode *inode)
 	gfs2_ordered_del_inode(ip);
 	clear_inode(inode);
 	gfs2_dir_hash_inval(ip);
-	ip->i_gl->gl_object = NULL;
-	flush_delayed_work(&ip->i_gl->gl_work);
+	glock_set_object(ip->i_gl, NULL);
+	wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
 	gfs2_glock_add_to_lru(ip->i_gl);
 	gfs2_glock_put(ip->i_gl);
 	ip->i_gl = NULL;
...