Commit a8d770d9 authored by Dave Chinner and committed by Christoph Hellwig

xfs: use xfs_sync_inodes() for device flushing

Currently xfs_flush_device() calls sync_blockdev(), which is
a no-op for XFS as all its metadata is held in a different
address space to the one sync_blockdev() works on.

Call xfs_sync_inodes() instead to flush all the delayed
allocation blocks out. To do this as efficiently as possible,
do it via two passes - one to do an async flush of all the
dirty blocks and a second to wait for all the IO to complete.
This requires some modification to the xfs_sync_inodes_ag()
flush code to do this efficiently.

Signed-off-by: Dave Chinner <david@fromorbit.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Parent 9d7fef74
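For readers who want the flush pattern in isolation before reading the diff, here is a minimal, self-contained C sketch of the two-pass scheme the commit message describes: one pass that starts asynchronous writeback on every dirty inode it can trylock, and a second pass that comes back and waits for the resulting I/O. It only models the idea and is not the kernel code: the struct inode fields and the start_writeback()/wait_for_io() helpers are invented for illustration, while the SYNC_* flag values are the ones this patch defines in the header below.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

/* Invented stand-ins for the real XFS structures and helpers. */
struct inode {
	pthread_mutex_t iolock;      /* models the XFS iolock */
	bool            dirty;       /* models VN_DIRTY() */
	bool            io_pending;  /* models outstanding I/O (VN_CACHED) */
};

/* Flag values as defined in xfs_sync.h by this patch. */
#define SYNC_DELWRI   0x0002   /* look at delayed writes */
#define SYNC_IOWAIT   0x0010   /* wait for all I/O to complete */
#define SYNC_TRYLOCK  0x0020   /* only try to lock inodes */

static void start_writeback(struct inode *ip)   /* async flush: queue the I/O */
{
	ip->dirty = false;
	ip->io_pending = true;
}

static void wait_for_io(struct inode *ip)       /* block until queued I/O is done */
{
	ip->io_pending = false;
}

/*
 * One pass over a set of inodes, following the shape of the reworked
 * xfs_sync_inodes_ag(): flush only dirty inodes, take the iolock with a
 * trylock when SYNC_TRYLOCK is set (skipping contended inodes instead of
 * blocking), and wait for I/O completion only when SYNC_IOWAIT is set.
 */
static void sync_inodes(struct inode **inodes, size_t n, int flags)
{
	for (size_t i = 0; i < n; i++) {
		struct inode *ip = inodes[i];

		if (!(flags & SYNC_DELWRI))
			continue;

		if (ip->dirty) {
			bool locked = false;

			if (flags & SYNC_TRYLOCK) {
				locked = (pthread_mutex_trylock(&ip->iolock) == 0);
			} else {
				pthread_mutex_lock(&ip->iolock);
				locked = true;
			}
			if (locked) {
				start_writeback(ip);
				pthread_mutex_unlock(&ip->iolock);
			}
		}
		if (ip->io_pending && (flags & SYNC_IOWAIT))
			wait_for_io(ip);
	}
}

/*
 * The two-pass device flush: the first call starts writeback everywhere
 * without blocking on busy inodes, the second call waits for all of that
 * I/O to finish.
 */
static void flush_inodes(struct inode **inodes, size_t n)
{
	sync_inodes(inodes, n, SYNC_DELWRI | SYNC_TRYLOCK);
	sync_inodes(inodes, n, SYNC_DELWRI | SYNC_TRYLOCK | SYNC_IOWAIT);
}

In the real patch, the new xfs_flush_inodes_work() makes exactly these two xfs_sync_inodes() calls, and the trylock behaviour lives in the reworked xfs_sync_inodes_ag() hunk shown below.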
@@ -74,14 +74,14 @@ xfs_flush_pages(
 
 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
 		xfs_iflags_clear(ip, XFS_ITRUNCATED);
-		ret = filemap_fdatawrite(mapping);
-		if (flags & XFS_B_ASYNC)
-			return -ret;
-		ret2 = filemap_fdatawait(mapping);
-		if (!ret)
-			ret = ret2;
+		ret = -filemap_fdatawrite(mapping);
 	}
-	return -ret;
+	if (flags & XFS_B_ASYNC)
+		return ret;
+	ret2 = xfs_wait_on_pages(ip, first, last);
+	if (!ret)
+		ret = ret2;
+	return ret;
 }
 
 int
...
@@ -62,12 +62,6 @@ xfs_sync_inodes_ag(
 	uint32_t	first_index = 0;
 	int		error = 0;
 	int		last_error = 0;
-	int		fflag = XFS_B_ASYNC;
-
-	if (flags & SYNC_DELWRI)
-		fflag = XFS_B_DELWRI;
-	if (flags & SYNC_WAIT)
-		fflag = 0;		/* synchronous overrides all */
 
 	do {
 		struct inode	*inode;
@@ -128,11 +122,23 @@ xfs_sync_inodes_ag(
 		 * If we have to flush data or wait for I/O completion
 		 * we need to hold the iolock.
 		 */
-		if ((flags & SYNC_DELWRI) && VN_DIRTY(inode)) {
-			xfs_ilock(ip, XFS_IOLOCK_SHARED);
-			lock_flags |= XFS_IOLOCK_SHARED;
-			error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE);
-			if (flags & SYNC_IOWAIT)
+		if (flags & SYNC_DELWRI) {
+			if (VN_DIRTY(inode)) {
+				if (flags & SYNC_TRYLOCK) {
+					if (xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
+						lock_flags |= XFS_IOLOCK_SHARED;
+				} else {
+					xfs_ilock(ip, XFS_IOLOCK_SHARED);
+					lock_flags |= XFS_IOLOCK_SHARED;
+				}
+				if (lock_flags & XFS_IOLOCK_SHARED) {
+					error = xfs_flush_pages(ip, 0, -1,
+							(flags & SYNC_WAIT) ? 0
+								: XFS_B_ASYNC,
+							FI_NONE);
+				}
+			}
+			if (VN_CACHED(inode) && (flags & SYNC_IOWAIT))
 				xfs_ioend_wait(ip);
 		}
 		xfs_ilock(ip, XFS_ILOCK_SHARED);
@@ -400,9 +406,9 @@ xfs_syncd_queue_work(
 	void		*data,
 	void		(*syncer)(struct xfs_mount *, void *))
 {
-	struct bhv_vfs_sync_work *work;
+	struct xfs_sync_work *work;
 
-	work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
+	work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
 	INIT_LIST_HEAD(&work->w_list);
 	work->w_syncer = syncer;
 	work->w_data = data;
@@ -445,23 +451,24 @@ xfs_flush_inode(
  * (IOW, "If at first you don't succeed, use a Bigger Hammer").
  */
 STATIC void
-xfs_flush_device_work(
+xfs_flush_inodes_work(
 	struct xfs_mount *mp,
 	void		*arg)
 {
 	struct inode	*inode = arg;
-	sync_blockdev(mp->m_super->s_bdev);
+	xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK);
+	xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK | SYNC_IOWAIT);
 	iput(inode);
 }
 
 void
-xfs_flush_device(
+xfs_flush_inodes(
 	xfs_inode_t	*ip)
 {
 	struct inode	*inode = VFS_I(ip);
 
 	igrab(inode);
-	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
+	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work);
 	delay(msecs_to_jiffies(500));
 	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
 }
@@ -497,7 +504,7 @@ xfssyncd(
 {
 	struct xfs_mount *mp = arg;
 	long		timeleft;
-	bhv_vfs_sync_work_t *work, *n;
+	xfs_sync_work_t	*work, *n;
 	LIST_HEAD	(tmp);
 
 	set_freezable();
...
@@ -21,18 +21,19 @@
 struct xfs_mount;
 struct xfs_perag;
 
-typedef struct bhv_vfs_sync_work {
+typedef struct xfs_sync_work {
 	struct list_head	w_list;
 	struct xfs_mount	*w_mount;
 	void			*w_data;	/* syncer routine argument */
 	void			(*w_syncer)(struct xfs_mount *, void *);
-} bhv_vfs_sync_work_t;
+} xfs_sync_work_t;
 
 #define SYNC_ATTR		0x0001	/* sync attributes */
 #define SYNC_DELWRI		0x0002	/* look at delayed writes */
 #define SYNC_WAIT		0x0004	/* wait for i/o to complete */
 #define SYNC_BDFLUSH		0x0008	/* BDFLUSH is calling -- don't block */
 #define SYNC_IOWAIT		0x0010	/* wait for all I/O to complete */
+#define SYNC_TRYLOCK		0x0020	/* only try to lock inodes */
 
 int xfs_syncd_init(struct xfs_mount *mp);
 void xfs_syncd_stop(struct xfs_mount *mp);
@@ -44,7 +45,7 @@ int xfs_quiesce_data(struct xfs_mount *mp);
 void xfs_quiesce_attr(struct xfs_mount *mp);
 
 void xfs_flush_inode(struct xfs_inode *ip);
-void xfs_flush_device(struct xfs_inode *ip);
+void xfs_flush_inodes(struct xfs_inode *ip);
 
 int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode);
 int xfs_reclaim_inodes(struct xfs_mount *mp, int noblock, int mode);
...
@@ -361,7 +361,7 @@ xfs_flush_space(
 		return 0;
 	case 2:
 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-		xfs_flush_device(ip);
+		xfs_flush_inodes(ip);
 		xfs_ilock(ip, XFS_ILOCK_EXCL);
 		*fsynced = 3;
 		return 0;
...
@@ -313,7 +313,7 @@ typedef struct xfs_mount {
 #endif
 	struct xfs_mru_cache	*m_filestream;	/* per-mount filestream data */
 	struct task_struct	*m_sync_task;	/* generalised sync thread */
-	bhv_vfs_sync_work_t	m_sync_work;	/* work item for VFS_SYNC */
+	xfs_sync_work_t		m_sync_work;	/* work item for VFS_SYNC */
 	struct list_head	m_sync_list;	/* sync thread work item list */
 	spinlock_t		m_sync_lock;	/* work item list lock */
 	int			m_sync_seq;	/* sync thread generation no. */
...