Commit 4bc1ea6b authored by Dave Chinner, committed by Ben Myers

xfs: remove xfs_flush_pages

xfs_flush_pages() is a complex wrapper around VFS functions, but there are
VFS functions that provide exactly the same functionality. Call the VFS
functions directly and remove the unnecessary indirection and
complexity.

We don't need to care about clearing the XFS_ITRUNCATED flag, as
that is done during .writepages; hence it is cleared by the VFS
writeback path if there is anything to write back during the flush.
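
For context, the .writepages hook referred to above is xfs_vm_writepages(); a minimal sketch of how it looks in fs/xfs/xfs_aops.c around this commit (shown for reference only, not part of this patch):

    STATIC int
    xfs_vm_writepages(
            struct address_space    *mapping,
            struct writeback_control *wbc)
    {
            /* Writeback clears XFS_ITRUNCATED here, which is why the
             * removed wrapper no longer needs to clear it itself. */
            xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
            return generic_writepages(mapping, wbc);
    }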
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Andrew Dahl <adahl@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
Parent 95eacf0f
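
To make the hunks below easier to follow, here is the rough correspondence between the removed wrapper's calling conventions and the VFS helpers that replace them, as drawn from this diff. XFS still uses positive error numbers internally at this point, so the negative errno values returned by the VFS helpers are negated at the converted call sites:

    xfs_flush_pages(ip, 0, -1, 0, FI_REMAPF)          ->  filemap_write_and_wait(mapping)
    xfs_flush_pages(ip, first, last, 0, FI_NONE)       ->  filemap_write_and_wait_range(mapping, first, last)
    xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE)     ->  filemap_flush(mapping)   /* async writeback, no wait */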
@@ -1641,7 +1641,7 @@ xfs_vm_bmap(
         trace_xfs_vm_bmap(XFS_I(inode));
         xfs_ilock(ip, XFS_IOLOCK_SHARED);
-        xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
+        filemap_write_and_wait(mapping);
         xfs_iunlock(ip, XFS_IOLOCK_SHARED);
         return generic_block_bmap(mapping, block, xfs_get_blocks);
 }
@@ -5599,7 +5599,7 @@ xfs_getbmap(
         xfs_ilock(ip, XFS_IOLOCK_SHARED);
         if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
                 if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) {
-                        error = xfs_flush_pages(ip, 0, -1, 0, FI_REMAPF);
+                        error = -filemap_write_and_wait(VFS_I(ip)->i_mapping);
                         if (error)
                                 goto out_unlock_iolock;
                 }
@@ -44,27 +44,3 @@ xfs_flushinval_pages(
         truncate_inode_pages_range(mapping, first, last);
         return -ret;
 }
-
-int
-xfs_flush_pages(
-        xfs_inode_t     *ip,
-        xfs_off_t       first,
-        xfs_off_t       last,
-        uint64_t        flags,
-        int             fiopt)
-{
-        struct address_space *mapping = VFS_I(ip)->i_mapping;
-        int             ret = 0;
-        int             ret2;
-
-        xfs_iflags_clear(ip, XFS_ITRUNCATED);
-        ret = -filemap_fdatawrite_range(mapping, first,
-                                last == -1 ? LLONG_MAX : last);
-        if (flags & XBF_ASYNC)
-                return ret;
-        ret2 = -filemap_fdatawait_range(mapping, first,
-                                last == -1 ? XFS_ISIZE(ip) - 1 : last);
-        if (!ret)
-                ret = ret2;
-        return ret;
-}
@@ -780,8 +780,8 @@ xfs_setattr_size(
          * care about here.
          */
         if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) {
-                error = xfs_flush_pages(ip, ip->i_d.di_size, newsize, 0,
-                                        FI_NONE);
+                error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+                                                      ip->i_d.di_size, newsize);
                 if (error)
                         goto out_unlock;
         }
@@ -428,8 +428,11 @@ xfs_release(
                 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
                 if (truncated) {
                         xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
-                        if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
-                                xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
+                        if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0) {
+                                error = -filemap_flush(VFS_I(ip)->i_mapping);
+                                if (error)
+                                        return error;
+                        }
                 }
         }
@@ -50,8 +50,6 @@ int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
                 int flags, struct attrlist_cursor_kern *cursor);
 int xfs_flushinval_pages(struct xfs_inode *ip, xfs_off_t first,
                 xfs_off_t last, int fiopt);
-int xfs_flush_pages(struct xfs_inode *ip, xfs_off_t first,
-                xfs_off_t last, uint64_t flags, int fiopt);
 int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
 int xfs_free_eofblocks(struct xfs_mount *, struct xfs_inode *, bool);