提交 690c2a38 编写于 作者: C Christoph Hellwig 提交者: Darrick J. Wong

xfs: split out a new set of read-only iomap ops

Start untangling xfs_file_iomap_begin by splitting out the read-only
case into its own set of iomap_ops with a very simple iomap_begin
helper.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
上级 43568226
...@@ -634,7 +634,7 @@ xfs_vm_bmap( ...@@ -634,7 +634,7 @@ xfs_vm_bmap(
*/ */
if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip)) if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
return 0; return 0;
return iomap_bmap(mapping, block, &xfs_iomap_ops); return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
} }
STATIC int STATIC int
...@@ -642,7 +642,7 @@ xfs_vm_readpage( ...@@ -642,7 +642,7 @@ xfs_vm_readpage(
struct file *unused, struct file *unused,
struct page *page) struct page *page)
{ {
return iomap_readpage(page, &xfs_iomap_ops); return iomap_readpage(page, &xfs_read_iomap_ops);
} }
STATIC int STATIC int
...@@ -652,7 +652,7 @@ xfs_vm_readpages( ...@@ -652,7 +652,7 @@ xfs_vm_readpages(
struct list_head *pages, struct list_head *pages,
unsigned nr_pages) unsigned nr_pages)
{ {
return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops); return iomap_readpages(mapping, pages, nr_pages, &xfs_read_iomap_ops);
} }
static int static int
...@@ -662,7 +662,8 @@ xfs_iomap_swapfile_activate( ...@@ -662,7 +662,8 @@ xfs_iomap_swapfile_activate(
sector_t *span) sector_t *span)
{ {
sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file)); sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops); return iomap_swapfile_activate(sis, swap_file, span,
&xfs_read_iomap_ops);
} }
const struct address_space_operations xfs_address_space_operations = { const struct address_space_operations xfs_address_space_operations = {
......
...@@ -188,7 +188,8 @@ xfs_file_dio_aio_read( ...@@ -188,7 +188,8 @@ xfs_file_dio_aio_read(
file_accessed(iocb->ki_filp); file_accessed(iocb->ki_filp);
xfs_ilock(ip, XFS_IOLOCK_SHARED); xfs_ilock(ip, XFS_IOLOCK_SHARED);
ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL, is_sync_kiocb(iocb)); ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL,
is_sync_kiocb(iocb));
xfs_iunlock(ip, XFS_IOLOCK_SHARED); xfs_iunlock(ip, XFS_IOLOCK_SHARED);
return ret; return ret;
...@@ -215,7 +216,7 @@ xfs_file_dax_read( ...@@ -215,7 +216,7 @@ xfs_file_dax_read(
xfs_ilock(ip, XFS_IOLOCK_SHARED); xfs_ilock(ip, XFS_IOLOCK_SHARED);
} }
ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops); ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
xfs_iunlock(ip, XFS_IOLOCK_SHARED); xfs_iunlock(ip, XFS_IOLOCK_SHARED);
file_accessed(iocb->ki_filp); file_accessed(iocb->ki_filp);
...@@ -1153,7 +1154,9 @@ __xfs_filemap_fault( ...@@ -1153,7 +1154,9 @@ __xfs_filemap_fault(
if (IS_DAX(inode)) { if (IS_DAX(inode)) {
pfn_t pfn; pfn_t pfn;
ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops); ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
(write_fault && !vmf->cow_page) ?
&xfs_iomap_ops : &xfs_read_iomap_ops);
if (ret & VM_FAULT_NEEDDSYNC) if (ret & VM_FAULT_NEEDDSYNC)
ret = dax_finish_sync_fault(vmf, pe_size, pfn); ret = dax_finish_sync_fault(vmf, pe_size, pfn);
} else { } else {
......
...@@ -950,11 +950,13 @@ xfs_file_iomap_begin( ...@@ -950,11 +950,13 @@ xfs_file_iomap_begin(
u16 iomap_flags = 0; u16 iomap_flags = 0;
unsigned lockmode; unsigned lockmode;
ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));
if (XFS_FORCED_SHUTDOWN(mp)) if (XFS_FORCED_SHUTDOWN(mp))
return -EIO; return -EIO;
if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && !(flags & IOMAP_DIRECT) && if (!(flags & IOMAP_DIRECT) && !IS_DAX(inode) &&
!IS_DAX(inode) && !xfs_get_extsz_hint(ip)) { !xfs_get_extsz_hint(ip)) {
/* Reserve delalloc blocks for regular writeback. */ /* Reserve delalloc blocks for regular writeback. */
return xfs_file_iomap_begin_delay(inode, offset, length, flags, return xfs_file_iomap_begin_delay(inode, offset, length, flags,
iomap, srcmap); iomap, srcmap);
...@@ -975,17 +977,6 @@ xfs_file_iomap_begin( ...@@ -975,17 +977,6 @@ xfs_file_iomap_begin(
if (error) if (error)
goto out_unlock; goto out_unlock;
if (flags & IOMAP_REPORT) {
/* Trim the mapping to the nearest shared extent boundary. */
error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
if (error)
goto out_unlock;
}
/* Non-modifying mapping requested, so we are done */
if (!(flags & (IOMAP_WRITE | IOMAP_ZERO)))
goto out_found;
/* /*
* Break shared extents if necessary. Checks for non-blocking IO have * Break shared extents if necessary. Checks for non-blocking IO have
* been done up front, so we don't need to do them here. * been done up front, so we don't need to do them here.
...@@ -1051,10 +1042,8 @@ xfs_file_iomap_begin( ...@@ -1051,10 +1042,8 @@ xfs_file_iomap_begin(
* so consider them to be dirty for the purposes of O_DSYNC even if * so consider them to be dirty for the purposes of O_DSYNC even if
* there is no other metadata changes pending or have been made here. * there is no other metadata changes pending or have been made here.
*/ */
if ((flags & IOMAP_WRITE) && offset + length > i_size_read(inode)) if (offset + length > i_size_read(inode))
iomap_flags |= IOMAP_F_DIRTY; iomap_flags |= IOMAP_F_DIRTY;
if (shared)
iomap_flags |= IOMAP_F_SHARED;
return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags); return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
out_found: out_found:
...@@ -1157,6 +1146,48 @@ const struct iomap_ops xfs_iomap_ops = { ...@@ -1157,6 +1146,48 @@ const struct iomap_ops xfs_iomap_ops = {
.iomap_end = xfs_file_iomap_end, .iomap_end = xfs_file_iomap_end,
}; };
static int
xfs_read_iomap_begin(
struct inode *inode,
loff_t offset,
loff_t length,
unsigned flags,
struct iomap *iomap,
struct iomap *srcmap)
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
struct xfs_bmbt_irec imap;
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, length);
int nimaps = 1, error = 0;
bool shared = false;
unsigned lockmode;
ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
error = xfs_ilock_for_iomap(ip, flags, &lockmode);
if (error)
return error;
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
&nimaps, 0);
if (!error && (flags & IOMAP_REPORT))
error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
xfs_iunlock(ip, lockmode);
if (error)
return error;
trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
return xfs_bmbt_to_iomap(ip, iomap, &imap, shared ? IOMAP_F_SHARED : 0);
}
const struct iomap_ops xfs_read_iomap_ops = {
.iomap_begin = xfs_read_iomap_begin,
};
static int static int
xfs_seek_iomap_begin( xfs_seek_iomap_begin(
struct inode *inode, struct inode *inode,
......
...@@ -40,6 +40,7 @@ xfs_aligned_fsb_count( ...@@ -40,6 +40,7 @@ xfs_aligned_fsb_count(
} }
extern const struct iomap_ops xfs_iomap_ops; extern const struct iomap_ops xfs_iomap_ops;
extern const struct iomap_ops xfs_read_iomap_ops;
extern const struct iomap_ops xfs_seek_iomap_ops; extern const struct iomap_ops xfs_seek_iomap_ops;
extern const struct iomap_ops xfs_xattr_iomap_ops; extern const struct iomap_ops xfs_xattr_iomap_ops;
......
...@@ -1114,7 +1114,7 @@ xfs_vn_fiemap( ...@@ -1114,7 +1114,7 @@ xfs_vn_fiemap(
&xfs_xattr_iomap_ops); &xfs_xattr_iomap_ops);
} else { } else {
error = iomap_fiemap(inode, fieinfo, start, length, error = iomap_fiemap(inode, fieinfo, start, length,
&xfs_iomap_ops); &xfs_read_iomap_ops);
} }
xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED); xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册