Commit 4e087a3b authored by Christoph Hellwig, committed by Darrick J. Wong

xfs: use a struct iomap in xfs_writepage_ctx

In preparation for moving the XFS writeback code to fs/iomap.c, switch
it to use struct iomap instead of the XFS-specific struct xfs_bmbt_irec.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Parent 05b30949
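Note for readers following the conversion: struct xfs_bmbt_irec describes an extent in filesystem blocks (br_startoff, br_startblock, br_blockcount, br_state), while struct iomap describes the same mapping in bytes plus an explicit type and target block device. The sketch below is an abbreviated rendering of the iomap pieces this patch starts relying on, paraphrased from include/linux/iomap.h of this era; the field set is trimmed for illustration and is not the authoritative definition.

/*
 * Abbreviated sketch of struct iomap as consumed by the new writeback code.
 * Only the fields this patch touches are shown; see include/linux/iomap.h
 * for the real definition.
 */
struct iomap {
	u64			addr;	/* disk offset of mapping, bytes */
	loff_t			offset;	/* file offset of mapping, bytes */
	u64			length;	/* length of mapping, bytes */
	u16			type;	/* IOMAP_HOLE, IOMAP_DELALLOC, ... */
	u16			flags;	/* IOMAP_F_SHARED, ... */
	struct block_device	*bdev;	/* block device for I/O */
};

/*
 * iomap_sector() maps a byte position inside the mapping to a 512-byte disk
 * sector; it is what lets xfs_add_to_ioend() drop the open-coded
 * xfs_fsb_to_db() arithmetic further down in this patch.
 */
static inline sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}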
@@ -34,6 +34,7 @@
 #include "xfs_ag_resv.h"
 #include "xfs_refcount.h"
 #include "xfs_icache.h"
+#include "xfs_iomap.h"

 kmem_zone_t		*xfs_bmap_free_item_zone;
@@ -4456,16 +4457,21 @@ int
 xfs_bmapi_convert_delalloc(
 	struct xfs_inode	*ip,
 	int			whichfork,
-	xfs_fileoff_t		offset_fsb,
-	struct xfs_bmbt_irec	*imap,
+	xfs_off_t		offset,
+	struct iomap		*iomap,
 	unsigned int		*seq)
 {
 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
 	struct xfs_mount	*mp = ip->i_mount;
+	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
 	struct xfs_bmalloca	bma = { NULL };
+	u16			flags = 0;
 	struct xfs_trans	*tp;
 	int			error;

+	if (whichfork == XFS_COW_FORK)
+		flags |= IOMAP_F_SHARED;
+
 	/*
 	 * Space for the extent and indirect blocks was reserved when the
 	 * delalloc extent was created so there's no need to do so here.
@@ -4495,7 +4501,7 @@ xfs_bmapi_convert_delalloc(
 	 * the extent.  Just return the real extent at this offset.
 	 */
 	if (!isnullstartblock(bma.got.br_startblock)) {
-		*imap = bma.got;
+		xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
 		*seq = READ_ONCE(ifp->if_seq);
 		goto out_trans_cancel;
 	}
@@ -4528,7 +4534,7 @@ xfs_bmapi_convert_delalloc(
 	XFS_STATS_INC(mp, xs_xstrat_quick);

 	ASSERT(!isnullstartblock(bma.got.br_startblock));
-	*imap = bma.got;
+	xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
 	*seq = READ_ONCE(ifp->if_seq);

 	if (whichfork == XFS_COW_FORK)
...
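Both xfs_bmap.c hunks above now publish the result through xfs_bmbt_to_iomap() instead of copying the raw xfs_bmbt_irec. As a rough guide to what that helper does, here is a simplified, hypothetical sketch of the block-to-byte and state-to-type translation; it is not the exact body of the function in fs/xfs/xfs_iomap.c, which also deals with realtime devices, DAX and error checking.

/*
 * Hedged sketch of the translation xfs_bmbt_to_iomap() performs: convert a
 * block-unit xfs_bmbt_irec into a byte-unit struct iomap.
 */
static void
sketch_bmbt_to_iomap(struct xfs_inode *ip, struct iomap *iomap,
		struct xfs_bmbt_irec *imap, u16 flags)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else if (isnullstartblock(imap->br_startblock)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
		iomap->type = (imap->br_state == XFS_EXT_UNWRITTEN) ?
				IOMAP_UNWRITTEN : IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
	iomap->flags = flags;
}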
@@ -228,8 +228,7 @@ int xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
 		struct xfs_bmbt_irec *got, struct xfs_iext_cursor *cur,
 		int eof);
 int	xfs_bmapi_convert_delalloc(struct xfs_inode *ip, int whichfork,
-		xfs_fileoff_t offset_fsb, struct xfs_bmbt_irec *imap,
-		unsigned int *seq);
+		xfs_off_t offset, struct iomap *iomap, unsigned int *seq);
 int	xfs_bmap_add_extent_unwritten_real(struct xfs_trans *tp,
 		struct xfs_inode *ip, int whichfork,
 		struct xfs_iext_cursor *icur, struct xfs_btree_cur **curp,
...
@@ -22,7 +22,7 @@
  * structure owned by writepages passed to individual writepage calls
  */
 struct xfs_writepage_ctx {
-	struct xfs_bmbt_irec	imap;
+	struct iomap		iomap;
 	int			fork;
 	unsigned int		data_seq;
 	unsigned int		cow_seq;
@@ -267,7 +267,7 @@ xfs_end_ioend(
 	 */
 	if (ioend->io_fork == XFS_COW_FORK)
 		error = xfs_reflink_end_cow(ip, offset, size);
-	else if (ioend->io_state == XFS_EXT_UNWRITTEN)
+	else if (ioend->io_type == IOMAP_UNWRITTEN)
 		error = xfs_iomap_write_unwritten(ip, offset, size, false);
 	else
 		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
@@ -300,8 +300,8 @@ xfs_ioend_can_merge(
 		return false;
 	if ((ioend->io_fork == XFS_COW_FORK) ^ (next->io_fork == XFS_COW_FORK))
 		return false;
-	if ((ioend->io_state == XFS_EXT_UNWRITTEN) ^
-	    (next->io_state == XFS_EXT_UNWRITTEN))
+	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
+	    (next->io_type == IOMAP_UNWRITTEN))
 		return false;
 	if (ioend->io_offset + ioend->io_size != next->io_offset)
 		return false;
@@ -403,7 +403,7 @@ xfs_end_bio(
 	unsigned long		flags;

 	if (ioend->io_fork == XFS_COW_FORK ||
-	    ioend->io_state == XFS_EXT_UNWRITTEN ||
+	    ioend->io_type == IOMAP_UNWRITTEN ||
 	    ioend->io_append_trans != NULL) {
 		spin_lock_irqsave(&ip->i_ioend_lock, flags);
 		if (list_empty(&ip->i_ioend_list))
@@ -423,10 +423,10 @@ static bool
 xfs_imap_valid(
 	struct xfs_writepage_ctx	*wpc,
 	struct xfs_inode		*ip,
-	xfs_fileoff_t			offset_fsb)
+	loff_t				offset)
 {
-	if (offset_fsb < wpc->imap.br_startoff ||
-	    offset_fsb >= wpc->imap.br_startoff + wpc->imap.br_blockcount)
+	if (offset < wpc->iomap.offset ||
+	    offset >= wpc->iomap.offset + wpc->iomap.length)
 		return false;
 	/*
 	 * If this is a COW mapping, it is sufficient to check that the mapping
@@ -453,7 +453,7 @@ xfs_imap_valid(
 /*
  * Pass in a dellalloc extent and convert it to real extents, return the real
- * extent that maps offset_fsb in wpc->imap.
+ * extent that maps offset_fsb in wpc->iomap.
  *
  * The current page is held locked so nothing could have removed the block
  * backing offset_fsb, although it could have moved from the COW to the data
@@ -463,23 +463,23 @@ static int
 xfs_convert_blocks(
 	struct xfs_writepage_ctx *wpc,
 	struct xfs_inode	*ip,
-	xfs_fileoff_t		offset_fsb)
+	loff_t			offset)
 {
 	int			error;

 	/*
-	 * Attempt to allocate whatever delalloc extent currently backs
-	 * offset_fsb and put the result into wpc->imap.  Allocate in a loop
-	 * because it may take several attempts to allocate real blocks for a
-	 * contiguous delalloc extent if free space is sufficiently fragmented.
+	 * Attempt to allocate whatever delalloc extent currently backs offset
+	 * and put the result into wpc->iomap.  Allocate in a loop because it
+	 * may take several attempts to allocate real blocks for a contiguous
+	 * delalloc extent if free space is sufficiently fragmented.
 	 */
 	do {
-		error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset_fsb,
-				&wpc->imap, wpc->fork == XFS_COW_FORK ?
+		error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset,
+				&wpc->iomap, wpc->fork == XFS_COW_FORK ?
 					&wpc->cow_seq : &wpc->data_seq);
 		if (error)
 			return error;
-	} while (wpc->imap.br_startoff + wpc->imap.br_blockcount <= offset_fsb);
+	} while (wpc->iomap.offset + wpc->iomap.length <= offset);

 	return 0;
 }
@@ -519,7 +519,7 @@ xfs_map_blocks(
 	 * against concurrent updates and provides a memory barrier on the way
 	 * out that ensures that we always see the current value.
 	 */
-	if (xfs_imap_valid(wpc, ip, offset_fsb))
+	if (xfs_imap_valid(wpc, ip, offset))
 		return 0;

 	/*
@@ -552,7 +552,7 @@ xfs_map_blocks(
 	 * No COW extent overlap. Revalidate now that we may have updated
 	 * ->cow_seq. If the data mapping is still valid, we're done.
 	 */
-	if (xfs_imap_valid(wpc, ip, offset_fsb)) {
+	if (xfs_imap_valid(wpc, ip, offset)) {
 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
 		return 0;
 	}
@@ -592,11 +592,11 @@ xfs_map_blocks(
 	    isnullstartblock(imap.br_startblock))
 		goto allocate_blocks;

-	wpc->imap = imap;
+	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0);
 	trace_xfs_map_blocks_found(ip, offset, count, wpc->fork, &imap);
 	return 0;
 allocate_blocks:
-	error = xfs_convert_blocks(wpc, ip, offset_fsb);
+	error = xfs_convert_blocks(wpc, ip, offset);
 	if (error) {
 		/*
 		 * If we failed to find the extent in the COW fork we might have
@@ -616,12 +616,15 @@ xfs_map_blocks(
 	 * original delalloc one. Trim the return extent to the next COW
 	 * boundary again to force a re-lookup.
 	 */
-	if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF &&
-	    cow_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount)
-		wpc->imap.br_blockcount = cow_fsb - wpc->imap.br_startoff;
+	if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
+		loff_t		cow_offset = XFS_FSB_TO_B(mp, cow_fsb);
+
+		if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
+			wpc->iomap.length = cow_offset - wpc->iomap.offset;
+	}

-	ASSERT(wpc->imap.br_startoff <= offset_fsb);
-	ASSERT(wpc->imap.br_startoff + wpc->imap.br_blockcount > offset_fsb);
+	ASSERT(wpc->iomap.offset <= offset);
+	ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
 	trace_xfs_map_blocks_alloc(ip, offset, count, wpc->fork, &imap);
 	return 0;
 }
@@ -664,7 +667,7 @@ xfs_submit_ioend(
 	/* Reserve log space if we might write beyond the on-disk inode size. */
 	if (!status &&
 	    (ioend->io_fork == XFS_COW_FORK ||
-	     ioend->io_state != XFS_EXT_UNWRITTEN) &&
+	     ioend->io_type != IOMAP_UNWRITTEN) &&
 	    xfs_ioend_is_append(ioend) &&
 	    !ioend->io_append_trans)
 		status = xfs_setfilesize_trans_alloc(ioend);
@@ -693,10 +696,8 @@ xfs_submit_ioend(
 static struct xfs_ioend *
 xfs_alloc_ioend(
 	struct inode		*inode,
-	int			fork,
-	xfs_exntst_t		state,
+	struct xfs_writepage_ctx *wpc,
 	xfs_off_t		offset,
-	struct block_device	*bdev,
 	sector_t		sector,
 	struct writeback_control *wbc)
 {
@@ -704,7 +705,7 @@ xfs_alloc_ioend(
 	struct bio		*bio;

 	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
-	bio_set_dev(bio, bdev);
+	bio_set_dev(bio, wpc->iomap.bdev);
 	bio->bi_iter.bi_sector = sector;
 	bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
 	bio->bi_write_hint = inode->i_write_hint;
@@ -712,8 +713,8 @@ xfs_alloc_ioend(
 	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
 	INIT_LIST_HEAD(&ioend->io_list);
-	ioend->io_fork = fork;
-	ioend->io_state = state;
+	ioend->io_fork = wpc->fork;
+	ioend->io_type = wpc->iomap.type;
 	ioend->io_inode = inode;
 	ioend->io_size = 0;
 	ioend->io_offset = offset;
@@ -761,26 +762,19 @@ xfs_add_to_ioend(
 	struct writeback_control *wbc,
 	struct list_head	*iolist)
 {
-	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
+	sector_t		sector = iomap_sector(&wpc->iomap, offset);
 	unsigned		len = i_blocksize(inode);
 	unsigned		poff = offset & (PAGE_SIZE - 1);
 	bool			merged, same_page = false;
-	sector_t		sector;
-
-	sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
-		((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);

 	if (!wpc->ioend ||
 	    wpc->fork != wpc->ioend->io_fork ||
-	    wpc->imap.br_state != wpc->ioend->io_state ||
+	    wpc->iomap.type != wpc->ioend->io_type ||
 	    sector != bio_end_sector(wpc->ioend->io_bio) ||
 	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
 		if (wpc->ioend)
 			list_add(&wpc->ioend->io_list, iolist);
-		wpc->ioend = xfs_alloc_ioend(inode, wpc->fork,
-				wpc->imap.br_state, offset, bdev, sector, wbc);
+		wpc->ioend = xfs_alloc_ioend(inode, wpc, offset, sector, wbc);
 	}

 	merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
@@ -894,7 +888,7 @@ xfs_writepage_map(
 		error = xfs_map_blocks(wpc, inode, file_offset);
 		if (error)
 			break;
-		if (wpc->imap.br_startblock == HOLESTARTBLOCK)
+		if (wpc->iomap.type == IOMAP_HOLE)
 			continue;
 		xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
 				&submit_list);
...
@@ -14,7 +14,7 @@ extern struct bio_set xfs_ioend_bioset;
 struct xfs_ioend {
 	struct list_head	io_list;	/* next ioend in chain */
 	int			io_fork;	/* inode fork written back */
-	xfs_exntst_t		io_state;	/* extent state */
+	u16			io_type;
 	struct inode		*io_inode;	/* file being written to */
 	size_t			io_size;	/* size of the extent */
 	xfs_off_t		io_offset;	/* offset in the file */
...
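For completeness: the bmap layer still works in filesystem blocks, so the new byte-based entry points convert at the boundary with the long-standing XFS macros, paraphrased here from fs/xfs/xfs_mount.h (sb_blocklog is log2 of the filesystem block size).

/*
 * Paraphrased byte <-> filesystem-block conversions.  XFS_B_TO_FSBT rounds
 * down, which is what xfs_bmapi_convert_delalloc() now uses to derive
 * offset_fsb from the byte offset handed in by the writeback code.
 */
#define XFS_FSB_TO_B(mp, fsbno) \
	((xfs_fsize_t)(fsbno) << (mp)->m_sb.sb_blocklog)
#define XFS_B_TO_FSBT(mp, b) \
	(((uint64_t)(b)) >> (mp)->m_sb.sb_blocklog)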