提交 e696663a 编写于 作者: C Christoph Hellwig 提交者: Darrick J. Wong

xfs: simplify the xfs_iomap_write_direct calling conventions

Move the EOF alignment and checking for the next allocated extent into
the callers to avoid the need to pass the byte based offset and count
as well as looking at the incoming imap.  The added benefit is that
the caller can unlock the incoming ilock and the function doesn't have
funny unbalanced locking contexts.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
上级 307cdb54
...@@ -148,7 +148,7 @@ xfs_eof_alignment( ...@@ -148,7 +148,7 @@ xfs_eof_alignment(
* Check if last_fsb is outside the last extent, and if so grow it to the next * Check if last_fsb is outside the last extent, and if so grow it to the next
* stripe unit boundary. * stripe unit boundary.
*/ */
static xfs_fileoff_t xfs_fileoff_t
xfs_iomap_eof_align_last_fsb( xfs_iomap_eof_align_last_fsb(
struct xfs_inode *ip, struct xfs_inode *ip,
xfs_fileoff_t end_fsb) xfs_fileoff_t end_fsb)
...@@ -185,61 +185,36 @@ xfs_iomap_eof_align_last_fsb( ...@@ -185,61 +185,36 @@ xfs_iomap_eof_align_last_fsb(
int int
xfs_iomap_write_direct( xfs_iomap_write_direct(
xfs_inode_t *ip, struct xfs_inode *ip,
xfs_off_t offset, xfs_fileoff_t offset_fsb,
size_t count, xfs_fileoff_t count_fsb,
xfs_bmbt_irec_t *imap, struct xfs_bmbt_irec *imap)
int nmaps)
{ {
xfs_mount_t *mp = ip->i_mount; struct xfs_mount *mp = ip->i_mount;
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); struct xfs_trans *tp;
xfs_fileoff_t last_fsb = xfs_iomap_end_fsb(mp, offset, count); xfs_filblks_t resaligned;
xfs_filblks_t count_fsb, resaligned;
xfs_extlen_t extsz;
int nimaps; int nimaps;
int quota_flag; int quota_flag;
int rt; uint qblocks, resblks;
xfs_trans_t *tp; unsigned int resrtextents = 0;
uint qblocks, resblks, resrtextents;
int error; int error;
int lockmode;
int bmapi_flags = XFS_BMAPI_PREALLOC; int bmapi_flags = XFS_BMAPI_PREALLOC;
uint tflags = 0; uint tflags = 0;
rt = XFS_IS_REALTIME_INODE(ip);
extsz = xfs_get_extsz_hint(ip);
lockmode = XFS_ILOCK_SHARED; /* locked by caller */
ASSERT(xfs_isilocked(ip, lockmode));
if (offset + count > XFS_ISIZE(ip)) {
last_fsb = xfs_iomap_eof_align_last_fsb(ip, last_fsb);
} else {
if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
last_fsb = min(last_fsb, (xfs_fileoff_t)
imap->br_blockcount +
imap->br_startoff);
}
count_fsb = last_fsb - offset_fsb;
ASSERT(count_fsb > 0); ASSERT(count_fsb > 0);
resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz);
if (unlikely(rt)) { resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
xfs_get_extsz_hint(ip));
if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
resrtextents = qblocks = resaligned; resrtextents = qblocks = resaligned;
resrtextents /= mp->m_sb.sb_rextsize; resrtextents /= mp->m_sb.sb_rextsize;
resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
quota_flag = XFS_QMOPT_RES_RTBLKS; quota_flag = XFS_QMOPT_RES_RTBLKS;
} else { } else {
resrtextents = 0;
resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
quota_flag = XFS_QMOPT_RES_REGBLKS; quota_flag = XFS_QMOPT_RES_REGBLKS;
} }
/*
* Drop the shared lock acquired by the caller, attach the dquot if
* necessary and move on to transaction setup.
*/
xfs_iunlock(ip, lockmode);
error = xfs_qm_dqattach(ip); error = xfs_qm_dqattach(ip);
if (error) if (error)
return error; return error;
...@@ -269,8 +244,7 @@ xfs_iomap_write_direct( ...@@ -269,8 +244,7 @@ xfs_iomap_write_direct(
if (error) if (error)
return error; return error;
lockmode = XFS_ILOCK_EXCL; xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_ilock(ip, lockmode);
error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag); error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
if (error) if (error)
...@@ -307,7 +281,7 @@ xfs_iomap_write_direct( ...@@ -307,7 +281,7 @@ xfs_iomap_write_direct(
error = xfs_alert_fsblock_zero(ip, imap); error = xfs_alert_fsblock_zero(ip, imap);
out_unlock: out_unlock:
xfs_iunlock(ip, lockmode); xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error; return error;
out_res_cancel: out_res_cancel:
...@@ -807,14 +781,16 @@ xfs_direct_write_iomap_begin( ...@@ -807,14 +781,16 @@ xfs_direct_write_iomap_begin(
* lower level functions are updated. * lower level functions are updated.
*/ */
length = min_t(loff_t, length, 1024 * PAGE_SIZE); length = min_t(loff_t, length, 1024 * PAGE_SIZE);
end_fsb = xfs_iomap_end_fsb(mp, offset, length);
/* if (offset + length > XFS_ISIZE(ip))
* xfs_iomap_write_direct() expects the shared lock. It is unlocked on end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
* return. else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
*/ end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
if (lockmode == XFS_ILOCK_EXCL) xfs_iunlock(ip, lockmode);
xfs_ilock_demote(ip, lockmode);
error = xfs_iomap_write_direct(ip, offset, length, &imap, nimaps); error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
&imap);
if (error) if (error)
return error; return error;
......
...@@ -11,9 +11,11 @@ ...@@ -11,9 +11,11 @@
struct xfs_inode; struct xfs_inode;
struct xfs_bmbt_irec; struct xfs_bmbt_irec;
int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t, int xfs_iomap_write_direct(struct xfs_inode *ip, xfs_fileoff_t offset_fsb,
struct xfs_bmbt_irec *, int); xfs_fileoff_t count_fsb, struct xfs_bmbt_irec *imap);
int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool); int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
xfs_fileoff_t xfs_iomap_eof_align_last_fsb(struct xfs_inode *ip,
xfs_fileoff_t end_fsb);
int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *, int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
struct xfs_bmbt_irec *, u16); struct xfs_bmbt_irec *, u16);
......
...@@ -143,21 +143,20 @@ xfs_fs_map_blocks( ...@@ -143,21 +143,20 @@ xfs_fs_map_blocks(
lock_flags = xfs_ilock_data_map_shared(ip); lock_flags = xfs_ilock_data_map_shared(ip);
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
&imap, &nimaps, bmapi_flags); &imap, &nimaps, bmapi_flags);
xfs_iunlock(ip, lock_flags);
if (error)
goto out_unlock;
ASSERT(!nimaps || imap.br_startblock != DELAYSTARTBLOCK); ASSERT(!nimaps || imap.br_startblock != DELAYSTARTBLOCK);
if (write && (!nimaps || imap.br_startblock == HOLESTARTBLOCK)) { if (!error && write &&
/* (!nimaps || imap.br_startblock == HOLESTARTBLOCK)) {
* xfs_iomap_write_direct() expects to take ownership of the if (offset + length > XFS_ISIZE(ip))
* shared ilock. end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
*/ else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
xfs_ilock(ip, XFS_ILOCK_SHARED); end_fsb = min(end_fsb, imap.br_startoff +
error = xfs_iomap_write_direct(ip, offset, length, &imap, imap.br_blockcount);
nimaps); xfs_iunlock(ip, lock_flags);
error = xfs_iomap_write_direct(ip, offset_fsb,
end_fsb - offset_fsb, &imap);
if (error) if (error)
goto out_unlock; goto out_unlock;
...@@ -170,6 +169,8 @@ xfs_fs_map_blocks( ...@@ -170,6 +169,8 @@ xfs_fs_map_blocks(
XFS_PREALLOC_SET | XFS_PREALLOC_SYNC); XFS_PREALLOC_SET | XFS_PREALLOC_SYNC);
if (error) if (error)
goto out_unlock; goto out_unlock;
} else {
xfs_iunlock(ip, lock_flags);
} }
xfs_iunlock(ip, XFS_IOLOCK_EXCL); xfs_iunlock(ip, XFS_IOLOCK_EXCL);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册