Commit a363f0c2 authored by Dave Chinner, committed by Dave Chinner

xfs: ensure sync write errors are returned

xfs_file_aio_write() only returns the error from synchronous
flushing of the data and inode if error == 0. At the point where
error is being checked, it is guaranteed to be > 0. Therefore any
errors returned by the data or fsync flush will never be returned.
Fix the checks so we overwrite the current error once and only if an
error really occurred.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Alex Elder <aelder@sgi.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>

Parent: d0eb2f38
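For context, the snippet below is a minimal, self-contained sketch (not XFS code) of the error-propagation pattern the commit message describes: the old check only copies the flush error into "error" when it is still zero, which it never is after a successful write, while the fixed check overwrites the return value only when a flush error actually occurred. The helper names and the -EIO value are hypothetical; only the control flow mirrors the patch.

/*
 * Standalone illustration of the bug and the fix.  do_sync_flush() and
 * demo_write_*() are made-up names; only the control flow mirrors
 * xfs_file_aio_write() before and after this patch.
 */
#include <stdio.h>

static int do_sync_flush(void)
{
        return -5;              /* pretend the data/inode flush failed (-EIO) */
}

/* Old pattern: 'error' already holds the negated byte count, so the
 * "if (!error)" test never fires and the flush error is silently lost. */
static long demo_write_old(long bytes_written)
{
        long error = -bytes_written;    /* non-zero after any successful write */
        int error2 = do_sync_flush();

        if (!error)                     /* never true here */
                error = error2;
        return -error;                  /* caller sees bytes_written, not -EIO */
}

/* Fixed pattern: overwrite the return value only when a flush error
 * really occurred, so the failure reaches the caller. */
static long demo_write_new(long bytes_written)
{
        long ret = bytes_written;
        int error = do_sync_flush();

        if (error)
                ret = error;            /* sync error is now returned */
        return ret;
}

int main(void)
{
        printf("old: %ld, fixed: %ld\n", demo_write_old(4096), demo_write_new(4096));
        return 0;
}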
@@ -574,7 +574,7 @@ xfs_file_aio_write(
 	struct inode *inode = mapping->host;
 	struct xfs_inode *ip = XFS_I(inode);
 	struct xfs_mount *mp = ip->i_mount;
-	ssize_t ret = 0, error = 0;
+	ssize_t ret = 0;
 	int ioflags = 0;
 	xfs_fsize_t isize, new_size;
 	int iolock;
@@ -590,9 +590,9 @@ xfs_file_aio_write(
 	if (file->f_mode & FMODE_NOCMTIME)
 		ioflags |= IO_INVIS;
 
-	error = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
-	if (error)
-		return error;
+	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
+	if (ret)
+		return ret;
 
 	count = ocount;
 	if (count == 0)
@@ -616,9 +616,9 @@ xfs_file_aio_write(
 	xfs_ilock(ip, XFS_ILOCK_EXCL|iolock);
 
 start:
-	error = -generic_write_checks(file, &pos, &count,
+	ret = generic_write_checks(file, &pos, &count,
 					S_ISBLK(inode->i_mode));
-	if (error) {
+	if (ret) {
 		xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
 		goto out_unlock_mutex;
 	}
@@ -660,8 +660,8 @@ xfs_file_aio_write(
 	 */
 
 	if (pos > ip->i_size) {
-		error = xfs_zero_eof(ip, pos, ip->i_size);
-		if (error) {
+		ret = -xfs_zero_eof(ip, pos, ip->i_size);
+		if (ret) {
 			xfs_iunlock(ip, XFS_ILOCK_EXCL);
 			goto out_unlock_internal;
 		}
@@ -674,8 +674,8 @@ xfs_file_aio_write(
 	 * by root. This keeps people from modifying setuid and
 	 * setgid binaries.
 	 */
-	error = -file_remove_suid(file);
-	if (unlikely(error))
+	ret = file_remove_suid(file);
+	if (unlikely(ret))
 		goto out_unlock_internal;
 
 	/* We can write back this queue in page reclaim */
@@ -684,10 +684,10 @@ xfs_file_aio_write(
 	if ((ioflags & IO_ISDIRECT)) {
 		if (mapping->nrpages) {
 			WARN_ON(need_i_mutex == 0);
-			error = xfs_flushinval_pages(ip,
+			ret = -xfs_flushinval_pages(ip,
 					(pos & PAGE_CACHE_MASK),
 					-1, FI_REMAPF_LOCKED);
-			if (error)
+			if (ret)
 				goto out_unlock_internal;
 		}
@@ -720,24 +720,22 @@ xfs_file_aio_write(
 		}
 	} else {
 		int enospc = 0;
-		ssize_t ret2 = 0;
 
 write_retry:
 		trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, ioflags);
-		ret2 = generic_file_buffered_write(iocb, iovp, nr_segs,
+		ret = generic_file_buffered_write(iocb, iovp, nr_segs,
 				pos, &iocb->ki_pos, count, ret);
 		/*
 		 * if we just got an ENOSPC, flush the inode now we
 		 * aren't holding any page locks and retry *once*
 		 */
-		if (ret2 == -ENOSPC && !enospc) {
-			error = xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
-			if (error)
+		if (ret == -ENOSPC && !enospc) {
+			ret = xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
+			if (ret)
 				goto out_unlock_internal;
 			enospc = 1;
 			goto write_retry;
 		}
-		ret = ret2;
 	}
 
 	current->backing_dev_info = NULL;
@@ -753,7 +751,6 @@ xfs_file_aio_write(
 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 	}
 
-	error = -ret;
 	if (ret <= 0)
 		goto out_unlock_internal;
@@ -762,23 +759,23 @@ xfs_file_aio_write(
 	/* Handle various SYNC-type writes */
 	if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
 		loff_t end = pos + ret - 1;
-		int error2;
+		int error, error2;
 
 		xfs_iunlock(ip, iolock);
 		if (need_i_mutex)
 			mutex_unlock(&inode->i_mutex);
 
-		error2 = filemap_write_and_wait_range(mapping, pos, end);
-		if (!error)
-			error = error2;
+		error = filemap_write_and_wait_range(mapping, pos, end);
 		if (need_i_mutex)
 			mutex_lock(&inode->i_mutex);
 		xfs_ilock(ip, iolock);
 		error2 = -xfs_file_fsync(file,
 				(file->f_flags & __O_SYNC) ? 0 : 1);
-		if (!error)
-			error = error2;
+		if (error)
+			ret = error;
+		else if (error2)
+			ret = error2;
 	}
 
 out_unlock_internal:
@@ -800,7 +797,7 @@ xfs_file_aio_write(
 out_unlock_mutex:
 	if (need_i_mutex)
 		mutex_unlock(&inode->i_mutex);
-	return -error;
+	return ret;
 }
 
 STATIC int