Commit 5b46fcc3 authored by Dave Chinner, committed by Zheng Zengkai

xfs: factor out forced iclog flushes

mainline-inclusion
from mainline-v5.14-rc1
commit 45eddb41
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4KIAO
CVE: NA

Reference:
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=45eddb414047c366744cc60dd6cef7c7e58c6ab9

-------------------------------------------------

We force iclogs in several places - we need them all to have the
same cache flush semantics, so start by factoring out the iclog
force into a common helper.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Guo Xuenan <guoxuenan@huawei.com>
Reviewed-by: Lihong Kou <koulihong@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 8c4617bb
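
For orientation before the diff: the patch replaces several open-coded force sequences (bump ic_refcnt, switch the iclog out of ACTIVE, release it) with one helper, xlog_force_iclog(), so that any later change to the forced-flush semantics lands in a single place. Below is a minimal, self-contained C sketch of that refactoring pattern only; the types, state names, and helper bodies are simplified stand-ins invented for illustration and are not the XFS kernel code.

    #include <stdio.h>

    /* Toy stand-ins for the kernel structures; not the real XFS types. */
    enum iclog_state { STATE_ACTIVE, STATE_WANT_SYNC };

    struct iclog {
            int refcnt;
            enum iclog_state state;
    };

    static void switch_iclog(struct iclog *ic)  { ic->state = STATE_WANT_SYNC; }
    static int  release_iclog(struct iclog *ic) { ic->refcnt--; return 0; }

    /*
     * The factored-out helper: every caller that needs to force an iclog
     * goes through this one function, so all call sites share identical
     * semantics (and can gain cache-flush flags in a single later change).
     */
    static int force_iclog(struct iclog *ic)
    {
            ic->refcnt++;                   /* was open-coded at each call site */
            if (ic->state == STATE_ACTIVE)
                    switch_iclog(ic);
            return release_iclog(ic);
    }

    int main(void)
    {
            struct iclog ic = { .refcnt = 0, .state = STATE_ACTIVE };

            /* Unmount path, log-force path, force-to-lsn path: one call now. */
            if (force_iclog(&ic))
                    return 1;
            printf("state=%d refcnt=%d\n", (int)ic.state, ic.refcnt);
            return 0;
    }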
@@ -752,6 +752,20 @@ xfs_log_mount_cancel(
 	xfs_log_unmount(mp);
 }
 
+/*
+ * Flush out the iclog to disk ensuring that device caches are flushed and
+ * the iclog hits stable storage before any completion waiters are woken.
+ */
+static inline int
+xlog_force_iclog(
+	struct xlog_in_core	*iclog)
+{
+	atomic_inc(&iclog->ic_refcnt);
+	if (iclog->ic_state == XLOG_STATE_ACTIVE)
+		xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
+	return xlog_state_release_iclog(iclog->ic_log, iclog, 0);
+}
+
 /*
  * Wait for the iclog and all prior iclogs to be written disk as required by the
  * log force state machine. Waiting on ic_force_wait ensures iclog completions
@@ -836,18 +850,8 @@ xlog_unmount_write(
 
 	spin_lock(&log->l_icloglock);
 	iclog = log->l_iclog;
-	atomic_inc(&iclog->ic_refcnt);
-	if (iclog->ic_state == XLOG_STATE_ACTIVE)
-		xlog_state_switch_iclogs(log, iclog, 0);
-	else
-		ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
-		       iclog->ic_state == XLOG_STATE_IOERROR);
-	/*
-	 * Ensure the journal is fully flushed and on stable storage once the
-	 * iclog containing the unmount record is written.
-	 */
-	iclog->ic_flags |= (XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
-	error = xlog_state_release_iclog(log, iclog, 0);
+	error = xlog_force_iclog(iclog);
 	xlog_wait_on_iclog(iclog);
 
 	if (tic) {
@@ -3106,17 +3110,9 @@ xfs_log_force(
 		iclog = iclog->ic_prev;
 	} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
 		if (atomic_read(&iclog->ic_refcnt) == 0) {
-			/*
-			 * We are the only one with access to this iclog.
-			 *
-			 * Flush it out now. There should be a roundoff of zero
-			 * to show that someone has already taken care of the
-			 * roundoff from the previous sync.
-			 */
-			atomic_inc(&iclog->ic_refcnt);
+			/* We have exclusive access to this iclog. */
 			lsn = be64_to_cpu(iclog->ic_header.h_lsn);
-			xlog_state_switch_iclogs(log, iclog, 0);
-			if (xlog_state_release_iclog(log, iclog, 0))
+			if (xlog_force_iclog(iclog))
 				goto out_error;
 
 			if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
@@ -3193,9 +3189,7 @@ xlog_force_lsn(
 					&log->l_icloglock);
 			return -EAGAIN;
 		}
-		atomic_inc(&iclog->ic_refcnt);
-		xlog_state_switch_iclogs(log, iclog, 0);
-		if (xlog_state_release_iclog(log, iclog, 0))
+		if (xlog_force_iclog(iclog))
 			goto out_error;
 		if (log_flushed)
 			*log_flushed = 1;