Commit 2ced19cb authored by Dave Chinner, committed by Dave Chinner

xfs: make AIL tail pushing independent of the grant lock

xlog_grant_push_ail() currently takes the grant lock internally to sample the
tail lsn, the last sync lsn and the reserve grant head. Most callers already
hold the grant lock but have to drop it before calling xlog_grant_push_ail().
This is a leftover from when AIL tail pushing was done inline and
xlog_grant_push_ail() therefore had to drop the grant lock itself. AIL pushing
is now done by a separate thread, so we can safely hold the grant lock across
the entire xlog_grant_push_ail() call.

Move the grant locking outside of xlog_grant_push_ail() to simplify the locking
and synchronisation needed for tail pushing.  This reduces traffic on the
grant lock by itself, but it is only one step in preparing for the complete
removal of the grant lock.

While there, clean up the formatting of xlog_grant_push_ail() to match the
rest of the XFS code.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Parent eb40a875
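To make the calling-convention change concrete before reading the diff: the grant lock moves from inside xlog_grant_push_ail() to its callers. The sketch below is an editorial illustration, not code from the patch; it is plain userspace C with a pthread mutex standing in for the XFS grant spinlock, a deliberately simplified struct log, and hypothetical names (push_ail_old, push_ail_new, reserve_path). The real target computation and AIL hand-off are elided into comments.

#include <pthread.h>

struct log {
        pthread_mutex_t l_grant_lock;   /* stands in for the XFS grant spinlock */
        long            l_tail_lsn;     /* simplified; XFS uses xfs_lsn_t */
        long            l_last_sync_lsn;
};

/* Old convention: the push routine takes and drops the grant lock itself. */
static void push_ail_old(struct log *log, int need_bytes)
{
        (void)need_bytes;
        pthread_mutex_lock(&log->l_grant_lock);
        /* ... sample tail lsn, last sync lsn and the reserve grant head ... */
        pthread_mutex_unlock(&log->l_grant_lock);
        /* ... hand the computed push target to the AIL push thread ... */
}

/* New convention: the caller holds l_grant_lock across the whole call. */
static void push_ail_new(struct log *log, int need_bytes)
{
        (void)log;
        (void)need_bytes;
        /* ... sample the same state under the caller's l_grant_lock ... */
        /* ... hand the computed push target to the AIL push thread ... */
}

/* A caller on the reservation path. */
static void reserve_path(struct log *log, int need_bytes)
{
        pthread_mutex_lock(&log->l_grant_lock);
        /*
         * Before this change the caller had to unlock, call push_ail_old(),
         * and then re-take the lock.  Now the push happens under the lock.
         */
        push_ail_new(log, need_bytes);
        pthread_mutex_unlock(&log->l_grant_lock);
}

The point of the change is visible in reserve_path(): with the lock taken inside the callee, every caller had to drop and re-take l_grant_lock around the push; with the lock held by the caller, that unlock/lock pair disappears and the push target is computed within a single critical section.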
@@ -70,7 +70,7 @@ STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog);
 /* local functions to manipulate grant head */
 STATIC int  xlog_grant_log_space(xlog_t *log,
                                  xlog_ticket_t *xtic);
-STATIC void xlog_grant_push_ail(xfs_mount_t *mp,
+STATIC void xlog_grant_push_ail(struct log *log,
                                 int need_bytes);
 STATIC void xlog_regrant_reserve_log_space(xlog_t *log,
                                            xlog_ticket_t *ticket);
@@ -318,7 +318,9 @@ xfs_log_reserve(
                 trace_xfs_log_reserve(log, internal_ticket);
 
-                xlog_grant_push_ail(mp, internal_ticket->t_unit_res);
+                spin_lock(&log->l_grant_lock);
+                xlog_grant_push_ail(log, internal_ticket->t_unit_res);
+                spin_unlock(&log->l_grant_lock);
                 retval = xlog_regrant_write_log_space(log, internal_ticket);
         } else {
                 /* may sleep if need to allocate more tickets */
@@ -332,9 +334,11 @@ xfs_log_reserve(
                 trace_xfs_log_reserve(log, internal_ticket);
 
-                xlog_grant_push_ail(mp,
+                spin_lock(&log->l_grant_lock);
+                xlog_grant_push_ail(log,
                                     (internal_ticket->t_unit_res *
                                      internal_ticket->t_cnt));
+                spin_unlock(&log->l_grant_lock);
                 retval = xlog_grant_log_space(log, internal_ticket);
         }
@@ -1185,23 +1189,22 @@ xlog_commit_record(
  * water mark. In this manner, we would be creating a low water mark.
  */
 STATIC void
-xlog_grant_push_ail(xfs_mount_t *mp,
-                    int need_bytes)
+xlog_grant_push_ail(
+        struct log      *log,
+        int             need_bytes)
 {
-        xlog_t          *log = mp->m_log;       /* pointer to the log */
-        xfs_lsn_t       tail_lsn;               /* lsn of the log tail */
-        xfs_lsn_t       threshold_lsn = 0;      /* lsn we'd like to be at */
-        int             free_blocks;            /* free blocks left to write to */
-        int             free_bytes;             /* free bytes left to write to */
-        int             threshold_block;        /* block in lsn we'd like to be at */
-        int             threshold_cycle;        /* lsn cycle we'd like to be at */
+        xfs_lsn_t       threshold_lsn = 0;
+        xfs_lsn_t       tail_lsn;
+        int             free_blocks;
+        int             free_bytes;
+        int             threshold_block;
+        int             threshold_cycle;
         int             free_threshold;
 
         ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
 
-        spin_lock(&log->l_grant_lock);
-        free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
         tail_lsn = log->l_tail_lsn;
+        free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
         free_blocks = BTOBBT(free_bytes);
 
         /*
@@ -1212,32 +1215,32 @@ xlog_grant_push_ail(xfs_mount_t *mp,
         free_threshold = BTOBB(need_bytes);
         free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
         free_threshold = MAX(free_threshold, 256);
-        if (free_blocks < free_threshold) {
+        if (free_blocks >= free_threshold)
+                return;
+
         threshold_block = BLOCK_LSN(tail_lsn) + free_threshold;
         threshold_cycle = CYCLE_LSN(tail_lsn);
         if (threshold_block >= log->l_logBBsize) {
                 threshold_block -= log->l_logBBsize;
                 threshold_cycle += 1;
         }
-        threshold_lsn = xlog_assign_lsn(threshold_cycle, threshold_block);
-
-        /* Don't pass in an lsn greater than the lsn of the last
+        threshold_lsn = xlog_assign_lsn(threshold_cycle,
+                                        threshold_block);
+        /*
+         * Don't pass in an lsn greater than the lsn of the last
          * log record known to be on disk.
          */
         if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0)
                 threshold_lsn = log->l_last_sync_lsn;
-        }
-        spin_unlock(&log->l_grant_lock);
 
         /*
          * Get the transaction layer to kick the dirty buffers out to
          * disk asynchronously. No point in trying to do this if
          * the filesystem is shutting down.
          */
-        if (threshold_lsn &&
-            !XLOG_FORCED_SHUTDOWN(log))
+        if (!XLOG_FORCED_SHUTDOWN(log))
                 xfs_trans_ail_push(log->l_ailp, threshold_lsn);
-}       /* xlog_grant_push_ail */
+}
 
 /*
  * The bdstrat callback function for log bufs. This gives us a central
@@ -2543,9 +2546,7 @@ xlog_grant_log_space(xlog_t *log,
                 trace_xfs_log_grant_sleep2(log, tic);
 
-                spin_unlock(&log->l_grant_lock);
-                xlog_grant_push_ail(log->l_mp, need_bytes);
-                spin_lock(&log->l_grant_lock);
+                xlog_grant_push_ail(log, need_bytes);
 
                 XFS_STATS_INC(xs_sleep_logspace);
                 xlog_wait(&tic->t_wait, &log->l_grant_lock);
@@ -2641,9 +2642,7 @@ xlog_regrant_write_log_space(xlog_t *log,
                 trace_xfs_log_regrant_write_sleep1(log, tic);
 
-                spin_unlock(&log->l_grant_lock);
-                xlog_grant_push_ail(log->l_mp, need_bytes);
-                spin_lock(&log->l_grant_lock);
+                xlog_grant_push_ail(log, need_bytes);
 
                 XFS_STATS_INC(xs_sleep_logspace);
                 xlog_wait(&tic->t_wait, &log->l_grant_lock);
@@ -2666,9 +2665,7 @@ xlog_regrant_write_log_space(xlog_t *log,
         if (free_bytes < need_bytes) {
                 if (list_empty(&tic->t_queue))
                         list_add_tail(&tic->t_queue, &log->l_writeq);
-                spin_unlock(&log->l_grant_lock);
-                xlog_grant_push_ail(log->l_mp, need_bytes);
-                spin_lock(&log->l_grant_lock);
+                xlog_grant_push_ail(log, need_bytes);
 
                 XFS_STATS_INC(xs_sleep_logspace);
                 trace_xfs_log_regrant_write_sleep2(log, tic);