Commit c4a391b5 authored by Jan Kara, committed by Linus Torvalds

writeback: do not sync data dirtied after sync start

When there are processes heavily creating small files while sync(2) is
running, it can easily happen that quite a few new files are created
between the WB_SYNC_NONE and WB_SYNC_ALL passes of sync(2).  That can
happen especially if there are several busy filesystems (remember that
sync traverses filesystems sequentially and waits in the WB_SYNC_ALL
phase on one fs before starting it on another fs).  Because the
WB_SYNC_ALL pass is slow (e.g. it causes a transaction commit and a
cache flush for each inode in ext3), the resulting sync(2) times are
rather large.
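
The two passes are visible in sync_filesystem() in fs/sync.c (part of the
diff below); sync(2) itself does the equivalent, kicking off WB_SYNC_NONE
writeback via wakeup_flusher_threads() and then running the WB_SYNC_ALL
pass via iterate_supers(sync_inodes_one_sb, ...).  A simplified sketch of
the pre-patch flow, with explanatory comments added here (an illustration,
not the literal source):

  /* Sketch: the two writeback passes chained by sync_filesystem()
   * before this patch. */
  int sync_filesystem(struct super_block *sb)
  {
          int ret;

          /* Pass 1: WB_SYNC_NONE - start writeback but do not wait. */
          ret = __sync_filesystem(sb, 0);
          if (ret < 0)
                  return ret;
          /* Pass 2: WB_SYNC_ALL - write out and wait on every dirty
           * inode, including inodes dirtied while pass 1 was running,
           * which is what makes sync(2) crawl under heavy file creation. */
          return __sync_filesystem(sb, 1);
  }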

The following script reproduces the problem:

  function run_writers
  {
    for (( i = 0; i < 10; i++ )); do
      mkdir $1/dir$i
      for (( j = 0; j < 40000; j++ )); do
        dd if=/dev/zero of=$1/dir$i/$j bs=4k count=4 &>/dev/null
      done &
    done
  }

  for dir in "$@"; do
    run_writers $dir
  done

  sleep 40
  time sync
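
(The script takes the mount points to stress as arguments, one per busy
filesystem; e.g. it could be run as "bash repro.sh /mnt/fs1 /mnt/fs2",
where the script and mount-point names here are arbitrary.  Each
filesystem gets ten background writers, each creating 40000 16 KB files,
and sync is timed 40 seconds into the run.)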

Fix the problem by disregarding, in the WB_SYNC_ALL pass, inodes that
were dirtied after sync(2) was called.  To allow for this,
sync_inodes_sb() now takes a timestamp of when sync started, which is
used when setting up the work for the flusher threads.
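
In rough outline, using the identifiers from the diff below (a sketch of
the idea, not the literal patch):

  /* sync(2) samples the clock once and hands the stamp down: */
  unsigned long start = jiffies;               /* in sys_sync() */
  iterate_supers(sync_inodes_one_sb, &start);  /* -> sync_inodes_sb(sb, start) */

  /* ...and when the flusher queues inodes in move_expired_inodes(),
   * anything dirtied after that stamp (work->older_than_this == start)
   * is skipped, so the WB_SYNC_ALL pass never waits on it: */
  if (inode_dirtied_after(inode, work->older_than_this))
          break;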

To give some numbers: when the above script is run on two ext4
filesystems on a simple SATA drive, the average sync time over 10 runs
is 267.549 seconds with a standard deviation of 104.799426.  With the
patched kernel, the average sync time over 10 runs is 2.995 seconds
with a standard deviation of 0.096.
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Fengguang Wu <fengguang.wu@intel.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 46c77e2b
@@ -39,13 +39,18 @@
 struct wb_writeback_work {
         long nr_pages;
         struct super_block *sb;
-        unsigned long *older_than_this;
+        /*
+         * Write only inodes dirtied before this time. Don't forget to set
+         * older_than_this_is_set when you set this.
+         */
+        unsigned long older_than_this;
         enum writeback_sync_modes sync_mode;
         unsigned int tagged_writepages:1;
         unsigned int for_kupdate:1;
         unsigned int range_cyclic:1;
         unsigned int for_background:1;
         unsigned int for_sync:1;        /* sync(2) WB_SYNC_ALL writeback */
+        unsigned int older_than_this_is_set:1;
         enum wb_reason reason;          /* why was writeback initiated? */
         struct list_head list;          /* pending work list */
@@ -246,10 +251,10 @@ static int move_expired_inodes(struct list_head *delaying_queue,
         int do_sb_sort = 0;
         int moved = 0;
 
+        WARN_ON_ONCE(!work->older_than_this_is_set);
         while (!list_empty(delaying_queue)) {
                 inode = wb_inode(delaying_queue->prev);
-                if (work->older_than_this &&
-                    inode_dirtied_after(inode, *work->older_than_this))
+                if (inode_dirtied_after(inode, work->older_than_this))
                         break;
                 list_move(&inode->i_wb_list, &tmp);
                 moved++;
@@ -733,6 +738,8 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
                 .sync_mode = WB_SYNC_NONE,
                 .range_cyclic = 1,
                 .reason = reason,
+                .older_than_this = jiffies,
+                .older_than_this_is_set = 1,
         };
 
         spin_lock(&wb->list_lock);
@@ -791,12 +798,13 @@ static long wb_writeback(struct bdi_writeback *wb,
 {
         unsigned long wb_start = jiffies;
         long nr_pages = work->nr_pages;
-        unsigned long oldest_jif;
         struct inode *inode;
         long progress;
 
-        oldest_jif = jiffies;
-        work->older_than_this = &oldest_jif;
+        if (!work->older_than_this_is_set) {
+                work->older_than_this = jiffies;
+                work->older_than_this_is_set = 1;
+        }
 
         spin_lock(&wb->list_lock);
         for (;;) {
@@ -830,10 +838,10 @@ static long wb_writeback(struct bdi_writeback *wb,
                  * safe.
                  */
                 if (work->for_kupdate) {
-                        oldest_jif = jiffies -
+                        work->older_than_this = jiffies -
                                 msecs_to_jiffies(dirty_expire_interval * 10);
                 } else if (work->for_background)
-                        oldest_jif = jiffies;
+                        work->older_than_this = jiffies;
 
                 trace_writeback_start(wb->bdi, work);
                 if (list_empty(&wb->b_io))
@@ -1345,18 +1353,21 @@ EXPORT_SYMBOL(try_to_writeback_inodes_sb);
 
 /**
  * sync_inodes_sb - sync sb inode pages
- * @sb: the superblock
+ * @sb:                 the superblock
+ * @older_than_this:    timestamp
  *
  * This function writes and waits on any dirty inode belonging to this
- * super_block.
+ * superblock that has been dirtied before given timestamp.
  */
-void sync_inodes_sb(struct super_block *sb)
+void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this)
 {
         DECLARE_COMPLETION_ONSTACK(done);
         struct wb_writeback_work work = {
                 .sb = sb,
                 .sync_mode = WB_SYNC_ALL,
                 .nr_pages = LONG_MAX,
+                .older_than_this = older_than_this,
+                .older_than_this_is_set = 1,
                 .range_cyclic = 0,
                 .done = &done,
                 .reason = WB_REASON_SYNC,
......
@@ -27,10 +27,11 @@
  * wait == 1 case since in that case write_inode() functions do
  * sync_dirty_buffer() and thus effectively write one block at a time.
  */
-static int __sync_filesystem(struct super_block *sb, int wait)
+static int __sync_filesystem(struct super_block *sb, int wait,
+                             unsigned long start)
 {
         if (wait)
-                sync_inodes_sb(sb);
+                sync_inodes_sb(sb, start);
         else
                 writeback_inodes_sb(sb, WB_REASON_SYNC);
@@ -47,6 +48,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
 int sync_filesystem(struct super_block *sb)
 {
         int ret;
+        unsigned long start = jiffies;
 
         /*
          * We need to be protected against the filesystem going from
@@ -60,17 +62,17 @@ int sync_filesystem(struct super_block *sb)
         if (sb->s_flags & MS_RDONLY)
                 return 0;
 
-        ret = __sync_filesystem(sb, 0);
+        ret = __sync_filesystem(sb, 0, start);
         if (ret < 0)
                 return ret;
-        return __sync_filesystem(sb, 1);
+        return __sync_filesystem(sb, 1, start);
 }
 EXPORT_SYMBOL_GPL(sync_filesystem);
 
 static void sync_inodes_one_sb(struct super_block *sb, void *arg)
 {
         if (!(sb->s_flags & MS_RDONLY))
-                sync_inodes_sb(sb);
+                sync_inodes_sb(sb, *((unsigned long *)arg));
 }
 
 static void sync_fs_one_sb(struct super_block *sb, void *arg)
@@ -102,9 +104,10 @@ static void fdatawait_one_bdev(struct block_device *bdev, void *arg)
 SYSCALL_DEFINE0(sync)
 {
         int nowait = 0, wait = 1;
+        unsigned long start = jiffies;
 
         wakeup_flusher_threads(0, WB_REASON_SYNC);
-        iterate_supers(sync_inodes_one_sb, NULL);
+        iterate_supers(sync_inodes_one_sb, &start);
         iterate_supers(sync_fs_one_sb, &nowait);
         iterate_supers(sync_fs_one_sb, &wait);
         iterate_bdevs(fdatawrite_one_bdev, NULL);
......
@@ -918,7 +918,7 @@ xfs_flush_inodes(
         struct super_block *sb = mp->m_super;
 
         if (down_read_trylock(&sb->s_umount)) {
-                sync_inodes_sb(sb);
+                sync_inodes_sb(sb, jiffies);
                 up_read(&sb->s_umount);
         }
 }
......
@@ -97,7 +97,7 @@ void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
 int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
 int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
                                   enum wb_reason reason);
-void sync_inodes_sb(struct super_block *);
+void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this);
 void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
 void inode_wait_for_writeback(struct inode *inode);
......
@@ -287,11 +287,11 @@ TRACE_EVENT(writeback_queue_io,
                 __field(int, reason)
         ),
         TP_fast_assign(
-                unsigned long *older_than_this = work->older_than_this;
+                unsigned long older_than_this = work->older_than_this;
                 strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
-                __entry->older = older_than_this ? *older_than_this : 0;
+                __entry->older = older_than_this;
                 __entry->age = older_than_this ?
-                        (jiffies - *older_than_this) * 1000 / HZ : -1;
+                        (jiffies - older_than_this) * 1000 / HZ : -1;
                 __entry->moved = moved;
                 __entry->reason = work->reason;
         ),
......