/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_args {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	int for_kupdate:1;
	int range_cyclic:1;
	int for_background:1;
};

/*
 * Work items for the bdi_writeback threads
 */
struct bdi_work {
	struct list_head list;		/* pending work list */
	struct rcu_head rcu_head;	/* for RCU free/clear of work */

	unsigned long seen;		/* threads that have seen this work */
	atomic_t pending;		/* number of threads still to do work */

	struct wb_writeback_args args;	/* writeback arguments */

	unsigned long state;		/* flag bits, see WS_* */
};

enum {
	WS_USED_B = 0,
	WS_ONSTACK_B,
};

#define WS_USED (1 << WS_USED_B)
#define WS_ONSTACK (1 << WS_ONSTACK_B)

static inline bool bdi_work_on_stack(struct bdi_work *work)
{
	return test_bit(WS_ONSTACK_B, &work->state);
}

static inline void bdi_work_init(struct bdi_work *work,
				 struct wb_writeback_args *args)
{
	INIT_RCU_HEAD(&work->rcu_head);
	work->args = *args;
	work->state = WS_USED;
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return !list_empty(&bdi->work_list);
}

static void bdi_work_clear(struct bdi_work *work)
{
	clear_bit(WS_USED_B, &work->state);
	smp_mb__after_clear_bit();
	/*
	 * work can have disappeared at this point. bit waitq functions
	 * should be able to tolerate this, provided bdi_sched_wait does
	 * not dereference its pointer argument.
	 */
	wake_up_bit(&work->state, WS_USED_B);
}

static void bdi_work_free(struct rcu_head *head)
{
	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);

	if (!bdi_work_on_stack(work))
		kfree(work);
	else
		bdi_work_clear(work);
}

static void wb_work_complete(struct bdi_work *work)
{
	const enum writeback_sync_modes sync_mode = work->args.sync_mode;
	int onstack = bdi_work_on_stack(work);

	/*
	 * For allocated work, we can clear the done/seen bit right here.
	 * For on-stack work, we need to postpone both the clear and free
	 * to after the RCU grace period, since the stack could be invalidated
	 * as soon as bdi_work_clear() has done the wakeup.
	 */
	if (!onstack)
		bdi_work_clear(work);
	if (sync_mode == WB_SYNC_NONE || onstack)
		call_rcu(&work->rcu_head, bdi_work_free);
}

static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
{
	/*
	 * The caller has retrieved the work arguments from this work,
	 * drop our reference. If this is the last ref, delete and free it
	 */
	if (atomic_dec_and_test(&work->pending)) {
		struct backing_dev_info *bdi = wb->bdi;

		spin_lock(&bdi->wb_lock);
		list_del_rcu(&work->list);
		spin_unlock(&bdi->wb_lock);

		wb_work_complete(work);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
{
	work->seen = bdi->wb_mask;
	BUG_ON(!work->seen);
	atomic_set(&work->pending, bdi->wb_cnt);
	BUG_ON(!bdi->wb_cnt);

	/*
	 * list_add_tail_rcu() contains the necessary barriers to
	 * make sure the above stores are seen before the item is
	 * noticed on the list
	 */
	spin_lock(&bdi->wb_lock);
	list_add_tail_rcu(&work->list, &bdi->work_list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * If the default thread isn't there, make sure we add it. When
	 * it gets created and wakes up, we'll run this work.
	 */
	if (unlikely(list_empty_careful(&bdi->wb_list)))
		wake_up_process(default_backing_dev_info.wb.task);
	else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}
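
/*
 * Editorial note (not in the original file): the work item is published
 * with list_add_tail_rcu() above and consumed under rcu_read_lock() in
 * get_next_work_item() below, e.g.:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(work, &bdi->work_list, list)
 *		...
 *	rcu_read_unlock();
 *
 * which is why freeing must go through call_rcu() (see bdi_work_free()).
 */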

/*
 * Used for on-stack allocated work items. The caller needs to wait until
 * the wb threads have acked the work before it's safe to continue.
 */
static void bdi_wait_on_work_clear(struct bdi_work *work)
{
	wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
		    TASK_UNINTERRUPTIBLE);
}

static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
				 struct wb_writeback_args *args)
{
	struct bdi_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		bdi_work_init(work, args);
		bdi_queue_work(bdi, work);
	} else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/**
 * bdi_sync_writeback - start and wait for writeback
 * @bdi: the backing device to write from
 * @sb: write inodes from this super_block
 *
 * Description:
 *   This does WB_SYNC_ALL data integrity writeback and waits for the
 *   IO to complete. Callers must hold the sb s_umount semaphore for
 *   reading, to avoid having the super disappear before we are done.
 */
static void bdi_sync_writeback(struct backing_dev_info *bdi,
			       struct super_block *sb)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
	};
	struct bdi_work work;

	bdi_work_init(&work, &args);
	work.state |= WS_ONSTACK;

	bdi_queue_work(bdi, &work);
	bdi_wait_on_work_clear(&work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @sb: write inodes from this super_block
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
			 long nr_pages)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
		.nr_pages	= nr_pages,
		.range_cyclic	= 1,
	};

	/*
	 * We treat @nr_pages=0 as the special case to do background writeback,
	 * ie. to sync pages until the background dirty threshold is reached.
	 */
	if (!nr_pages) {
		args.nr_pages = LONG_MAX;
		args.for_background = 1;
	}

	bdi_alloc_queue_work(bdi, &args);
}
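
/*
 * Illustrative usage (editorial, not from the original file): a caller
 * that wants to kick off writeback of ~1024 pages without waiting could
 * do
 *
 *	bdi_start_writeback(bdi, NULL, 1024);
 *
 * while nr_pages == 0 instead requests background writeback until the
 * dirty numbers drop below the background threshold, as described above.
 */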

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &wb->b_dirty);
}
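
/*
 * Editorial example: b_dirty keeps the most recently dirtied inode at its
 * head.  If the current head was dirtied at jiffies=1000 and this inode
 * still carries dirtied_when=900, moving it to the head unchanged would
 * break that ordering, so dirtied_when is re-stamped with the current
 * jiffies first.
 */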

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
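
/*
 * Editorial example of the 32-bit problem handled above: with HZ=1000,
 * jiffies wraps roughly every 49.7 days.  An inode that is constantly
 * redirtied can keep a dirtied_when from before the wrap, which
 * time_after() would misread as "in the future" forever; the extra
 * time_before_eq(dirtied_when, jiffies) check rejects such values.
 */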

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;

	while (!list_empty(delaying_queue)) {
		inode = list_entry(delaying_queue->prev, struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_list, &tmp);
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		return;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		inode = list_entry(tmp.prev, struct inode, i_list);
		sb = inode->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = list_entry(pos, struct inode, i_list);
			if (inode->i_sb == sb)
				list_move(&inode->i_list, dispatch_queue);
		}
	}
}
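
/*
 * Editorial example: if @tmp holds expired inodes from superblocks A and
 * B interleaved as A1 B1 A2, the loop above first moves A1 and A2 (the
 * superblock of the eldest entry) to @dispatch_queue, then B1, so inodes
 * of one superblock stay together and writeback does not keep bouncing
 * between filesystems.
 */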

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, wb->b_io.prev);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	do {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	} while (inode->i_state & I_SYNC);
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has ref on the inode (either via __iget or via syscall against an fd)
 * or the inode has I_WILL_FREE set (via generic_forget_inode)
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * have completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
			/*
			 * More pages get dirtied by a fast dirtier.
			 */
			goto select_queue;
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * At least XFS will redirty the inode during the
			 * writeback (delalloc) and on io completion (isize).
			 */
			redirty_tail(inode);
		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything. Redirty
			 * the inode; Move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had a good
			 * reason for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
select_queue:
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

static void unpin_sb_for_writeback(struct super_block **psb)
{
	struct super_block *sb = *psb;

	if (sb) {
		up_read(&sb->s_umount);
		put_super(sb);
		*psb = NULL;
	}
}

/*
 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 *
 * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
 * 1 if we failed.
 */
static int pin_sb_for_writeback(struct writeback_control *wbc,
				struct inode *inode, struct super_block **psb)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * If this sb is already pinned, nothing more to do. If not and
	 * *psb is non-NULL, unpin the old one first
	 */
	if (sb == *psb)
		return 0;
	else if (*psb)
		unpin_sb_for_writeback(psb);

	/*
	 * Caller must already hold the ref for this
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		WARN_ON(!rwsem_is_locked(&sb->s_umount));
		return 0;
	}

	spin_lock(&sb_lock);
	sb->s_count++;
	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root) {
			spin_unlock(&sb_lock);
			goto pinned;
		}
		/*
		 * umounted, drop rwsem again and fall through to failure
		 */
		up_read(&sb->s_umount);
	}

	sb->s_count--;
	spin_unlock(&sb_lock);
	return 1;
pinned:
	*psb = sb;
	return 0;
}

static void writeback_inodes_wb(struct bdi_writeback *wb,
				struct writeback_control *wbc)
{
	struct super_block *sb = wbc->sb, *pin_sb = NULL;
	const unsigned long start = jiffies;	/* livelock avoidance */

	spin_lock(&inode_lock);

	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = list_entry(wb->b_io.prev,
						struct inode, i_list);
		long pages_skipped;

		/*
		 * super block given and doesn't match, skip this inode
		 */
		if (sb && sb != inode->i_sb) {
			redirty_tail(inode);
			continue;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}

		/*
		 * Was this inode dirtied after this writeback pass started?
		 * Skipping it keeps sync from doing extra jobs and livelocking.
		 */
		if (inode_dirtied_after(inode, start))
			break;

		if (pin_sb_for_writeback(wbc, inode, &pin_sb)) {
			requeue_io(inode);
			continue;
		}

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));

		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}

	unpin_sb_for_writeback(&pin_sb);

	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

void writeback_inodes_wbc(struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = wbc->bdi;

	writeback_inodes_wb(&bdi->wb, wbc);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}
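
/*
 * Editorial note: get_dirty_limits() derives background_thresh from the
 * vm.dirty_background_ratio / dirty_background_bytes sysctls, so the
 * check above asks "is the system still over its background dirty
 * target?" and background writeback keeps going until it is not.
 */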

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_args *args)
{
	struct writeback_control wbc = {
		.bdi			= wb->bdi,
		.sb			= args->sb,
		.sync_mode		= args->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= args->for_kupdate,
		.for_background		= args->for_background,
		.range_cyclic		= args->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	struct inode *inode;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (args->nr_pages <= 0)
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (args->for_background && !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes_wb(wb, &wbc);
		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

		/*
		 * If we consumed everything, see if we have more
		 */
		if (wbc.nr_to_write <= 0)
			continue;
		/*
		 * Didn't write everything and we don't have more IO, bail
		 */
		if (!wbc.more_io)
			break;
		/*
		 * Did we write something? Try for more
		 */
		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
			continue;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		spin_lock(&inode_lock);
		if (!list_empty(&wb->b_more_io))  {
			inode = list_entry(wb->b_more_io.prev,
						struct inode, i_list);
			inode_wait_for_writeback(inode);
		}
		spin_unlock(&inode_lock);
	}

	return wrote;
}

/*
 * Return the next bdi_work struct that hasn't been processed by this
 * wb thread yet. ->seen is initially set for each thread that exists
 * for this device, when a thread first notices a piece of work it
 * clears its bit. Depending on writeback type, the thread will notify
 * completion on either receiving the work (WB_SYNC_NONE) or after
 * it is done (WB_SYNC_ALL).
 */
static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
					   struct bdi_writeback *wb)
{
	struct bdi_work *work, *ret = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(work, &bdi->work_list, list) {
		if (!test_bit(wb->nr, &work->seen))
			continue;
		clear_bit(wb->nr, &work->seen);

		ret = work;
		break;
	}

	rcu_read_unlock();
	return ret;
}
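
/*
 * Editorial example: with the common single flusher thread per bdi,
 * bdi->wb_mask is 0x1 and wb->nr is 0, so the first call finds bit 0
 * set in work->seen, clears it and returns the work; later calls from
 * the same thread skip it because the bit is already clear.
 */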

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	if (nr_pages) {
		struct wb_writeback_args args = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &args);
	}

	return 0;
}
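
/*
 * Editorial note: dirty_writeback_interval is kept in centiseconds (it
 * is exposed as vm.dirty_writeback_centisecs), hence the "* 10" above
 * to convert to milliseconds; the default of 500 gives kupdate-style
 * flushing a five second period.
 */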

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct bdi_work *work;
	long wrote = 0;

	while ((work = get_next_work_item(bdi, wb)) != NULL) {
		struct wb_writeback_args args = work->args;

		/*
		 * Override sync mode, in case we must wait for completion
		 */
		if (force_wait)
			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;

		/*
		 * If this isn't a data integrity operation, just notify
		 * that we have seen this work and we are now starting it.
		 */
		if (args.sync_mode == WB_SYNC_NONE)
			wb_clear_pending(wb, work);

		wrote += wb_writeback(wb, &args);

		/*
		 * This is a data integrity writeback, so only do the
		 * notification when we have completed the work.
		 */
		if (args.sync_mode == WB_SYNC_ALL)
			wb_clear_pending(wb, work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
	unsigned long last_active = jiffies;
	unsigned long wait_jiffies = -1UL;
	long pages_written;

	while (!kthread_should_stop()) {
		pages_written = wb_do_writeback(wb, 0);

		if (pages_written)
			last_active = jiffies;
		else if (wait_jiffies != -1UL) {
			unsigned long max_idle;

			/*
			 * Longest period of inactivity that we tolerate. If we
			 * see dirty data again later, the task will get
			 * recreated automatically.
			 */
			max_idle = max(5UL * 60 * HZ, wait_jiffies);
			if (time_after(jiffies, max_idle + last_active))
				break;
		}

		wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
		schedule_timeout_interruptible(wait_jiffies);
		try_to_freeze();
	}

	return 0;
}

/*
 * Schedule writeback for all backing devices. This does WB_SYNC_NONE
 * writeback, for integrity writeback see bdi_sync_writeback().
 */
static void bdi_writeback_all(struct super_block *sb, long nr_pages)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
	};
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;

		bdi_alloc_queue_work(bdi, &args);
	}

	rcu_read_unlock();
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	bdi_writeback_all(NULL, nr_pages);
}
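
/*
 * Illustrative usage (editorial): a caller that wants to flush
 * everything currently dirty, as sys_sync does, can pass 0:
 *
 *	wakeup_flusher_threads(0);
 *
 * which is converted above to "all dirty plus unstable pages" and then
 * queued as WB_SYNC_NONE work against every bdi with dirty IO.
 */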

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *  	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
			struct backing_dev_info *bdi = wb->bdi;

			if (bdi_cap_writeback_dirty(bdi) &&
			    !test_bit(BDI_registered, &bdi->state)) {
				WARN_ON(1);
				printk(KERN_ERR "bdi-%s not registered\n",
								bdi->name);
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &wb->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
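
/*
 * Illustrative usage (editorial): the mark_inode_dirty() and
 * mark_inode_dirty_sync() wrappers in <linux/fs.h> boil down to
 *
 *	__mark_inode_dirty(inode, I_DIRTY);
 *	__mark_inode_dirty(inode, I_DIRTY_SYNC);
 *
 * and set_page_dirty() reaches this function with I_DIRTY_PAGES, which
 * is why the lock-free fast path above must stay cheap and atomic.
 */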

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping;

		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
			continue;
		mapping = inode->i_mapping;
		if (mapping->nrpages == 0)
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have
		 * been removed from s_inodes list while we dropped the
		 * inode_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it
		 * under inode_lock. So we keep the reference and iput
		 * it later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	long nr_to_write;

	nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	bdi_start_writeback(sb->s_bdi, sb, nr_to_write);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * writeback_inodes_sb_if_idle	-	start writeback if none underway
 * @sb: the superblock
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		writeback_inodes_sb(sb);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
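
/*
 * Illustrative usage (editorial): a filesystem about to return ENOSPC
 * can try to flush delayed-allocation reservations first:
 *
 *	if (writeback_inodes_sb_if_idle(sb))
 *		goto retry;
 *
 * skipping the kick when the flusher thread is already busy on this bdi.
 */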

/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	bdi_sync_writeback(sb->s_bdi, sb);
	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);