/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  i.e., data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_args {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
};

/*
 * Work items for the bdi_writeback threads
 */
struct bdi_work {
	struct list_head list;		/* pending work list */
	struct rcu_head rcu_head;	/* for RCU free/clear of work */

	unsigned long seen;		/* threads that have seen this work */
	atomic_t pending;		/* number of threads still to do work */

	struct wb_writeback_args args;	/* writeback arguments */

	unsigned long state;		/* flag bits, see WS_* */
};

enum {
	WS_INPROGRESS = 0,
	WS_ONSTACK,
};

static inline void bdi_work_init(struct bdi_work *work,
				 struct wb_writeback_args *args)
{
	INIT_RCU_HEAD(&work->rcu_head);
	work->args = *args;
	__set_bit(WS_INPROGRESS, &work->state);
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return !list_empty(&bdi->work_list);
}

static void bdi_work_free(struct rcu_head *head)
{
	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);

	clear_bit(WS_INPROGRESS, &work->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&work->state, WS_INPROGRESS);

	if (!test_bit(WS_ONSTACK, &work->state))
		kfree(work);
}

static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
{
	/*
	 * The caller has retrieved the work arguments from this work,
	 * drop our reference. If this is the last ref, delete and free it
	 */
	if (atomic_dec_and_test(&work->pending)) {
		struct backing_dev_info *bdi = wb->bdi;

		spin_lock(&bdi->wb_lock);
		list_del_rcu(&work->list);
		spin_unlock(&bdi->wb_lock);

		call_rcu(&work->rcu_head, bdi_work_free);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
{
	work->seen = bdi->wb_mask;
	BUG_ON(!work->seen);
	atomic_set(&work->pending, bdi->wb_cnt);
	BUG_ON(!bdi->wb_cnt);

	/*
	 * list_add_tail_rcu() contains the necessary barriers to
	 * make sure the above stores are seen before the item is
	 * noticed on the list
	 */
	spin_lock(&bdi->wb_lock);
	list_add_tail_rcu(&work->list, &bdi->work_list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * If the default thread isn't there, make sure we add it. When
	 * it gets created and wakes up, we'll run this work.
	 */
	if (unlikely(list_empty_careful(&bdi->wb_list)))
		wake_up_process(default_backing_dev_info.wb.task);
	else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/*
 * Used for on-stack allocated work items. The caller needs to wait until
 * the wb threads have acked the work before it's safe to continue.
 */
static void bdi_wait_on_work_done(struct bdi_work *work)
{
	wait_on_bit(&work->state, WS_INPROGRESS, bdi_sched_wait,
		    TASK_UNINTERRUPTIBLE);
}

static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
				 struct wb_writeback_args *args)
{
	struct bdi_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wake up the thread for old dirty data writeback
	 */
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		bdi_work_init(work, args);
		bdi_queue_work(bdi, work);
	} else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/**
 * bdi_queue_work_onstack - start and wait for writeback
 * @args: parameters to control the work queue writeback
 *
 * Description:
 *   This function initiates writeback and waits for the operation to
 *   complete. Callers must hold the sb s_umount semaphore for
 *   reading, to avoid having the super disappear before we are done.
 */
static void bdi_queue_work_onstack(struct wb_writeback_args *args)
{
	struct bdi_work work;

	bdi_work_init(&work, args);
	__set_bit(WS_ONSTACK, &work.state);

	bdi_queue_work(args->sb->s_bdi, &work);
	bdi_wait_on_work_done(&work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	struct wb_writeback_args args = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_pages	= nr_pages,
		.range_cyclic	= 1,
	};

	bdi_alloc_queue_work(bdi, &args);
}
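
/*
 * Example (illustrative sketch, not part of the original file): a
 * caller that wants to opportunistically flush up to 1024 pages from
 * an inode's backing device could do:
 *
 *	bdi_start_writeback(inode_to_bdi(inode), 1024);
 *
 * The call only queues the work (or, if the allocation fails, wakes
 * the flusher thread for old-data writeback) and returns immediately;
 * it never waits for the IO itself.
 */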

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This does WB_SYNC_NONE background writeback. The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	struct wb_writeback_args args = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_pages	= LONG_MAX,
		.for_background = 1,
		.range_cyclic	= 1,
	};

	bdi_alloc_queue_work(bdi, &args);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
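
/*
 * Worked example of the wraparound guard above (illustrative numbers):
 * on a 32-bit box with HZ=1000, jiffies wraps about every 49.7 days and
 * time_after() can only compare points less than half that window
 * apart.  Suppose dirtied_when got stuck at 0 and ~25 days pass, so
 * jiffies is now ~2.16e9, past the 2^31 halfway point:
 *
 *	time_after(0, t)		-> true  (0 "appears" future)
 *	time_before_eq(0, jiffies)	-> false (0 is not <= jiffies in
 *						  wrapped arithmetic)
 *
 * so the CONFIG_64BIT branch rejects the stale timestamp instead of
 * stalling writeback of the whole bdi forever.
 */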

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;

	while (!list_empty(delaying_queue)) {
		inode = list_entry(delaying_queue->prev, struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_list, &tmp);
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		return;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		inode = list_entry(tmp.prev, struct inode, i_list);
		sb = inode->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = list_entry(pos, struct inode, i_list);
			if (inode->i_sb == sb)
				list_move(&inode->i_list, dispatch_queue);
		}
	}
}
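
/*
 * Example (illustrative): if the expired part of b_dirty holds inodes
 * from two superblocks interleaved as
 *
 *	A1 B1 A2 B2 A3
 *
 * the second pass above emits them onto @dispatch_queue grouped by
 * superblock (all the A inodes together, then all the B inodes), so
 * writeback_sb_inodes() can work through one superblock at a time
 * instead of bouncing between them.
 */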

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, wb->b_io.prev);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	}
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode)
 *
 * If wbc->sync_mode is WB_SYNC_ALL, we wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on b_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
			/*
			 * More pages get dirtied by a fast dirtier.
			 */
			goto select_queue;
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * At least XFS will redirty the inode during the
			 * writeback (delalloc) and on io completion (isize).
			 */
			redirty_tail(inode);
		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything. Redirty
			 * the inode; move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
select_queue:
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}
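
/*
 * Summary of the requeue decisions above (added for clarity):
 *
 *	I_DIRTY_PAGES left over, kupdate pass	-> requeue_io()/redirty_tail()
 *	inode redirtied (I_DIRTY set again)	-> redirty_tail()
 *	mapping still tagged dirty		-> b_more_io or b_dirty
 *	clean, i_count held			-> inode_in_use list
 *	clean, unused				-> inode_unused list
 */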

/*
 * For background writeback the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 */
static bool pin_sb_for_writeback(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (list_empty(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root)
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}

/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @only_this_sb is true, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 *
 * Return 1 if the caller's writeback routine should be
 * interrupted. Otherwise return 0.
 */
static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
		struct writeback_control *wbc, bool only_this_sb)
{
	while (!list_empty(&wb->b_io)) {
		long pages_skipped;
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);

		if (inode->i_sb != sb) {
			if (only_this_sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			return 0;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}
		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from extra jobs and livelock.
		 */
		if (inode_dirtied_after(inode, wbc->wb_start))
			return 1;

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			return 1;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}
	/* b_io is empty */
	return 1;
}

void writeback_inodes_wb(struct bdi_writeback *wb,
		struct writeback_control *wbc)
{
	int ret = 0;

	wbc->wb_start = jiffies; /* livelock avoidance */
	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);
		struct super_block *sb = inode->i_sb;

		if (!pin_sb_for_writeback(sb)) {
			requeue_io(inode);
			continue;
		}
		ret = writeback_sb_inodes(sb, wb, wbc, false);
		drop_super(sb);

		if (ret)
			break;
	}
	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

static void __writeback_inodes_sb(struct super_block *sb,
		struct bdi_writeback *wb, struct writeback_control *wbc)
{
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	wbc->wb_start = jiffies; /* livelock avoidance */
	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);
	writeback_sb_inodes(sb, wb, wbc, true);
	spin_unlock(&inode_lock);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty state each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_args *args)
{
	struct writeback_control wbc = {
		.sync_mode		= args->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= args->for_kupdate,
		.for_background		= args->for_background,
		.range_cyclic		= args->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	struct inode *inode;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (args->nr_pages <= 0)
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (args->for_background && !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		if (args->sb)
			__writeback_inodes_sb(args->sb, wb, &wbc);
		else
			writeback_inodes_wb(wb, &wbc);
		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

		/*
		 * If we consumed everything, see if we have more
		 */
		if (wbc.nr_to_write <= 0)
			continue;
		/*
		 * Didn't write everything and we don't have more IO, bail
		 */
		if (!wbc.more_io)
			break;
		/*
		 * Did we write something? Try for more
		 */
		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
			continue;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		spin_lock(&inode_lock);
		if (!list_empty(&wb->b_more_io)) {
			inode = list_entry(wb->b_more_io.prev,
						struct inode, i_list);
			inode_wait_for_writeback(inode);
		}
		spin_unlock(&inode_lock);
	}

	return wrote;
}
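
/*
 * Worked example of the accounting above (illustrative numbers):
 * suppose args->nr_pages is 3000.  Each pass sets wbc.nr_to_write to
 * MAX_WRITEBACK_PAGES (1024); if a pass ends with nr_to_write at 24,
 * then 1000 pages were written, nr_pages drops to 2000 and wrote grows
 * by 1000.  A pass that ends with nr_to_write still at 1024 wrote
 * nothing, and unless wbc.more_io is set the loop bails out.
 */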

/*
 * Return the next bdi_work struct that hasn't been processed by this
 * wb thread yet. ->seen is initially set for each thread that exists
 * for this device; when a thread first notices a piece of work it
 * clears its bit. Depending on writeback type, the thread will notify
 * completion on either receiving the work (WB_SYNC_NONE) or after
 * it is done (WB_SYNC_ALL).
 */
static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
					   struct bdi_writeback *wb)
{
	struct bdi_work *work, *ret = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(work, &bdi->work_list, list) {
		if (!test_bit(wb->nr, &work->seen))
			continue;
		clear_bit(wb->nr, &work->seen);

		ret = work;
		break;
	}

	rcu_read_unlock();
	return ret;
}
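
/*
 * Example (illustrative): a bdi with a single flusher thread has
 * wb_mask == 1 and wb->nr == 0.  bdi_queue_work() sets work->seen to
 * that mask, so the first call here from the thread finds bit 0 set,
 * clears it and returns the work; a later call skips the same item
 * because its bit is now clear.
 */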

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	if (nr_pages) {
		struct wb_writeback_args args = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &args);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct bdi_work *work;
	long wrote = 0;

	while ((work = get_next_work_item(bdi, wb)) != NULL) {
		struct wb_writeback_args args = work->args;

		/*
		 * Override sync mode, in case we must wait for completion
		 */
		if (force_wait)
			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;

		/*
		 * If this isn't a data integrity operation, just notify
		 * that we have seen this work and we are now starting it.
		 */
		if (!test_bit(WS_ONSTACK, &work->state))
			wb_clear_pending(wb, work);

		wrote += wb_writeback(wb, &args);

		/*
		 * This is a data integrity writeback, so only do the
		 * notification when we have completed the work.
		 */
		if (test_bit(WS_ONSTACK, &work->state))
			wb_clear_pending(wb, work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
	unsigned long last_active = jiffies;
	unsigned long wait_jiffies = -1UL;
	long pages_written;

	while (!kthread_should_stop()) {
		pages_written = wb_do_writeback(wb, 0);

		if (pages_written)
			last_active = jiffies;
		else if (wait_jiffies != -1UL) {
			unsigned long max_idle;

			/*
			 * Longest period of inactivity that we tolerate. If we
			 * see dirty data again later, the task will get
			 * recreated automatically.
			 */
			max_idle = max(5UL * 60 * HZ, wait_jiffies);
			if (time_after(jiffies, max_idle + last_active))
				break;
		}

		if (dirty_writeback_interval) {
			wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
			schedule_timeout_interruptible(wait_jiffies);
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			if (list_empty_careful(&wb->bdi->work_list) &&
			    !kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}

		try_to_freeze();
	}

	return 0;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	struct backing_dev_info *bdi;
	struct wb_writeback_args args = {
		.sync_mode	= WB_SYNC_NONE,
	};

	if (nr_pages) {
		args.nr_pages = nr_pages;
	} else {
		args.nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		bdi_alloc_queue_work(bdi, &args);
	}
	rcu_read_unlock();
}
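
/*
 * Example (illustrative): sys_sync()-style callers kick everything
 * with
 *
 *	wakeup_flusher_threads(0);
 *
 * which queues WB_SYNC_NONE work sized to the current dirty page count
 * on every bdi that has dirty IO, then returns without waiting.
 */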

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
			struct backing_dev_info *bdi = wb->bdi;

			if (bdi_cap_writeback_dirty(bdi) &&
			    !test_bit(BDI_registered, &bdi->state)) {
				WARN_ON(1);
				printk(KERN_ERR "bdi-%s not registered\n",
								bdi->name);
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &wb->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
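
/*
 * For reference (sketch of the include/linux/fs.h wrappers mentioned in
 * the kernel-doc above, reproduced here for illustration only):
 *
 *	static inline void mark_inode_dirty(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY);
 *	}
 *
 *	static inline void mark_inode_dirty_sync(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY_SYNC);
 *	}
 */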

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping;

		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
			continue;
		mapping = inode->i_mapping;
		if (mapping->nrpages == 0)
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have
		 * been removed from s_inodes list while we dropped the
		 * inode_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it
		 * under inode_lock. So we keep the reference and iput
		 * it later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	args.nr_pages = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	bdi_queue_work_onstack(&args);
}
EXPORT_SYMBOL(writeback_inodes_sb);
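
/*
 * Example (illustrative): a sync_filesystem()-style caller, already
 * holding sb->s_umount for reading, starts opportunistic writeback of
 * the whole superblock with
 *
 *	writeback_inodes_sb(sb);
 *
 * and only later follows up with sync_inodes_sb(sb) when it needs the
 * data-integrity guarantee.
 */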

/**
 * writeback_inodes_sb_if_idle	-	start writeback if none underway
 * @sb: the superblock
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb(sb);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);

/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work_onstack(&args);
	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);
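
/*
 * Example (illustrative): an fsync()-style caller that wants the inode
 * metadata written synchronously can build its own writeback_control
 * and call sync_inode() directly:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= 0,	(metadata only, skip data pages)
 *	};
 *
 *	err = sync_inode(inode, &wbc);
 *
 * This is the pattern used by some filesystems' fsync implementations.
 */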