/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))

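/*
 * Worked example (illustration, not part of the original source): 4096UL
 * here is a size in KB, and the shift converts KB into pages. Assuming
 * 4 KB pages (PAGE_CACHE_SHIFT == 12):
 *
 *	4096 KB >> (12 - 10) = 4096 >> 2 = 1024 pages = 4 MB
 */
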
/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

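/*
 * Illustration (not part of the original source): a data-integrity
 * request against one superblock is expressed as a work item roughly
 * like
 *
 *	struct wb_writeback_work work = {
 *		.sb		= sb,
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_pages	= LONG_MAX,
 *		.done		= &done,
 *	};
 *
 * (compare sync_inodes_sb() below); the flusher thread dequeues such
 * items from bdi->work_list and hands them to wb_writeback().
 */
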
/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (strcmp(sb->s_type->name, "bdev") == 0)
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
{
	if (bdi->wb.task) {
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * The bdi thread isn't there, wake up the forker thread which
		 * will create and run it.
		 */
		wake_up_process(default_backing_dev_info.wb.task);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	if (!bdi->wb.task)
		trace_writeback_nothread(bdi, work);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		if (bdi->wb.task) {
			trace_writeback_nowork(bdi);
			wake_up_process(bdi->wb.task);
		}
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;

	bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion. The caller need not hold the sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason)
{
	__bdi_start_writeback(bdi, nr_pages, true, reason);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	spin_lock_bh(&bdi->wb_lock);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

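/*
 * Illustration (not part of the original source): on 32-bit, jiffies
 * wraps roughly every 49.7 days at HZ=1000 (2^32 / 1000 / 86400 ~= 49.7).
 * A constantly-redirtied inode whose dirtied_when is stuck can then look
 * as if it was dirtied "in the future" even though the timestamp is
 * ancient; the time_before_eq() check above filters those out.
 */
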
/*
 * Move expired (dirtied after work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       struct wb_writeback_work *work)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (work->older_than_this &&
		    inode_dirtied_after(inode, *work->older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;
	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Find proper writeback list for the inode depending on its current state and
 * possibly also change of its state while we were doing writeback.  Here we
 * handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can be called only by flusher thread - no one else
 * processes all inodes in writeback lists and requeueing inodes behind flusher
 * thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention. Each inode is tagged and synced in one
	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bails out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else {
		/* The inode is clean. Remove from writeback lists. */
		list_del_init(&inode->i_wb_list);
	}
}

/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
			 struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);
	/* Clear I_DIRTY_PAGES if we've written out all dirty pages */
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state &= ~I_DIRTY_PAGES;
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}

/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed to be called for writing back one inode which
 * we reach e.g. from the filesystem. The flusher thread uses
 * __writeback_single_inode() and does more profound writeback list handling
 * in writeback_sb_inodes().
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip inode if it is clean. We don't want to mess with writeback
	 * lists in this function since flusher thread may be doing for example
	 * sync in parallel and if we move the inode, it could get skipped. So
	 * here we make sure inode is on some writeback list and leave it there
	 * unless we have completely cleaned the inode.
	 */
	if (!(inode->i_state & I_DIRTY))
		goto out;
	inode->i_state |= I_SYNC;
	spin_unlock(&inode->i_lock);

	ret = __writeback_single_inode(inode, wb, wbc);

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	/*
	 * If inode is clean, remove it from writeback lists. Otherwise don't
	 * touch it. See comment above for explanation.
	 */
	if (!(inode->i_state & I_DIRTY))
		list_del_init(&inode->i_wb_list);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}

static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                   (quickly) tag currently dirty pages
	 *                   (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(bdi->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}

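/*
 * Illustrative arithmetic (not part of the original source), assuming
 * 4 KB pages so MIN_WRITEBACK_PAGES == 1024: with avg_write_bandwidth at
 * 25600 pages/s (100 MB/s) and global_dirty_limit / DIRTY_SCOPE at
 * 40000 pages, the WB_SYNC_NONE branch takes min(12800, 40000) = 12800,
 * caps it at work->nr_pages, then rounds:
 *
 *	round_down(12800 + 1024, 1024) = 13312 pages (~52 MB) per chunk
 */
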
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @only_this_sb is true, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed, the
		 * first kind does not need periodic writeout yet, and for the
		 * latter kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		__iget(inode);
		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback. So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC)
			inode_wait_for_writeback(inode);
		inode->i_state |= I_SYNC;
		spin_unlock(&inode->i_lock);
		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		__writeback_single_inode(inode, wb, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY))
			wrote++;
		requeue_inode(inode, wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		iput(inode);
		cond_resched();
		spin_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!grab_super_passive(sb)) {
			/*
			 * grab_super_passive() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		drop_super(sb);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

static bool over_bground_thresh(struct backing_dev_info *bdi)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
				bdi_dirty_limit(bdi, background_thresh))
		return true;

	return false;
}

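/*
 * Rough numbers for illustration (not part of the original source): with
 * the default dirty_background_ratio of 10% and ~4 GB of dirtyable
 * memory, background_thresh is on the order of 100k 4 KB pages (~400 MB);
 * crossing either the global total or this bdi's share of it makes
 * over_bground_thresh() return true.
 */
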
/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb->bdi))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			spin_unlock(&wb->list_lock);
			inode_wait_for_writeback(inode);
			spin_unlock(&inode->i_lock);
			spin_lock(&wb->list_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}

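/*
 * Worked example (illustration, not part of the original source): with
 * the default dirty_expire_interval of 3000 centisecs, the for_kupdate
 * branch above sets oldest_jif = jiffies - msecs_to_jiffies(30000), so
 * only inodes dirtied more than 30 seconds ago are queued for IO.
 */
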
/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh(wb->bdi)) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

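/*
 * Illustrative timing (not part of the original source): with the default
 * dirty_writeback_interval of 500 centisecs, "expired" above equals
 * last_old_flush + msecs_to_jiffies(5000), so kupdate-style flushing runs
 * at most once every 5 seconds.
 */
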
/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_thread(void *data)
{
	struct bdi_writeback *wb = data;
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	current->flags |= PF_SWAPWRITE;
	set_freezable();
	wb->last_active = jiffies;

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	trace_writeback_thread_start(bdi);

	while (!kthread_freezable_should_stop(NULL)) {
		/*
		 * Remove own delayed wake-up timer, since we are already awake
		 * and we'll take care of the periodic write-back.
		 */
		del_timer(&wb->wakeup_timer);

		pages_written = wb_do_writeback(wb, 0);

		trace_writeback_pages_written(pages_written);

		if (pages_written)
			wb->last_active = jiffies;

		set_current_state(TASK_INTERRUPTIBLE);
		if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			continue;
		}

		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
		else {
			/*
			 * We have nothing to do, so can go sleep without any
			 * timeout and save power. When a work is queued or
			 * something is made dirty - we will be woken up.
			 */
			schedule();
		}
	}

	/* Flush any work that raced with us exiting */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	trace_writeback_thread_stop(bdi);
	return 0;
}


/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, reason);
	}
	rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&bdi->wb.list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);

}
EXPORT_SYMBOL(__mark_inode_dirty);

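/*
 * Usage sketch (illustrative, not part of the original source):
 * filesystems normally reach __mark_inode_dirty() through the
 * mark_inode_dirty() and mark_inode_dirty_sync() wrappers after updating
 * in-core metadata, e.g.
 *
 *	inode->i_size = new_size;	(hypothetical update)
 *	mark_inode_dirty(inode);	(ends up here with I_DIRTY flags)
 *
 * The first dirtying of a clean inode is what puts it on
 * bdi->wb.b_dirty and, if needed, kicks the flusher thread.
 */
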
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
			    unsigned long nr,
			    enum wb_reason reason)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
		.reason			= reason,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);

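/*
 * Usage sketch (illustrative, not part of the original source): a caller
 * wanting to nudge a few megabytes of one filesystem's dirty data toward
 * disk without waiting for the IO might do
 *
 *	down_read(&sb->s_umount);
 *	writeback_inodes_sb_nr(sb, 1024, WB_REASON_FREE_MORE_MEM);
 *	up_read(&sb->s_umount);
 *
 * The wait_for_completion() above only covers the flusher processing the
 * work item; with WB_SYNC_NONE the submitted IO itself is not waited on.
 */
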
/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * writeback_inodes_sb_if_idle	-	start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb(sb, reason);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);

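/*
 * Usage sketch (illustrative, not part of the original source): callers
 * that flush opportunistically, e.g. before trying to reclaim filesystem
 * space, can avoid piling onto an already-running flush:
 *
 *	if (writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE))
 *		;	(writeback was kicked off)
 *
 * Note this helper takes s_umount itself, unlike writeback_inodes_sb().
 */
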
/**
 * writeback_inodes_sb_nr_if_idle	-	start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
				   unsigned long nr,
				   enum wb_reason reason)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb_nr(sb, nr, reason);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);

/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
		.reason		= WB_REASON_SYNC,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
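
/*
 * Usage sketch (illustrative, not part of the original source): an fsync
 * path that has already flushed the data range can write just the inode:
 *
 *	int err = sync_inode_metadata(inode, 1);	(synchronous)
 *
 * With .nr_to_write set to 0, essentially no data pages are queued and
 * the pass boils down to write_inode() on the inode itself.
 */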