/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};
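
/*
 * Illustrative note (not in the original file): work items in this file
 * are typically filled in like
 *
 *	struct wb_writeback_work work = {
 *		.nr_pages	= LONG_MAX,
 *		.sync_mode	= WB_SYNC_NONE,
 *		.for_background	= 1,
 *		.range_cyclic	= 1,
 *	};
 *
 * Compare wb_check_background_flush(), which passes such a stack item
 * straight to wb_writeback(), and sync_inodes_sb(), which queues one
 * (plus a completion to wait on) via bdi_queue_work().
 */
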
/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure so that the definition remains local to this
 * file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (strcmp(sb->s_type->name, "bdev") == 0)
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
{
	if (bdi->wb.task) {
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * The bdi thread isn't there, wake up the forker thread which
		 * will create and run it.
		 */
		wake_up_process(default_backing_dev_info.wb.task);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	if (!bdi->wb.task)
		trace_writeback_nothread(bdi, work);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		if (bdi->wb.task) {
			trace_writeback_nowork(bdi);
			wake_up_process(bdi->wb.task);
		}
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;

	bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	__bdi_start_writeback(bdi, nr_pages, true);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	spin_lock_bh(&bdi->wb_lock);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}
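
/*
 * Worked example (illustrative, not in the original file): b_dirty is
 * kept newest-first from the head, and move_expired_inodes() harvests
 * expired inodes from the tail.  If the current head entry was stamped
 * at jiffies == 1000 while the inode being redirtied still carries 900,
 * re-inserting it at the head unchanged would break that ordering, so
 * its dirtied_when is refreshed to the current time first.
 */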

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through
	 * spin_unlock(&wb->list_lock);
	 */

	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
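
/*
 * Worked example (illustrative, not in the original file): on 32-bit,
 * jiffies wraps roughly every 49.7 days at HZ=1000.  An inode stamped
 * shortly before a wrap makes time_after(dirtied_when, t) report a
 * "future" time that is really in the distant past; the extra
 * time_before_eq(dirtied_when, jiffies) test above rejects exactly that
 * wrapped-around case.
 */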

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	int moved;
	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
	trace_writeback_queue_io(wb, older_than_this, moved);
}
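
/*
 * Illustrative note (not in the original file): with a non-NULL
 * older_than_this, only inodes whose dirtied_when lies at or before
 * that timestamp are promoted to b_io; anything dirtied more recently
 * stays parked on b_dirty until a later queue_io() pass.
 */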

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode,
				     struct bdi_writeback *wb)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Write out an inode's dirty pages.  Called under wb->list_lock and
 * inode->i_lock.  Either the caller has an active reference on the inode or
 * the inode has I_WILL_FREE set.
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	assert_spin_locked(&wb->list_lock);
	assert_spin_locked(&inode->i_lock);

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode, wb);
			trace_writeback_single_inode_requeue(inode, wbc,
							     nr_to_write);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode, wb);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		/*
		 * Sync livelock prevention. Each inode is tagged and synced in
		 * one shot. If still dirty, it will be redirty_tail()'ed below.
		 * Update the dirty time to prevent enqueue and sync it again.
		 */
		if ((inode->i_state & I_DIRTY) &&
		    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
			inode->dirtied_when = jiffies;

		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.
			 */
			inode->i_state |= I_DIRTY_PAGES;
			if (wbc->nr_to_write <= 0) {
				/*
				 * slice used up: queue for next turn
				 */
				requeue_io(inode, wb);
			} else {
				/*
				 * Writeback blocked by something other than
				 * congestion. Delay the inode for some time to
				 * avoid spinning on the CPU (100% iowait)
				 * retrying writeback of the dirty page/inode
				 * that cannot be performed immediately.
				 */
				redirty_tail(inode, wb);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Filesystems can dirty the inode during writeback
			 * operations, such as delayed allocation during
			 * submission or metadata updates after data IO
			 * completion.
			 */
			redirty_tail(inode, wb);
		} else {
			/*
			 * The inode is clean.  At this point we either have
			 * a reference to the inode or it's on its way out.
			 * No need to add it back to the LRU.
			 */
			list_del_init(&inode->i_wb_list);
		}
	}
	inode_sync_complete(inode);
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}

static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                   (quickly) tag currently dirty pages
	 *                   (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(bdi->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}
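
/*
 * Worked example (illustrative, not in the original file), assuming
 * MIN_WRITEBACK_PAGES is 1024 (4MB of 4KiB pages): with
 * avg_write_bandwidth == 25600 pages/s, global_dirty_limit/DIRTY_SCOPE
 * == 20000 pages and a large work->nr_pages,
 *
 *	pages = min(25600 / 2, 20000)          = 12800
 *	pages = round_down(12800 + 1024, 1024) = 13312
 *
 * i.e. roughly half a second worth of writeout, aligned to a multiple
 * of MIN_WRITEBACK_PAGES and never smaller than it.
 */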

/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @work->sb is set, find and write all such inodes; inodes from
 * other superblocks are moved back onto the dirty list.  Otherwise
 * write only the ones which come up sequentially in reverse order, and
 * bounce back to the caller when an inode from another superblock is
 * hit.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed; the
		 * first kind does not need periodic writeout yet, and for the
		 * latter kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		__iget(inode);
		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		writeback_single_inode(inode, wb, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		if (!(inode->i_state & I_DIRTY))
			wrote++;
		if (wbc.pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode, wb);
		}
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		iput(inode);
		cond_resched();
		spin_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!grab_super_passive(sb)) {
			requeue_io(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		drop_super(sb);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, NULL);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) > background_thresh);
}
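
/*
 * Illustrative note (not in the original file): background_thresh is
 * derived from vm.dirty_background_ratio (or _bytes) by
 * global_dirty_limits(), so with the default 10% and 1GB of dirtyable
 * memory, background writeback keeps going while dirty + unstable-NFS
 * pages exceed roughly 100MB.
 */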

/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than one dirty_writeback_interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh())
			break;

		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
			work->older_than_this = &oldest_jif;
		}

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work->older_than_this);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io))  {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			inode_wait_for_writeback(inode, wb);
			spin_unlock(&inode->i_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh()) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}
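
/*
 * Worked example (illustrative, not in the original file):
 * dirty_writeback_interval is kept in centiseconds, so with its default
 * of 500 the expiry above is msecs_to_jiffies(500 * 10), i.e. one
 * kupdate-style flush at most every 5 seconds.
 */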

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_thread(void *data)
{
	struct bdi_writeback *wb = data;
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	current->flags |= PF_SWAPWRITE;
	set_freezable();
	wb->last_active = jiffies;

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	trace_writeback_thread_start(bdi);

	while (!kthread_should_stop()) {
		/*
		 * Remove own delayed wake-up timer, since we are already awake
		 * and we'll take care of the periodic write-back.
		 */
		del_timer(&wb->wakeup_timer);

		pages_written = wb_do_writeback(wb, 0);

		trace_writeback_pages_written(pages_written);

		if (pages_written)
			wb->last_active = jiffies;

		set_current_state(TASK_INTERRUPTIBLE);
		if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			continue;
		}

		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
		else {
			/*
			 * We have nothing to do, so can go sleep without any
			 * timeout and save power. When a work is queued or
			 * something is made dirty - we will be woken up.
			 */
			schedule();
		}

		try_to_freeze();
	}

	/* Flush any work that raced with us exiting */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	trace_writeback_thread_stop(bdi);
	return 0;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false);
	}
	rcu_read_unlock();
}
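
/*
 * Usage sketch (illustrative, not in the original file): the classic
 * caller is sys_sync(), which issues wakeup_flusher_threads(0) to kick
 * WB_SYNC_NONE writeback of everything before its blocking
 * WB_SYNC_ALL pass.
 */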

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *  	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&bdi->wb.list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);

}
EXPORT_SYMBOL(__mark_inode_dirty);

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);

/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages());
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * writeback_inodes_sb_if_idle	-	start writeback if none underway
 * @sb: the superblock
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb(sb);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);

/**
 * writeback_inodes_sb_nr_if_idle	-	start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
				   unsigned long nr)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb_nr(sb, nr);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);

/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, wb, &wbc);
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
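
/*
 * Usage sketch (illustrative, not in the original file): a path that
 * must push a dirty inode synchronously would do
 *
 *	err = write_inode_now(inode, 1);
 *
 * which runs writeback_single_inode() with WB_SYNC_ALL and then blocks
 * in inode_sync_wait() until I_SYNC clears.
 */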

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	int ret;

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, wb, wbc);
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
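
/*
 * Usage sketch (illustrative, not in the original file): a filesystem
 * fsync method that has already written the data pages can flush just
 * the inode itself with
 *
 *	err = sync_inode_metadata(inode, 1);
 *
 * i.e. a WB_SYNC_ALL writeback_single_inode() call with nr_to_write == 0,
 * so only the inode is passed to ->write_inode().
 */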