/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

const char *wb_reason_name[] = {
	[WB_REASON_BACKGROUND]		= "background",
	[WB_REASON_TRY_TO_FREE_PAGES]	= "try_to_free_pages",
	[WB_REASON_SYNC]		= "sync",
	[WB_REASON_PERIODIC]		= "periodic",
	[WB_REASON_LAPTOP_TIMER]	= "laptop_timer",
	[WB_REASON_FREE_MORE_MEM]	= "free_more_memory",
	[WB_REASON_FS_FREE_SPACE]	= "fs_free_space",
	[WB_REASON_FORKER_THREAD]	= "forker_thread"
};

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure so that the definition remains local to this
 * file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (strcmp(sb->s_type->name, "bdev") == 0)
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
{
	if (bdi->wb.task) {
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * The bdi thread isn't there, wake up the forker thread which
		 * will create and run it.
		 */
		wake_up_process(default_backing_dev_info.wb.task);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	if (!bdi->wb.task)
		trace_writeback_nothread(bdi, work);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		if (bdi->wb.task) {
			trace_writeback_nowork(bdi);
			wake_up_process(bdi->wb.task);
		}
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;

	bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason)
{
	__bdi_start_writeback(bdi, nr_pages, true, reason);
}
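/*
 * Illustrative usage sketch (not part of the original file): a caller
 * holding a valid bdi that wants roughly 1024 pages cleaned
 * opportunistically might do
 *
 *	bdi_start_writeback(bdi, 1024, WB_REASON_TRY_TO_FREE_PAGES);
 *
 * This only queues a WB_SYNC_NONE work item and wakes the flusher; no
 * IO is guaranteed to have started, let alone completed, when it returns.
 */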

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	spin_lock_bh(&bdi->wb_lock);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through
	 * spin_unlock(&wb->list_lock);
	 */

	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
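/*
 * Explanatory note (not original text) on the CONFIG_64BIT guard above:
 * on 32-bit, once dirtied_when falls more than 2^31 ticks into the past,
 * time_after(inode->dirtied_when, t) misreads the wrapped stamp as lying
 * in the future, so the inode would never be seen as expired.  The extra
 * time_before_eq(inode->dirtied_when, jiffies) rejects any stamp that
 * appears to postdate the present, forcing such inodes to expire anyway.
 */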

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       struct wb_writeback_work *work)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (work->older_than_this &&
		    inode_dirtied_after(inode, *work->older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;
	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
	trace_writeback_queue_io(wb, work, moved);
}
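/*
 * Worked example of the expiry cut-off (explanatory, values illustrative):
 * if *work->older_than_this is "jiffies - 30 * HZ", move_expired_inodes()
 * walks b_dirty from its oldest (tail) end and stops at the first inode
 * dirtied within the last 30 seconds, so only inodes at least that old
 * are promoted to b_io for this batch.
 */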

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode,
				     struct bdi_writeback *wb)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Write out an inode's dirty pages.  Called under wb->list_lock and
 * inode->i_lock.  Either the caller has an active reference on the inode or
 * the inode has I_WILL_FREE set.
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	assert_spin_locked(&wb->list_lock);
	assert_spin_locked(&inode->i_lock);

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode, wb);
			trace_writeback_single_inode_requeue(inode, wbc,
							     nr_to_write);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode, wb);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		/*
		 * Sync livelock prevention. Each inode is tagged and synced in
		 * one shot. If still dirty, it will be redirty_tail()'ed below.
		 * Update the dirty time to prevent enqueue and sync it again.
		 */
		if ((inode->i_state & I_DIRTY) &&
		    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
			inode->dirtied_when = jiffies;

		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.
			 */
			inode->i_state |= I_DIRTY_PAGES;
			if (wbc->nr_to_write <= 0) {
				/*
				 * slice used up: queue for next turn
				 */
				requeue_io(inode, wb);
			} else {
				/*
				 * Writeback blocked by something other than
				 * congestion. Delay the inode for some time to
				 * avoid spinning on the CPU (100% iowait)
				 * retrying writeback of the dirty page/inode
				 * that cannot be performed immediately.
				 */
				redirty_tail(inode, wb);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Filesystems can dirty the inode during writeback
			 * operations, such as delayed allocation during
			 * submission or metadata updates after data IO
			 * completion.
			 */
			redirty_tail(inode, wb);
		} else {
			/*
			 * The inode is clean.  At this point we either have
			 * a reference to the inode or it's on its way out.
			 * No need to add it back to the LRU.
			 */
			list_del_init(&inode->i_wb_list);
		}
	}
	inode_sync_complete(inode);
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}

static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                   (quickly) tag currently dirty pages
	 *                   (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(bdi->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}
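/*
 * Worked example for the non-integrity branch above (numbers illustrative,
 * assuming MIN_WRITEBACK_PAGES of 1024 and DIRTY_SCOPE of 8 on this
 * configuration): with avg_write_bandwidth = 20000 pages/s,
 * global_dirty_limit = 100000 pages and work->nr_pages = 30000:
 *
 *	pages = min(20000 / 2, 100000 / 8)	= 10000
 *	pages = min(10000, 30000)		= 10000
 *	pages = round_down(10000 + 1024, 1024)	= 10240
 *
 * i.e. about half a second's worth of IO, rounded to whole 1024-page
 * units so the per-inode slice never degenerates into tiny writeouts.
 */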

/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @only_this_sb is true, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed; the
		 * first kind does not need periodic writeout yet, and for the
		 * latter kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		__iget(inode);
		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		writeback_single_inode(inode, wb, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		if (!(inode->i_state & I_DIRTY))
			wrote++;
		if (wbc.pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode, wb);
		}
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		iput(inode);
		cond_resched();
		spin_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!grab_super_passive(sb)) {
			/*
			 * grab_super_passive() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		drop_super(sb);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

static bool over_bground_thresh(struct backing_dev_info *bdi)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
				bdi_dirty_limit(bdi, background_thresh))
		return true;

	return false;
}
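/*
 * Illustrative numbers (not from the original file): with about 1000000
 * dirtyable pages and vm.dirty_background_ratio = 10, global_dirty_limits()
 * puts background_thresh near 100000 pages.  Background writeback then
 * keeps running while global dirty+unstable pages exceed that figure, or
 * while this bdi's reclaimable pages exceed its proportional share of it
 * as computed by bdi_dirty_limit().
 */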

/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb->bdi))
			break;

		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
			work->older_than_this = &oldest_jif;
		}

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io))  {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			inode_wait_for_writeback(inode, wb);
			spin_unlock(&inode->i_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh(wb->bdi)) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}
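/*
 * Note on the interval arithmetic above (explanatory): the
 * dirty_writeback_interval sysctl is kept in centiseconds
 * (vm.dirty_writeback_centisecs), so the "* 10" converts it to
 * milliseconds for msecs_to_jiffies().  With the common default of 500,
 * kupdate-style flushing runs at most once every 5 seconds per bdi.
 */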

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_thread(void *data)
{
	struct bdi_writeback *wb = data;
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	current->flags |= PF_SWAPWRITE;
	set_freezable();
	wb->last_active = jiffies;

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	trace_writeback_thread_start(bdi);

	while (!kthread_should_stop()) {
		/*
		 * Remove own delayed wake-up timer, since we are already awake
		 * and we'll take care of the periodic write-back.
		 */
		del_timer(&wb->wakeup_timer);

		pages_written = wb_do_writeback(wb, 0);

		trace_writeback_pages_written(pages_written);

		if (pages_written)
			wb->last_active = jiffies;

		set_current_state(TASK_INTERRUPTIBLE);
		if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			continue;
		}

		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
		else {
			/*
			 * We have nothing to do, so can go sleep without any
			 * timeout and save power. When a work is queued or
			 * something is made dirty - we will be woken up.
			 */
			schedule();
		}

		try_to_freeze();
	}

	/* Flush any work that raced with us exiting */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	trace_writeback_thread_stop(bdi);
	return 0;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, reason);
	}
	rcu_read_unlock();
}
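/*
 * Usage sketch (illustrative): the sync path is a typical caller, roughly
 * of the form
 *
 *	wakeup_flusher_threads(0, WB_REASON_SYNC);
 *
 * which, per the comment above, expands nr_pages to the current number of
 * dirty and unstable pages and fans the work out to every bdi with dirty IO.
 */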

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *  	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&bdi->wb.list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);

}
EXPORT_SYMBOL(__mark_inode_dirty);
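/*
 * Usage note (explanatory, not original text): filesystems normally reach
 * this via the mark_inode_dirty()/mark_inode_dirty_sync() wrappers, which
 * pass I_DIRTY (all dirty bits) or I_DIRTY_SYNC respectively, e.g.
 *
 *	inode->i_size = newsize;
 *	mark_inode_dirty(inode);
 *
 * Only the first transition to a dirty state moves the inode onto
 * bdi->wb.b_dirty; repeat calls just OR in the flags and return early.
 */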

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
			    unsigned long nr,
			    enum wb_reason reason)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
		.reason			= reason,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);
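/*
 * Usage sketch (illustrative): a filesystem running low on free space
 * might kick bounded, non-integrity writeback for its own superblock with
 *
 *	writeback_inodes_sb_nr(sb, 8192, WB_REASON_FS_FREE_SPACE);
 *
 * The caller must already hold sb->s_umount (see the WARN_ON above).  The
 * call waits for the queued work item to be processed, but the pages
 * themselves are written with WB_SYNC_NONE semantics.
 */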

/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * writeback_inodes_sb_if_idle	-	start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb(sb, reason);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);

/**
 * writeback_inodes_sb_nr_if_idle	-	start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
				   unsigned long nr,
				   enum wb_reason reason)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb_nr(sb, nr, reason);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);

/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
		.reason		= WB_REASON_SYNC,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, wb, &wbc);
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
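/*
 * Usage sketch (illustrative): a knfsd-style caller that must have the
 * inode durable before replying can do
 *
 *	err = write_inode_now(inode, 1);
 *
 * sync == 1 selects WB_SYNC_ALL and finishes with inode_sync_wait(), so
 * the data pages and the inode itself have been written out on return.
 */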

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	int ret;

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, wb, wbc);
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
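
/*
 * Usage sketch (illustrative): an fsync implementation that has already
 * flushed the data pages and only needs the inode itself on disk might
 * finish with
 *
 *	return sync_inode_metadata(inode, 1);
 *
 * The zero nr_to_write marks the pass as metadata-only, per the field's
 * comment above, and wait == 1 selects synchronous WB_SYNC_ALL writeback.
 */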