/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};
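
/*
 * Usage note (drawn from the callers in this file): a work item is either
 * kzalloc()'d and consumed asynchronously by the flusher thread, which
 * kfree()s it when ->done is NULL (see __bdi_start_writeback() and
 * wb_do_writeback()), or it lives on the caller's stack with ->done
 * pointing at a completion the caller waits on (see
 * writeback_inodes_sb_nr()).
 */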

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure so that the definition remains local to this
 * file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (strcmp(sb->s_type->name, "bdev") == 0)
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}

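/* Map an i_wb_list entry back to its owning inode. */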
static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
{
	if (bdi->wb.task) {
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * The bdi thread isn't there, wake up the forker thread which
		 * will create and run it.
		 */
		wake_up_process(default_backing_dev_info.wb.task);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	if (!bdi->wb.task)
		trace_writeback_nothread(bdi, work);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		if (bdi->wb.task) {
			trace_writeback_nowork(bdi);
			wake_up_process(bdi->wb.task);
		}
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;

	bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason)
{
	__bdi_start_writeback(bdi, nr_pages, true, reason);
}
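
/*
 * Illustrative call (not taken from this file): ask the flusher thread of
 * a device's bdi to write roughly 1024 pages, attributing the work to
 * periodic writeback:
 *
 *	bdi_start_writeback(bdi, 1024, WB_REASON_PERIODIC);
 */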

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	spin_lock_bh(&bdi->wb_lock);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through
	 * spin_unlock(&wb->list_lock);
	 */

	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
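
/*
 * Worked example of the wraparound guarded against above: on 32-bit with
 * HZ=1000 the jiffies counter wraps about every 49.7 days (2^32 ms). An
 * inode stamped just before the wrap would later compare as "after" any
 * recent timestamp, so without the time_before_eq() check it could never
 * expire and would stall bdi writeback permanently.
 */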

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       struct wb_writeback_work *work)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (work->older_than_this &&
		    inode_dirtied_after(inode, *work->older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
300 301 302 303 304 305 306 307
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;
	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode,
				     struct bdi_writeback *wb)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Write out an inode's dirty pages.  Called under wb->list_lock and
 * inode->i_lock.  Either the caller has an active reference on the inode or
 * the inode has I_WILL_FREE set.
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	assert_spin_locked(&wb->list_lock);
	assert_spin_locked(&inode->i_lock);

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode, wb);
			trace_writeback_single_inode_requeue(inode, wbc,
							     nr_to_write);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode, wb);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		/*
		 * Sync livelock prevention. Each inode is tagged and synced in
		 * one shot. If still dirty, it will be redirty_tail()'ed below.
		 * Update the dirty time to prevent enqueue and sync it again.
		 */
		if ((inode->i_state & I_DIRTY) &&
		    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
			inode->dirtied_when = jiffies;

		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.
			 */
			inode->i_state |= I_DIRTY_PAGES;
			if (wbc->nr_to_write <= 0) {
				/*
				 * slice used up: queue for next turn
				 */
				requeue_io(inode, wb);
			} else {
				/*
				 * Writeback blocked by something other than
				 * congestion. Delay the inode for some time to
				 * avoid spinning on the CPU (100% iowait)
				 * retrying writeback of the dirty page/inode
				 * that cannot be performed immediately.
				 */
				redirty_tail(inode, wb);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Filesystems can dirty the inode during writeback
			 * operations, such as delayed allocation during
			 * submission or metadata updates after data IO
			 * completion.
			 */
			redirty_tail(inode, wb);
		} else {
			/*
			 * The inode is clean.  At this point we either have
			 * a reference to the inode or it's on its way out.
			 * No need to add it back to the LRU.
			 */
			list_del_init(&inode->i_wb_list);
		}
	}
	inode_sync_complete(inode);
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}

static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                   (quickly) tag currently dirty pages
	 *                   (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(bdi->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}
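
/*
 * Illustrative arithmetic for the WB_SYNC_NONE branch above (numbers are
 * hypothetical): if half the measured bandwidth comes to 2500 pages and
 * MIN_WRITEBACK_PAGES is 1024, then round_down(2500 + 1024, 1024) == 3072:
 * the chunk is rounded to a multiple of MIN_WRITEBACK_PAGES and never
 * falls below that minimum.
 */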

/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @only_this_sb is true, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed; the
		 * first kind does not need periodic writeout yet, and for the
		 * latter kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		__iget(inode);
		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		writeback_single_inode(inode, wb, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		if (!(inode->i_state & I_DIRTY))
			wrote++;
		if (wbc.pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode, wb);
		}
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		iput(inode);
		cond_resched();
		spin_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!grab_super_passive(sb)) {
			/*
			 * grab_super_passive() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		drop_super(sb);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

static bool over_bground_thresh(struct backing_dev_info *bdi)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
				bdi_dirty_limit(bdi, background_thresh))
		return true;

	return false;
}

/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb->bdi))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io))  {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			inode_wait_for_writeback(inode, wb);
			spin_unlock(&inode->i_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh(wb->bdi)) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}
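
/*
 * Interval note: dirty_writeback_interval is kept in centiseconds (hence
 * the "* 10" above converting to milliseconds), so with the usual default
 * of 500 the kupdate-style flush fires at most every 5 seconds.
 */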

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_thread(void *data)
{
	struct bdi_writeback *wb = data;
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	current->flags |= PF_SWAPWRITE;
	set_freezable();
	wb->last_active = jiffies;

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	trace_writeback_thread_start(bdi);

	while (!kthread_should_stop()) {
		/*
		 * Remove own delayed wake-up timer, since we are already awake
		 * and we'll take care of the periodic write-back.
		 */
		del_timer(&wb->wakeup_timer);

		pages_written = wb_do_writeback(wb, 0);

		trace_writeback_pages_written(pages_written);

		if (pages_written)
			wb->last_active = jiffies;

		set_current_state(TASK_INTERRUPTIBLE);
		if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			continue;
		}

		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
		else {
			/*
			 * We have nothing to do, so can go sleep without any
			 * timeout and save power. When a work is queued or
			 * something is made dirty - we will be woken up.
			 */
			schedule();
		}

		try_to_freeze();
	}

	/* Flush any work that raced with us exiting */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	trace_writeback_thread_stop(bdi);
	return 0;
}
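
/*
 * Note: these per-bdi flusher threads are created on demand by the forker
 * thread that bdi_wakeup_flusher() above wakes via
 * default_backing_dev_info; the forker itself lives in mm/backing-dev.c.
 */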

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, reason);
	}
	rcu_read_unlock();
}
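
/*
 * Illustrative caller: sys_sync() kicks every device's flusher thread with
 *
 *	wakeup_flusher_threads(0, WB_REASON_SYNC);
 *
 * queueing WB_SYNC_NONE writeback for all dirty pages on all bdis.
 */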

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *  	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&bdi->wb.list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);

}
EXPORT_SYMBOL(__mark_inode_dirty);

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
			    unsigned long nr,
			    enum wb_reason reason)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
		.reason			= reason,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);

/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);
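
/*
 * Illustrative call (hypothetical caller): a filesystem wanting to push
 * dirty data out before reclaiming space might do, under s_umount:
 *
 *	writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
 *
 * The caller blocks until the flusher has processed the work item, but
 * not until the I/O it submitted completes.
 */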

/**
 * writeback_inodes_sb_if_idle	-	start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb(sb, reason);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);

/**
 * writeback_inodes_sb_nr_if_idle	-	start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
				   unsigned long nr,
				   enum wb_reason reason)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb_nr(sb, nr, reason);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);

/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
		.reason		= WB_REASON_SYNC,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, wb, &wbc);
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	int ret;

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, wb, wbc);
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
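
/*
 * Illustrative call: push just the inode itself out synchronously after a
 * metadata update:
 *
 *	int err = sync_inode_metadata(inode, 1);
 *
 * wait == 1 selects WB_SYNC_ALL; nr_to_write == 0 keeps data pages out of
 * the write.
 */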