/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))
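/*
 * Note on the arithmetic above: 4096UL is 4MB expressed in 1KB units, and
 * (PAGE_CACHE_SHIFT - 10) is log2(page size / 1KB), so the shift converts
 * a KB count into a page count (1024 pages on a 4KB-page system).
 */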

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (strcmp(sb->s_type->name, "bdev") == 0)
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
{
	if (bdi->wb.task) {
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * The bdi thread isn't there, wake up the forker thread which
		 * will create and run it.
		 */
		wake_up_process(default_backing_dev_info.wb.task);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	if (!bdi->wb.task)
		trace_writeback_nothread(bdi, work);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		if (bdi->wb.task) {
			trace_writeback_nowork(bdi);
			wake_up_process(bdi->wb.task);
		}
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;

	bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason)
{
	__bdi_start_writeback(bdi, nr_pages, true, reason);
}
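
/*
 * Illustrative call (not from the original file): a caller that wants
 * ~1024 pages flushed opportunistically, without waiting for completion,
 * could do:
 *
 *	bdi_start_writeback(bdi, 1024, WB_REASON_LAPTOP_TIMER);
 *
 * The work item is queued and the flusher thread woken; the call returns
 * before any IO necessarily completes. The reason code is bookkeeping for
 * the writeback tracepoints.
 */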

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for a given BDI
 *   some IO is happening if we are over the background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	spin_lock_bh(&bdi->wb_lock);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
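
/*
 * Concretely: on a 32-bit kernel with HZ=1000, jiffies wraps roughly every
 * 49.7 days (2^32 ticks), so a stale dirtied_when can appear to lie in the
 * future once jiffies overflows; the time_before_eq() check in
 * inode_dirtied_after() filters such stamps out.
 */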

/*
 * Move expired (dirtied after work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       struct wb_writeback_work *work)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (work->older_than_this &&
		    inode_dirtied_after(inode, *work->older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;
	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
	trace_writeback_queue_io(wb, work, moved);
}
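
/*
 * Writeback consumes b_io from the tail (wb_inode(wb->b_io.prev)), so the
 * picture above means: inodes left over on b_io are served first, then the
 * requeued b_more_io inodes, and newly expired b_dirty inodes last.
 */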

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode,
				     struct bdi_writeback *wb)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Write out an inode's dirty pages.  Called under wb->list_lock and
 * inode->i_lock.  Either the caller has an active reference on the inode or
 * the inode has I_WILL_FREE set.
 *
 * If `wbc->sync_mode' is WB_SYNC_ALL, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	assert_spin_locked(&wb->list_lock);
	assert_spin_locked(&inode->i_lock);

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			return 0;
		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode, wb);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & I_FREEING)) {
		/*
		 * Sync livelock prevention. Each inode is tagged and synced in
		 * one shot. If still dirty, it will be redirty_tail()'ed below.
		 * Update the dirty time to prevent enqueue and sync it again.
		 */
		if ((inode->i_state & I_DIRTY) &&
		    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
			inode->dirtied_when = jiffies;

		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.
			 */
			inode->i_state |= I_DIRTY_PAGES;
			if (wbc->nr_to_write <= 0) {
				/*
				 * slice used up: queue for next turn
				 */
				requeue_io(inode, wb);
			} else {
				/*
				 * Writeback blocked by something other than
				 * congestion. Delay the inode for some time to
				 * avoid spinning on the CPU (100% iowait)
				 * retrying writeback of the dirty page/inode
				 * that cannot be performed immediately.
				 */
				redirty_tail(inode, wb);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Filesystems can dirty the inode during writeback
			 * operations, such as delayed allocation during
			 * submission or metadata updates after data IO
			 * completion.
			 */
			redirty_tail(inode, wb);
		} else {
			/*
			 * The inode is clean.  At this point we either have
			 * a reference to the inode or it's on its way out.
			 * No need to add it back to the LRU.
			 */
			list_del_init(&inode->i_wb_list);
		}
	}
	inode_sync_complete(inode);
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}

static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                   (quickly) tag currently dirty pages
	 *                   (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(bdi->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}
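
/*
 * For WB_SYNC_NONE work the chunk works out to roughly half a second's
 * worth of IO at the device's estimated bandwidth (assuming
 * avg_write_bandwidth is tracked in pages per second), capped by the
 * global dirty limit, then rounded to a multiple of MIN_WRITEBACK_PAGES
 * (4MB worth of pages) with that value as a floor.
 */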

/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @work->sb is set, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed; the
		 * first kind does not need periodic writeout yet, and for the
		 * latter kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we have completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		__iget(inode);
		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		writeback_single_inode(inode, wb, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		if (!(inode->i_state & I_DIRTY))
			wrote++;
		if (wbc.pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode, wb);
		}
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		iput(inode);
		cond_resched();
		spin_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!grab_super_passive(sb)) {
			/*
			 * grab_super_passive() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		drop_super(sb);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

static bool over_bground_thresh(struct backing_dev_info *bdi)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
				bdi_dirty_limit(bdi, background_thresh))
		return true;

	return false;
}

/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb->bdi))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			inode_wait_for_writeback(inode, wb);
			spin_unlock(&inode->i_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh(wb->bdi)) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_thread(void *data)
{
	struct bdi_writeback *wb = data;
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	current->flags |= PF_SWAPWRITE;
	set_freezable();
	wb->last_active = jiffies;

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	trace_writeback_thread_start(bdi);

	while (!kthread_freezable_should_stop(NULL)) {
		/*
		 * Remove own delayed wake-up timer, since we are already awake
		 * and we'll take care of the periodic write-back.
		 */
		del_timer(&wb->wakeup_timer);

		pages_written = wb_do_writeback(wb, 0);

		trace_writeback_pages_written(pages_written);

		if (pages_written)
			wb->last_active = jiffies;

		set_current_state(TASK_INTERRUPTIBLE);
		if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			continue;
		}

		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
		else {
			/*
			 * We have nothing to do, so can go sleep without any
			 * timeout and save power. When a work is queued or
			 * something is made dirty - we will be woken up.
			 */
			schedule();
		}
	}

	/* Flush any work that raced with us exiting */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	trace_writeback_thread_stop(bdi);
	return 0;
}
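
/*
 * Thread lifecycle note: this kthread is created on demand by the forker
 * thread (bdi_forker_thread() in mm/backing-dev.c in this kernel
 * generation) and may be stopped again after a period of inactivity;
 * that is why bdi_wakeup_flusher() above falls back to waking the forker
 * when bdi->wb.task is NULL.
 */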


/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, reason);
	}
	rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *  	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&bdi->wb.list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
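
/*
 * Typical use (illustrative): a filesystem that has just updated in-core
 * inode metadata calls mark_inode_dirty(inode) or, for timestamp-only
 * changes, mark_inode_dirty_sync(inode); both are thin inline wrappers in
 * <linux/fs.h> that end up here with the corresponding I_DIRTY* flags.
 */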

static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
			    unsigned long nr,
			    enum wb_reason reason)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
		.reason			= reason,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);

/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * writeback_inodes_sb_if_idle	-	start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb(sb, reason);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);

/**
 * writeback_inodes_sb_nr_if_idle	-	start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
				   unsigned long nr,
				   enum wb_reason reason)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb_nr(sb, nr, reason);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);

/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
		.reason		= WB_REASON_SYNC,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);
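
/*
 * Contrast with writeback_inodes_sb(): that path queues WB_SYNC_NONE work
 * and waits only for the work item itself to be processed, while
 * sync_inodes_sb() uses WB_SYNC_ALL and additionally calls wait_sb_inodes()
 * so every page under writeback is waited upon - the data-integrity
 * guarantee the sync path needs.
 */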

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, wb, &wbc);
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	int ret;

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, wb, wbc);
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);