/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes, i.e. data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};
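
/*
 * Illustrative sketch only: callers either kzalloc() a work item and let
 * wb_do_writeback() kfree() it once executed (see __bdi_start_writeback()
 * below), or declare one on the stack with a completion and wait on it,
 * as writeback_inodes_sb_nr() and sync_inodes_sb() do:
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	struct wb_writeback_work work = {
 *		.sb		= sb,
 *		.sync_mode	= WB_SYNC_NONE,
 *		.nr_pages	= nr,
 *		.done		= &done,
 *	};
 *
 *	bdi_queue_work(sb->s_bdi, &work);
 *	wait_for_completion(&done);
 */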

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure so that the definition remains local to this
 * file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (strcmp(sb->s_type->name, "bdev") == 0)
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
{
	if (bdi->wb.task) {
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * The bdi thread isn't there, wake up the forker thread which
		 * will create and run it.
		 */
		wake_up_process(default_backing_dev_info.wb.task);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	if (!bdi->wb.task)
		trace_writeback_nothread(bdi, work);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		if (bdi->wb.task) {
			trace_writeback_nowork(bdi);
			wake_up_process(bdi->wb.task);
		}
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;

	bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	__bdi_start_writeback(bdi, nr_pages, true);
}

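/*
 * Illustrative only: a caller wanting roughly 16 MB of opportunistic
 * writeback on a device (assuming 4 KB pages) could issue
 *
 *	bdi_start_writeback(bdi, 4096);
 *
 * and return immediately; nothing guarantees when, or whether, the
 * pages actually hit the disk.
 */
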
/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for the given BDI
 *   some IO is happening if we are over the background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	spin_lock_bh(&bdi->wb_lock);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through
	 * spin_unlock(&wb->list_lock);
	 */

	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
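
/*
 * Worked example (illustrative): with 32-bit jiffies at HZ=1000 the
 * counter wraps about every 49.7 days, and time_after() misreads any
 * timestamp more than 2^31 ticks (~24.8 days) in the past as being in
 * the future.  An inode stuck on b_dirty that long would then never
 * expire; the time_before_eq() check above filters those values out.
 */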

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	int moved;
	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
	trace_writeback_queue_io(wb, older_than_this, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode,
				     struct bdi_writeback *wb)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Write out an inode's dirty pages.  Called under wb->list_lock and
 * inode->i_lock.  Either the caller has an active reference on the inode or
 * the inode has I_WILL_FREE set.
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	assert_spin_locked(&wb->list_lock);
	assert_spin_locked(&inode->i_lock);

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode, wb);
			trace_writeback_single_inode_requeue(inode, wbc,
							     nr_to_write);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode, wb);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		/*
		 * Sync livelock prevention. Each inode is tagged and synced in
		 * one shot. If still dirty, it will be redirty_tail()'ed below.
		 * Update the dirty time to prevent enqueue and sync it again.
		 */
		if ((inode->i_state & I_DIRTY) &&
		    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
			inode->dirtied_when = jiffies;

		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.
			 */
			inode->i_state |= I_DIRTY_PAGES;
			if (wbc->nr_to_write <= 0) {
				/*
				 * slice used up: queue for next turn
				 */
				requeue_io(inode, wb);
			} else {
				/*
				 * Writeback blocked by something other than
				 * congestion. Delay the inode for some time to
				 * avoid spinning on the CPU (100% iowait)
				 * retrying writeback of the dirty page/inode
				 * that cannot be performed immediately.
				 */
				redirty_tail(inode, wb);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Filesystems can dirty the inode during writeback
			 * operations, such as delayed allocation during
			 * submission or metadata updates after data IO
			 * completion.
			 */
			redirty_tail(inode, wb);
		} else {
			/*
			 * The inode is clean.  At this point we either have
			 * a reference to the inode or it's on its way out.
			 * No need to add it back to the LRU.
			 */
			list_del_init(&inode->i_wb_list);
			wbc->inodes_written++;
		}
	}
	inode_sync_complete(inode);
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}
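
/*
 * Illustrative sketch of the calling convention (see write_inode_now()
 * and sync_inode() below for real callers): wb->list_lock and
 * inode->i_lock must both be held on entry, in that order, and both are
 * dropped and reacquired inside writeback_single_inode() around the
 * actual I/O:
 *
 *	spin_lock(&wb->list_lock);
 *	spin_lock(&inode->i_lock);
 *	ret = writeback_single_inode(inode, wb, &wbc);
 *	spin_unlock(&inode->i_lock);
 *	spin_unlock(&wb->list_lock);
 */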

/*
 * For background writeback the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 */
static bool pin_sb_for_writeback(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (list_empty(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root)
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}

/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * If @only_this_sb is true, then find and write all such
 * inodes. Otherwise write only ones which go sequentially
 * in reverse order.
 *
 * Return 1 if the caller writeback routine should be
 * interrupted. Otherwise return 0.
 */
static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
		struct writeback_control *wbc, bool only_this_sb)
{
	while (!list_empty(&wb->b_io)) {
		long pages_skipped;
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (only_this_sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			return 0;
		}

		/*
		 * Don't bother with new inodes or inodes being freed; the first
		 * kind does not need periodic writeout yet, and for the latter
		 * kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			continue;
		}

		__iget(inode);

		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wb, wbc);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode, wb);
		}
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		iput(inode);
		cond_resched();
		spin_lock(&wb->list_lock);
		if (wbc->nr_to_write <= 0)
			return 1;
	}
	/* b_io is empty */
	return 1;
}

static void __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct writeback_control *wbc)
{
	int ret = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!pin_sb_for_writeback(sb)) {
			requeue_io(inode, wb);
			continue;
		}
		ret = writeback_sb_inodes(sb, wb, wbc, false);
		drop_super(sb);

		if (ret)
			break;
	}
	/* Leave any unwritten inodes on b_io */
}

void writeback_inodes_wb(struct bdi_writeback *wb,
		struct writeback_control *wbc)
{
	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);
	__writeback_inodes_wb(wb, wbc);
	spin_unlock(&wb->list_lock);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty state each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024
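/* With the common 4 KB page size, 1024 pages caps each chunk at 4 MB. */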

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) > background_thresh);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.older_than_this	= NULL,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	long write_chunk = MAX_WRITEBACK_PAGES;
	struct inode *inode;

	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                   (quickly) tag currently dirty pages
	 *                   (maybe slowly) sync all tagged pages
	 */
	if (wbc.sync_mode == WB_SYNC_ALL || wbc.tagged_writepages)
		write_chunk = LONG_MAX;

	oldest_jif = jiffies;
	wbc.older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh())
			break;

		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
			wbc.older_than_this = &oldest_jif;
		}

		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;
		wbc.inodes_written = 0;

		trace_wbc_writeback_start(&wbc, wb->bdi);
		if (list_empty(&wb->b_io))
			queue_io(wb, wbc.older_than_this);
		if (work->sb)
			writeback_sb_inodes(work->sb, wb, &wbc, true);
		else
			__writeback_inodes_wb(wb, &wbc);
		trace_wbc_writeback_written(&wbc, wb->bdi);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we make some progress on cleaning pages or inodes.
		 */
		if (wbc.nr_to_write < write_chunk)
			continue;
		if (wbc.inodes_written)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io))  {
			inode = wb_inode(wb->b_more_io.prev);
			trace_wbc_writeback_wait(&wbc, wb->bdi);
			spin_lock(&inode->i_lock);
			inode_wait_for_writeback(inode, wb);
			spin_unlock(&inode->i_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return wrote;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh()) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}
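
/*
 * Units note (illustrative): dirty_writeback_interval is kept in
 * hundredths of a second, so the "* 10" above converts it to
 * milliseconds for msecs_to_jiffies(); e.g. the common default of 500
 * centisecs gives a five-second kupdate period.
 */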

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}
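
/*
 * Illustrative only: the flusher thread below drives this loop as
 *
 *	pages_written = wb_do_writeback(wb, 0);
 *
 * and passes force_wait=1 only when racing work remains at thread exit,
 * so that the queued items are flushed synchronously rather than lost.
 */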

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_thread(void *data)
{
	struct bdi_writeback *wb = data;
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	current->flags |= PF_SWAPWRITE;
	set_freezable();
	wb->last_active = jiffies;

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	trace_writeback_thread_start(bdi);

	while (!kthread_should_stop()) {
		/*
		 * Remove own delayed wake-up timer, since we are already awake
		 * and we'll take care of the periodic write-back.
		 */
		del_timer(&wb->wakeup_timer);

		pages_written = wb_do_writeback(wb, 0);

		trace_writeback_pages_written(pages_written);

		if (pages_written)
			wb->last_active = jiffies;

		set_current_state(TASK_INTERRUPTIBLE);
		if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			continue;
		}

		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
		else {
			/*
			 * We have nothing to do, so can go sleep without any
			 * timeout and save power. When a work is queued or
			 * something is made dirty - we will be woken up.
			 */
			schedule();
		}

		try_to_freeze();
	}

	/* Flush any work that raced with us exiting */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	trace_writeback_thread_stop(bdi);
	return 0;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false);
	}
	rcu_read_unlock();
}
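
/*
 * Illustrative only: passing 0 asks for "everything", e.g.
 *
 *	wakeup_flusher_threads(0);
 *
 * expands nr_pages to the current count of dirty and unstable pages
 * before fanning the work out to every bdi with dirty IO.
 */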

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *  	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&bdi->wb.list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);

}
EXPORT_SYMBOL(__mark_inode_dirty);

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);
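
/*
 * Illustrative only: a filesystem wanting to push out up to 1024 dirty
 * pages before allocating space might, with s_umount already held, do
 *
 *	writeback_inodes_sb_nr(sb, 1024);
 *
 * and carry on without waiting for the submitted IO to complete.
 */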

/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages());
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * writeback_inodes_sb_if_idle	-	start writeback if none underway
 * @sb: the superblock
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb(sb);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);

/**
 * writeback_inodes_sb_nr_if_idle -	start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
				   unsigned long nr)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb_nr(sb, nr);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);

/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, wb, &wbc);
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	int ret;

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	ret = writeback_single_inode(inode, wb, wbc);
	spin_unlock(&inode->i_lock);
	spin_unlock(&wb->list_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
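
/*
 * Illustrative only: an fsync-like path that has already written the
 * data pages could persist just the inode with
 *
 *	int err = sync_inode_metadata(inode, 1);
 *
 * where the non-zero `wait' makes the write synchronous (WB_SYNC_ALL).
 */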