/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
J
Jens Axboe 已提交
17
#include <linux/module.h>
L
Linus Torvalds 已提交
18 19 20 21 22 23 24 25
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
26
#include "internal.h"
L
Linus Torvalds 已提交
27

28
#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)
29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67

/**
 * writeback_acquire - attempt to get exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 *
 * It is a waste of resources to have more than one pdflush thread blocked on
 * a single request queue.  Exclusion at the request_queue level is obtained
 * via a flag in the request_queue's backing_dev_info.state.
 *
 * Non-request_queue-backed address_spaces will share default_backing_dev_info,
 * unless they implement their own.  Which is somewhat inefficient, as this
 * may prevent concurrent writeback against multiple devices.
 *
 * Returns 1 if this caller now owns writeback for @bdi, 0 if another
 * thread already holds it.
 */
static int writeback_acquire(struct backing_dev_info *bdi)
{
	/* Atomically claim BDI_pdflush; we win only if it was clear. */
	if (test_and_set_bit(BDI_pdflush, &bdi->state))
		return 0;	/* somebody else is already flushing this bdi */
	return 1;
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback in progress against a backing device.
 * Returns non-zero if the BDI_pdflush bit is currently set (i.e. some thread
 * holds exclusive writeback access via writeback_acquire()).
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_release - relinquish exclusive writeback access against a device.
 * @bdi: the device's backing_dev_info structure
 *
 * Must only be called by the thread that previously won writeback_acquire();
 * the BUG_ON enforces that the bit is actually held.
 */
static void writeback_release(struct backing_dev_info *bdi)
{
	BUG_ON(!writeback_in_progress(bdi));
	clear_bit(BDI_pdflush, &bdi->state);
}

68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89
/*
 * Log inode dirtying to the kernel log (only called when the "block_dump"
 * sysctl is enabled - see __mark_inode_dirty()).  The raw blockdev inode
 * (ino 0 on the internal "bdev" superblock) is deliberately skipped.
 */
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			/* d_lock held across the printk so d_name can't change */
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

L
Linus Torvalds 已提交
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139
/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *  	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	/* re-test under the lock: another CPU may have beaten us here */
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list,
					&inode_to_bdi(inode)->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__mark_inode_dirty);

static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

191 192 193 194 195
/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	if (!list_empty(&bdi->b_dirty)) {
		struct inode *tail;

		/* b_dirty.next is the most-recently-dirtied inode */
		tail = list_entry(bdi->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &bdi->b_dirty);
}

214
/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	list_move(&inode->i_list, &inode_to_bdi(inode)->b_more_io);
}

J
Joern Engel 已提交
222 223 224 225 226 227 228 229 230
/*
 * Wake up any thread sleeping in inode_wait_for_writeback() /
 * inode_sync_wait() once I_SYNC has been cleared.
 */
static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

231 232 233 234 235 236 237 238 239 240 241 242 243 244 245
/*
 * Return true if @inode was dirtied after time @t, with protection against
 * jiffies wraparound on 32-bit systems (see below).
 */
static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole pdflush writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

246 247 248 249 250 251 252 253 254 255 256
/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 *
 * @older_than_this, when non-NULL, cuts off the move at the first inode
 * dirtied after that timestamp; NULL means move everything.  The queues
 * are time-ordered, so once one inode is too new we can stop.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	while (!list_empty(delaying_queue)) {
		/* ->prev is the eldest (least recently dirtied) inode */
		struct inode *inode = list_entry(delaying_queue->prev,
						struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_list, dispatch_queue);
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct backing_dev_info *bdi,
		     unsigned long *older_than_this)
{
	/* previously-requeued inodes go to the tail of b_io first */
	list_splice_init(&bdi->b_more_io, bdi->b_io.prev);
	move_expired_inodes(&bdi->b_dirty, &bdi->b_io, older_than_this);
}

/*
 * Return 1 if any inode on @list belongs to superblock @sb, else 0.
 * Takes inode_lock for the duration of the (linear) scan.
 */
static int sb_on_inode_list(struct super_block *sb, struct list_head *list)
{
	struct inode *inode;
	int ret = 0;

	spin_lock(&inode_lock);
	list_for_each_entry(inode, list, i_list) {
		if (inode->i_sb == sb) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&inode_lock);
	return ret;
}

289 290
/*
 * Return 1 if superblock @sb has any inode on any bdi's dirty/io/more_io
 * list, else 0.  Scans every registered bdi under bdi_lock.
 */
int sb_has_dirty_inodes(struct super_block *sb)
{
	struct backing_dev_info *bdi;
	int ret = 0;

	/*
	 * This is REALLY expensive right now, but it'll go away
	 * when the bdi writeback is introduced
	 */
	mutex_lock(&bdi_lock);
	list_for_each_entry(bdi, &bdi_list, bdi_list) {
		if (sb_on_inode_list(sb, &bdi->b_dirty) ||
		    sb_on_inode_list(sb, &bdi->b_io) ||
		    sb_on_inode_list(sb, &bdi->b_more_io)) {
			ret = 1;
			break;
		}
	}
	mutex_unlock(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(sb_has_dirty_inodes);

L
Linus Torvalds 已提交
313
/*
 * Wait for writeback on an inode to complete.
 *
 * Called and returns with inode_lock held; the lock is dropped around each
 * sleep on __I_SYNC and retaken before re-checking the state.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	do {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
		/* loop: I_SYNC may have been set again by another writer */
	} while (inode->i_state & I_SYNC);
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has ref on the inode (either via __iget or via syscall against an fd)
 * or the inode has I_WILL_FREE set (via generic_forget_inode)
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.  The lock is dropped while the data is written
 * and retaken afterwards; the inode is protected in between by I_SYNC.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	unsigned dirty;
	int ret;

	/* sanity-check the caller's reference/state contract (see above) */
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (!wait) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		/* keep the first error encountered */
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bales out without doing anything. Redirty
			 * the inode; Move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had a good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while were writing back
			 * the pages.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

469 470 471
/*
 * Write back dirty inodes queued on @bdi->b_io, restricted to superblock
 * @sb when @sb is non-NULL.  Runs under inode_lock (dropped/retaken around
 * the per-inode writeout inside writeback_single_inode()).
 */
static void generic_sync_bdi_inodes(struct backing_dev_info *bdi,
				    struct writeback_control *wbc,
				    struct super_block *sb)
{
	const int is_blkdev_sb = sb_is_blkdev_sb(sb);
	const unsigned long start = jiffies;	/* livelock avoidance */

	spin_lock(&inode_lock);

	/* for_kupdate continues a previous scan if b_io is not yet empty */
	if (!wbc->for_kupdate || list_empty(&bdi->b_io))
		queue_io(bdi, wbc->older_than_this);

	while (!list_empty(&bdi->b_io)) {
		/* take the eldest inode from the tail of b_io */
		struct inode *inode = list_entry(bdi->b_io.prev,
						struct inode, i_list);
		long pages_skipped;

		/*
		 * super block given and doesn't match, skip this inode
		 */
		if (sb && sb != inode->i_sb) {
			redirty_tail(inode);
			continue;
		}

		if (!bdi_cap_writeback_dirty(bdi)) {
			redirty_tail(inode);
			if (is_blkdev_sb) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			if (!is_blkdev_sb)
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;		/* Skip a congested blockdev */
		}

		if (wbc->bdi && bdi != wbc->bdi) {
			if (!is_blkdev_sb)
				break;		/* fs has the wrong queue */
			requeue_io(inode);
			continue;		/* blockdev has wrong queue */
		}

		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from extra jobs and livelock.
		 */
		if (inode_dirtied_after(inode, start))
			break;

		/* Is another pdflush already flushing this queue? */
		if (current_is_pdflush() && !writeback_acquire(bdi))
			break;

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (current_is_pdflush())
			writeback_release(bdi);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&bdi->b_more_io))
			wbc->more_io = 1;
	}

	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * FIXME: this linear search could get expensive with many filesystems.  But
 * how to fix?  We need to go from an address_space to all inodes which share
 * a queue with that address_space.  (Easy: have a global "dirty superblocks"
 * list).
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void generic_sync_sb_inodes(struct super_block *sb,
				   struct writeback_control *wbc)
{
	struct backing_dev_info *bdi;

	/* no specific bdi requested: sync @sb's inodes on every bdi */
	if (!wbc->bdi) {
		mutex_lock(&bdi_lock);
		list_for_each_entry(bdi, &bdi_list, bdi_list)
			generic_sync_bdi_inodes(bdi, wbc, sb);
		mutex_unlock(&bdi_lock);
	} else
		generic_sync_bdi_inodes(wbc->bdi, wbc, sb);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		struct inode *inode, *old_inode = NULL;

		spin_lock(&inode_lock);

		/*
		 * Data integrity sync. Must wait for all pages under writeback,
		 * because there may have been pages dirtied before our sync
		 * call, but which had writeout started before we write it out.
		 * In which case, the inode may not be on the dirty list, but
		 * we still have to wait for that writeout.
		 */
		list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
			struct address_space *mapping;

			if (inode->i_state &
					(I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
				continue;
			mapping = inode->i_mapping;
			if (mapping->nrpages == 0)
				continue;
			__iget(inode);
			spin_unlock(&inode_lock);
			/*
			 * We hold a reference to 'inode' so it couldn't have
			 * been removed from s_inodes list while we dropped the
			 * inode_lock.  We cannot iput the inode now as we can
			 * be holding the last reference and we cannot iput it
			 * under inode_lock. So we keep the reference and iput
			 * it later.
			 */
			iput(old_inode);
			old_inode = inode;

			filemap_fdatawait(mapping);

			cond_resched();

			spin_lock(&inode_lock);
		}
		spin_unlock(&inode_lock);
		iput(old_inode);
	}
}

/*
 * Start writeback of dirty pagecache data against all unlocked inodes.
 *
 * Note:
 * We don't need to grab a reference to superblock here. If it has non-empty
 * ->b_dirty it hasn't been killed yet and kill_super() won't proceed
 * past sync_inodes_sb() until the ->b_dirty/b_io/b_more_io lists are all
 * empty. Since __sync_single_inode() regains inode_lock before it finally moves
 * inode from superblock lists we are OK.
 *
 * If `older_than_this' is non-zero then only flush inodes which have a
 * flushtime older than *older_than_this.
 *
 * If `bdi' is non-zero then we will scan the first inode against each
 * superblock until we find the matching ones.  One group will be the dirty
 * inodes against a filesystem.  Then when we hit the dummy blockdev superblock,
 * sync_sb_inodes will seekout the blockdev which matches `bdi'.  Maybe not
 * super-efficient but we're about to do a ton of I/O...
 */
void
writeback_inodes(struct writeback_control *wbc)
{
	struct super_block *sb;

	might_sleep();
	spin_lock(&sb_lock);
restart:
	list_for_each_entry_reverse(sb, &super_blocks, s_list) {
		if (sb_has_dirty_inodes(sb)) {
			/* we're making our own get_super here */
			sb->s_count++;
			spin_unlock(&sb_lock);
			/*
			 * If we can't get the readlock, there's no sense in
			 * waiting around, most of the time the FS is going to
			 * be unmounted by the time it is released.
			 */
			if (down_read_trylock(&sb->s_umount)) {
				if (sb->s_root)
					generic_sync_sb_inodes(sb, wbc);
				up_read(&sb->s_umount);
			}
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
		if (wbc->nr_to_write <= 0)
			break;
	}
	spin_unlock(&sb_lock);
}

706 707 708
/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO. The number of pages submitted is
 * returned.
 */
long writeback_inodes_sb(struct super_block *sb)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	long nr_to_write;

	/* estimate work: dirty + unstable pages plus in-use inode count */
	nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	wbc.nr_to_write = nr_to_write;
	generic_sync_sb_inodes(sb, &wbc);
	/* pages actually submitted = budget minus what's left */
	return nr_to_write - wbc.nr_to_write;
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block. The number of pages synced is returned.
 */
long sync_inodes_sb(struct super_block *sb)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	long nr_to_write = LONG_MAX; /* doesn't actually matter */

	wbc.nr_to_write = nr_to_write;
	generic_sync_sb_inodes(sb, &wbc);
	return nr_to_write - wbc.nr_to_write;
}
EXPORT_SYMBOL(sync_inodes_sb);
L
Linus Torvalds 已提交
756 757

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	/* mapping can't do dirty writeback: only write the inode itself */
	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);

/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space that should be flushed
 * @what:  what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon.
 *
 *    OSYNC_DATA:     i_mapping's dirty data
 *    OSYNC_METADATA: the buffers at i_mapping->private_list
 *    OSYNC_INODE:    the inode itself
 */

int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
	int err = 0;
	int need_write_inode_now = 0;
	int err2;

	if (what & OSYNC_DATA)
		err = filemap_fdatawrite(mapping);
	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
		err2 = sync_mapping_buffers(mapping);
		/* first error wins throughout */
		if (!err)
			err = err2;
	}
	if (what & OSYNC_DATA) {
		err2 = filemap_fdatawait(mapping);
		if (!err)
			err = err2;
	}

	spin_lock(&inode_lock);
	/* the inode itself needs writing if it is dirty and either
	 * OSYNC_INODE was requested or a datasync-relevant change exists */
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now) {
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
	}
	else
		inode_sync_wait(inode);

	return err;
}
EXPORT_SYMBOL(generic_osync_inode);