/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include <linux/device.h>
#include <linux/memcontrol.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))
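/*
 * Illustrative arithmetic (not part of the original file): assuming 4KiB
 * pages, PAGE_CACHE_SHIFT is 12, so 4096UL >> (12 - 10) == 1024 pages,
 * i.e. 4MB.  Larger page sizes shrink the page count but the minimal
 * chunk stays at 4MB worth of data.
 */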

struct wb_completion {
	atomic_t		cnt;
};

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
	unsigned int auto_free:1;	/* free on completion */
	unsigned int single_wait:1;
	unsigned int single_done:1;
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct wb_completion *done;	/* set if the caller waits */
};

/*
 * If one wants to wait for one or more wb_writeback_works, each work's
 * ->done should be set to a wb_completion defined using the following
 * macro.  Once all work items are issued with wb_queue_work(), the caller
 * can wait for the completion of all using wb_wait_for_completion().  Work
 * items which are waited upon aren't freed automatically on completion.
 */
#define DEFINE_WB_COMPLETION_ONSTACK(cmpl)				\
	struct wb_completion cmpl = {					\
		.cnt		= ATOMIC_INIT(1),			\
	}

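/*
 * Illustrative sketch (an assumption-laden example, not part of the
 * original file): a caller that issues one work item and waits for it
 * could combine DEFINE_WB_COMPLETION_ONSTACK() with wb_queue_work() and
 * wb_wait_for_completion(), both defined later in this file.  @bdi, @wb
 * and the remaining work fields are assumed to be set up by the caller:
 *
 *	DEFINE_WB_COMPLETION_ONSTACK(done);
 *	struct wb_writeback_work work = {
 *		.nr_pages	= LONG_MAX,
 *		.sync_mode	= WB_SYNC_ALL,
 *		.done		= &done,
 *	};
 *
 *	wb_queue_work(wb, &work);
 *	wb_wait_for_completion(bdi, &done);
 */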

/*
 * If an inode is constantly having its pages dirtied, but then the
 * updates stop dirtytime_expire_interval seconds in the past, it's
 * possible for the worst case time between when an inode has its
 * timestamps updated and when they finally get written out to be two
 * dirtytime_expire_intervals.  We set the default to 12 hours (in
 * seconds), which means most of the time inodes will have their
 * timestamps written to disk after 12 hours, but in the worst case a
 * few inodes might not have their timestamps updated for 24 hours.
 */
unsigned int dirtytime_expire_interval = 12 * 60 * 60;

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);

static bool wb_io_lists_populated(struct bdi_writeback *wb)
{
	if (wb_has_dirty_io(wb)) {
		return false;
	} else {
		set_bit(WB_has_dirty_io, &wb->state);
		WARN_ON_ONCE(!wb->avg_write_bandwidth);
		atomic_long_add(wb->avg_write_bandwidth,
				&wb->bdi->tot_write_bandwidth);
		return true;
	}
}

static void wb_io_lists_depopulated(struct bdi_writeback *wb)
{
	if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
	    list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
		clear_bit(WB_has_dirty_io, &wb->state);
		WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
					&wb->bdi->tot_write_bandwidth) < 0);
	}
}

/**
 * inode_wb_list_move_locked - move an inode onto a bdi_writeback IO list
 * @inode: inode to be moved
 * @wb: target bdi_writeback
 * @head: one of @wb->b_{dirty|io|more_io}
 *
 * Move @inode->i_wb_list to @head of @wb and set %WB_has_dirty_io.
 * Returns %true if @inode is the first occupant of the !dirty_time IO
 * lists; otherwise, %false.
 */
static bool inode_wb_list_move_locked(struct inode *inode,
				      struct bdi_writeback *wb,
				      struct list_head *head)
{
	assert_spin_locked(&wb->list_lock);

	list_move(&inode->i_wb_list, head);

	/* dirty_time doesn't count as dirty_io until expiration */
	if (head != &wb->b_dirty_time)
		return wb_io_lists_populated(wb);

	wb_io_lists_depopulated(wb);
	return false;
}

/**
 * inode_wb_list_del_locked - remove an inode from its bdi_writeback IO list
 * @inode: inode to be removed
 * @wb: bdi_writeback @inode is being removed from
 *
 * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and
 * clear %WB_has_dirty_io if all are empty afterwards.
 */
static void inode_wb_list_del_locked(struct inode *inode,
				     struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);

	list_del_init(&inode->i_wb_list);
	wb_io_lists_depopulated(wb);
}

static void wb_wakeup(struct bdi_writeback *wb)
{
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	spin_unlock_bh(&wb->work_lock);
}

static void wb_queue_work(struct bdi_writeback *wb,
			  struct wb_writeback_work *work)
{
	trace_writeback_queue(wb->bdi, work);

	spin_lock_bh(&wb->work_lock);
	if (!test_bit(WB_registered, &wb->state)) {
		if (work->single_wait)
			work->single_done = 1;
		goto out_unlock;
	}
	if (work->done)
		atomic_inc(&work->done->cnt);
	list_add_tail(&work->list, &wb->work_list);
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
out_unlock:
	spin_unlock_bh(&wb->work_lock);
}

/**
 * wb_wait_for_completion - wait for completion of bdi_writeback_works
 * @bdi: bdi work items were issued to
 * @done: target wb_completion
 *
 * Wait for one or more work items issued to @bdi with their ->done field
 * set to @done, which should have been defined with
 * DEFINE_WB_COMPLETION_ONSTACK().  This function returns after all such
 * work items are completed.  Work items which are waited upon aren't freed
 * automatically on completion.
 */
static void wb_wait_for_completion(struct backing_dev_info *bdi,
				   struct wb_completion *done)
{
	atomic_dec(&done->cnt);		/* put down the initial count */
	wait_event(bdi->wb_waitq, !atomic_read(&done->cnt));
}

#ifdef CONFIG_CGROUP_WRITEBACK

/* parameters for foreign inode detection, see wbc_detach_inode() */
#define WB_FRN_TIME_SHIFT	13	/* 1s = 2^13, up to 8 secs w/ 16bit */
#define WB_FRN_TIME_AVG_SHIFT	3	/* avg = avg * 7/8 + new * 1/8 */
#define WB_FRN_TIME_CUT_DIV	2	/* ignore rounds < avg / 2 */
#define WB_FRN_TIME_PERIOD	(2 * (1 << WB_FRN_TIME_SHIFT))	/* 2s */

#define WB_FRN_HIST_SLOTS	16	/* inode->i_wb_frn_history is 16bit */
#define WB_FRN_HIST_UNIT	(WB_FRN_TIME_PERIOD / WB_FRN_HIST_SLOTS)
					/* each slot's duration is 2s / 16 */
#define WB_FRN_HIST_THR_SLOTS	(WB_FRN_HIST_SLOTS / 2)
					/* if foreign slots >= 8, switch */
#define WB_FRN_HIST_MAX_SLOTS	(WB_FRN_HIST_THR_SLOTS / 2 + 1)
					/* one round can affect up to 5 slots */

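/*
 * Illustrative arithmetic for the constants above (not part of the
 * original file): with WB_FRN_TIME_SHIFT == 13, one second of IO time is
 * 1 << 13 == 8192 units, so WB_FRN_TIME_PERIOD covers two seconds.  Each
 * of the 16 history slots therefore stands for WB_FRN_HIST_UNIT == 1024
 * units (~125ms), the switch threshold is 8 foreign slots, and a single
 * round can shift at most 8 / 2 + 1 == 5 slots into the history.
 */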
void __inode_attach_wb(struct inode *inode, struct page *page)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct bdi_writeback *wb = NULL;

	if (inode_cgwb_enabled(inode)) {
		struct cgroup_subsys_state *memcg_css;

		if (page) {
			memcg_css = mem_cgroup_css_from_page(page);
			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
		} else {
			/* must pin memcg_css, see wb_get_create() */
			memcg_css = task_get_css(current, memory_cgrp_id);
			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
			css_put(memcg_css);
		}
	}

	if (!wb)
		wb = &bdi->wb;

	/*
	 * There may be multiple instances of this function racing to
	 * update the same inode.  Use cmpxchg() to tell the winner.
	 */
	if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
		wb_put(wb);
}

/**
 * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
 * @inode: inode of interest with i_lock held
 *
 * Returns @inode's wb with its list_lock held.  @inode->i_lock must be
 * held on entry and is released on return.  The returned wb is guaranteed
 * to stay @inode's associated wb until its list_lock is released.
 */
static struct bdi_writeback *
locked_inode_to_wb_and_lock_list(struct inode *inode)
	__releases(&inode->i_lock)
	__acquires(&wb->list_lock)
{
	while (true) {
		struct bdi_writeback *wb = inode_to_wb(inode);

		/*
		 * inode_to_wb() association is protected by both
		 * @inode->i_lock and @wb->list_lock but list_lock nests
		 * outside i_lock.  Drop i_lock and verify that the
		 * association hasn't changed after acquiring list_lock.
		 */
		wb_get(wb);
		spin_unlock(&inode->i_lock);
		spin_lock(&wb->list_lock);
		wb_put(wb);		/* not gonna deref it anymore */

		if (likely(wb == inode_to_wb(inode)))
			return wb;	/* @inode already has ref */

		spin_unlock(&wb->list_lock);
		cpu_relax();
		spin_lock(&inode->i_lock);
	}
}

/**
 * inode_to_wb_and_lock_list - determine an inode's wb and lock it
 * @inode: inode of interest
 *
 * Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held
 * on entry.
 */
static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
	__acquires(&wb->list_lock)
{
	spin_lock(&inode->i_lock);
	return locked_inode_to_wb_and_lock_list(inode);
}

struct inode_switch_wbs_context {
	struct inode		*inode;
	struct bdi_writeback	*new_wb;

	struct rcu_head		rcu_head;
	struct work_struct	work;
};

static void inode_switch_wbs_work_fn(struct work_struct *work)
{
	struct inode_switch_wbs_context *isw =
		container_of(work, struct inode_switch_wbs_context, work);
	struct inode *inode = isw->inode;
	struct bdi_writeback *new_wb = isw->new_wb;

	/*
	 * By the time control reaches here, RCU grace period has passed
	 * since I_WB_SWITCH assertion and all wb stat update transactions
	 * between unlocked_inode_to_wb_begin/end() are guaranteed to be
	 * synchronizing against mapping->tree_lock.
	 */
	spin_lock(&inode->i_lock);

	inode->i_wb_frn_winner = 0;
	inode->i_wb_frn_avg_time = 0;
	inode->i_wb_frn_history = 0;

	/*
	 * Paired with load_acquire in unlocked_inode_to_wb_begin() and
	 * ensures that the new wb is visible if they see !I_WB_SWITCH.
	 */
	smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);

	spin_unlock(&inode->i_lock);

	iput(inode);
	wb_put(new_wb);
	kfree(isw);
}

static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
{
	struct inode_switch_wbs_context *isw = container_of(rcu_head,
				struct inode_switch_wbs_context, rcu_head);

	/* needs to grab bh-unsafe locks, bounce to work item */
	INIT_WORK(&isw->work, inode_switch_wbs_work_fn);
	schedule_work(&isw->work);
}

/**
 * inode_switch_wbs - change the wb association of an inode
 * @inode: target inode
 * @new_wb_id: ID of the new wb
 *
 * Switch @inode's wb association to the wb identified by @new_wb_id.  The
 * switching is performed asynchronously and may fail silently.
 */
static void inode_switch_wbs(struct inode *inode, int new_wb_id)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct cgroup_subsys_state *memcg_css;
	struct inode_switch_wbs_context *isw;

	/* noop if seems to be already in progress */
	if (inode->i_state & I_WB_SWITCH)
		return;

	isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
	if (!isw)
		return;

	/* find and pin the new wb */
	rcu_read_lock();
	memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
	if (memcg_css)
		isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
	rcu_read_unlock();
	if (!isw->new_wb)
		goto out_free;

	/* while holding I_WB_SWITCH, no one else can update the association */
	spin_lock(&inode->i_lock);
	if (inode->i_state & (I_WB_SWITCH | I_FREEING) ||
	    inode_to_wb(inode) == isw->new_wb) {
		spin_unlock(&inode->i_lock);
		goto out_free;
	}
	inode->i_state |= I_WB_SWITCH;
	spin_unlock(&inode->i_lock);

	ihold(inode);
	isw->inode = inode;

	/*
	 * In addition to synchronizing among switchers, I_WB_SWITCH tells
	 * the RCU protected stat update paths to grab the mapping's
	 * tree_lock so that stat transfer can synchronize against them.
	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
	 */
	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
	return;

out_free:
	if (isw->new_wb)
		wb_put(isw->new_wb);
	kfree(isw);
}

/**
 * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
 * @wbc: writeback_control of interest
 * @inode: target inode
 *
 * @inode is locked and about to be written back under the control of @wbc.
 * Record @inode's writeback context into @wbc and unlock the i_lock.  On
 * writeback completion, wbc_detach_inode() should be called.  This is used
 * to track the cgroup writeback context.
 */
void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
				 struct inode *inode)
{
	wbc->wb = inode_to_wb(inode);
	wbc->inode = inode;

	wbc->wb_id = wbc->wb->memcg_css->id;
	wbc->wb_lcand_id = inode->i_wb_frn_winner;
	wbc->wb_tcand_id = 0;
	wbc->wb_bytes = 0;
	wbc->wb_lcand_bytes = 0;
	wbc->wb_tcand_bytes = 0;

	wb_get(wbc->wb);
	spin_unlock(&inode->i_lock);
}

/**
 * wbc_detach_inode - disassociate wbc from inode and perform foreign detection
 * @wbc: writeback_control of the just finished writeback
 *
 * To be called after a writeback attempt of an inode finishes and undoes
 * wbc_attach_and_unlock_inode().  Can be called under any context.
 *
 * As concurrent write sharing of an inode is expected to be very rare and
 * memcg only tracks page ownership on a first-use basis, severely confining
 * the usefulness of such sharing, cgroup writeback tracks ownership
 * per-inode.  While the support for concurrent write sharing of an inode
 * is deemed unnecessary, an inode being written to by different cgroups at
 * different points in time is a lot more common, and, more importantly,
 * charging only by first-use can too readily lead to grossly incorrect
 * behaviors (single foreign page can lead to gigabytes of writeback to be
 * incorrectly attributed).
 *
 * To resolve this issue, cgroup writeback detects the majority dirtier of
 * an inode and transfers the ownership to it.  To avoid unnecessary
 * oscillation, the detection mechanism keeps track of history and gives
 * out the switch verdict only if the foreign usage pattern is stable over
 * a certain amount of time and/or writeback attempts.
 *
 * On each writeback attempt, @wbc tries to detect the majority writer
 * using Boyer-Moore majority vote algorithm.  In addition to the byte
 * count from the majority voting, it also counts the bytes written for the
 * current wb and the last round's winner wb (max of last round's current
 * wb, the winner from two rounds ago, and the last round's majority
 * candidate).  Keeping track of the historical winner helps the algorithm
 * to semi-reliably detect the most active writer even when it's not the
 * absolute majority.
 *
 * Once the winner of the round is determined, whether the winner is
 * foreign or not and how much IO time the round consumed is recorded in
 * inode->i_wb_frn_history.  If the amount of recorded foreign IO time is
 * over a certain threshold, the switch verdict is given.
 */
void wbc_detach_inode(struct writeback_control *wbc)
{
	struct bdi_writeback *wb = wbc->wb;
	struct inode *inode = wbc->inode;
	u16 history = inode->i_wb_frn_history;
	unsigned long avg_time = inode->i_wb_frn_avg_time;
	unsigned long max_bytes, max_time;
	int max_id;

	/* pick the winner of this round */
	if (wbc->wb_bytes >= wbc->wb_lcand_bytes &&
	    wbc->wb_bytes >= wbc->wb_tcand_bytes) {
		max_id = wbc->wb_id;
		max_bytes = wbc->wb_bytes;
	} else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) {
		max_id = wbc->wb_lcand_id;
		max_bytes = wbc->wb_lcand_bytes;
	} else {
		max_id = wbc->wb_tcand_id;
		max_bytes = wbc->wb_tcand_bytes;
	}

	/*
	 * Calculate the amount of IO time the winner consumed and fold it
	 * into the running average kept per inode.  If the consumed IO
	 * time is lower than avg / WB_FRN_TIME_CUT_DIV, ignore it for
	 * deciding whether to switch or not.  This is to prevent one-off
	 * small dirtiers from skewing the verdict.
	 */
	max_time = DIV_ROUND_UP((max_bytes >> PAGE_SHIFT) << WB_FRN_TIME_SHIFT,
				wb->avg_write_bandwidth);
	if (avg_time)
		avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) -
			    (avg_time >> WB_FRN_TIME_AVG_SHIFT);
	else
		avg_time = max_time;	/* immediate catch up on first run */

	if (max_time >= avg_time / WB_FRN_TIME_CUT_DIV) {
		int slots;

		/*
		 * The switch verdict is reached if foreign wb's consume
		 * more than a certain proportion of IO time in a
		 * WB_FRN_TIME_PERIOD.  This is loosely tracked by 16 slot
		 * history mask where each bit represents one sixteenth of
		 * the period.  Determine the number of slots to shift into
		 * history from @max_time.
		 */
		slots = min(DIV_ROUND_UP(max_time, WB_FRN_HIST_UNIT),
			    (unsigned long)WB_FRN_HIST_MAX_SLOTS);
		history <<= slots;
		if (wbc->wb_id != max_id)
			history |= (1U << slots) - 1;

		/*
		 * Switch if the current wb isn't the consistent winner.
		 * If there are multiple closely competing dirtiers, the
		 * inode may switch across them repeatedly over time, which
		 * is okay.  The main goal is avoiding keeping an inode on
		 * the wrong wb for an extended period of time.
		 */
		if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
			inode_switch_wbs(inode, max_id);
	}

	/*
	 * Multiple instances of this function may race to update the
	 * following fields but we don't mind occasional inaccuracies.
	 */
	inode->i_wb_frn_winner = max_id;
	inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
	inode->i_wb_frn_history = history;

	wb_put(wbc->wb);
	wbc->wb = NULL;
}

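/*
 * Illustrative walk-through of the history update above (hypothetical
 * numbers, not from the original file): if the round's winner is a
 * foreign wb and its IO time spans three WB_FRN_HIST_UNITs, then
 * slots == 3, the history is shifted left by three and the three new
 * slots are filled with ones.  Once more than WB_FRN_HIST_THR_SLOTS (8)
 * of the 16 slots are set, i.e. foreign wb's owned most of the recent IO
 * time, hweight32(history) crosses the threshold and inode_switch_wbs()
 * is invoked.
 */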
/**
 * wbc_account_io - account IO issued during writeback
 * @wbc: writeback_control of the writeback in progress
 * @page: page being written out
 * @bytes: number of bytes being written out
 *
 * @bytes from @page are about to be written out during the writeback
 * controlled by @wbc.  Keep the book for foreign inode detection.  See
 * wbc_detach_inode().
 */
void wbc_account_io(struct writeback_control *wbc, struct page *page,
		    size_t bytes)
{
	int id;

	/*
	 * pageout() path doesn't attach @wbc to the inode being written
	 * out.  This is intentional as we don't want the function to block
	 * behind a slow cgroup.  Ultimately, we want pageout() to kick off
	 * regular writeback instead of writing things out itself.
	 */
	if (!wbc->wb)
		return;

	rcu_read_lock();
	id = mem_cgroup_css_from_page(page)->id;
	rcu_read_unlock();

	if (id == wbc->wb_id) {
		wbc->wb_bytes += bytes;
		return;
	}

	if (id == wbc->wb_lcand_id)
		wbc->wb_lcand_bytes += bytes;

	/* Boyer-Moore majority vote algorithm */
	if (!wbc->wb_tcand_bytes)
		wbc->wb_tcand_id = id;
	if (id == wbc->wb_tcand_id)
		wbc->wb_tcand_bytes += bytes;
	else
		wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
}

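/*
 * Illustrative trace of the majority vote above (hypothetical byte
 * counts, not from the original file): three calls attributing 8k to
 * foreign cgroup A, then 4k to foreign cgroup B, then 2k to A again give
 * wb_tcand_id == A throughout while wb_tcand_bytes goes 8k -> 4k -> 6k;
 * the candidate is only replaced after competing writers have cancelled
 * its running count down to zero.
 */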
/**
 * inode_congested - test whether an inode is congested
 * @inode: inode to test for congestion
 * @cong_bits: mask of WB_[a]sync_congested bits to test
 *
 * Tests whether @inode is congested.  @cong_bits is the mask of congestion
 * bits to test and the return value is the mask of set bits.
 *
 * If cgroup writeback is enabled for @inode, the congestion state is
 * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg
 * associated with @inode is congested; otherwise, the root wb's congestion
 * state is used.
 */
int inode_congested(struct inode *inode, int cong_bits)
{
	/*
	 * Once set, ->i_wb never becomes NULL while the inode is alive.
	 * Start transaction iff ->i_wb is visible.
	 */
	if (inode && inode_to_wb(inode)) {
		struct bdi_writeback *wb;
		bool locked, congested;

		wb = unlocked_inode_to_wb_begin(inode, &locked);
		congested = wb_congested(wb, cong_bits);
		unlocked_inode_to_wb_end(inode, locked);
		return congested;
	}

	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}
EXPORT_SYMBOL_GPL(inode_congested);

/**
 * wb_wait_for_single_work - wait for completion of a single bdi_writeback_work
 * @bdi: bdi the work item was issued to
 * @work: work item to wait for
 *
 * Wait for the completion of @work which was issued to one of @bdi's
 * bdi_writeback's.  The caller must have set @work->single_wait before
 * issuing it.  This wait operates independently of
 * wb_wait_for_completion() and also disables automatic freeing of @work.
 */
static void wb_wait_for_single_work(struct backing_dev_info *bdi,
				    struct wb_writeback_work *work)
{
	if (WARN_ON_ONCE(!work->single_wait))
		return;

	wait_event(bdi->wb_waitq, work->single_done);

	/*
	 * Paired with smp_wmb() in wb_do_writeback() and ensures that all
 * modifications to @work prior to assertion of ->single_done are
	 * visible to the caller once this function returns.
	 */
	smp_rmb();
}

/**
 * wb_split_bdi_pages - split nr_pages to write according to bandwidth
 * @wb: target bdi_writeback to split @nr_pages to
 * @nr_pages: number of pages to write for the whole bdi
 *
 * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
 * relation to the total write bandwidth of all wb's w/ dirty inodes on
 * @wb->bdi.
 */
static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
{
	unsigned long this_bw = wb->avg_write_bandwidth;
	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);

	if (nr_pages == LONG_MAX)
		return LONG_MAX;

	/*
	 * This may be called on clean wb's and proportional distribution
	 * may not make sense, just use the original @nr_pages in those
	 * cases.  In general, we wanna err on the side of writing more.
	 */
	if (!tot_bw || this_bw >= tot_bw)
		return nr_pages;
	else
		return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
}

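/*
 * Illustrative arithmetic (hypothetical numbers, not from the original
 * file): for nr_pages == 1024 with this wb contributing 30MB/s of a
 * 120MB/s bdi total, DIV_ROUND_UP_ULL(1024 * 30, 120) == 256 pages are
 * requested from this wb; a clean bdi (tot_bw == 0) or a wb providing
 * all of the bandwidth is simply handed the full 1024.
 */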
/**
 * wb_clone_and_queue_work - clone a wb_writeback_work and issue it to a wb
 * @wb: target bdi_writeback
 * @base_work: source wb_writeback_work
 *
 * Try to make a clone of @base_work and issue it to @wb.  If cloning
 * succeeds, %true is returned; otherwise, @base_work is issued directly
 * and %false is returned.  In the latter case, the caller is required to
 * wait for @base_work's completion using wb_wait_for_single_work().
 *
 * A clone is auto-freed on completion.  @base_work never is.
 */
static bool wb_clone_and_queue_work(struct bdi_writeback *wb,
				    struct wb_writeback_work *base_work)
{
	struct wb_writeback_work *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		*work = *base_work;
		work->auto_free = 1;
		work->single_wait = 0;
	} else {
		work = base_work;
		work->auto_free = 0;
		work->single_wait = 1;
	}
	work->single_done = 0;
	wb_queue_work(wb, work);
	return work != base_work;
}

/**
 * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
 * @bdi: target backing_dev_info
 * @base_work: wb_writeback_work to issue
 * @skip_if_busy: skip wb's which already have writeback in progress
 *
 * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
 * have dirty inodes.  If @base_work->nr_pages isn't %LONG_MAX, it's
 * distributed to the busy wbs according to each wb's proportion in the
 * total active write bandwidth of @bdi.
 */
static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
				  struct wb_writeback_work *base_work,
				  bool skip_if_busy)
{
	long nr_pages = base_work->nr_pages;
	int next_blkcg_id = 0;
	struct bdi_writeback *wb;
	struct wb_iter iter;

	might_sleep();

	if (!bdi_has_dirty_io(bdi))
		return;
restart:
	rcu_read_lock();
	bdi_for_each_wb(wb, bdi, &iter, next_blkcg_id) {
		if (!wb_has_dirty_io(wb) ||
		    (skip_if_busy && writeback_in_progress(wb)))
			continue;

		base_work->nr_pages = wb_split_bdi_pages(wb, nr_pages);
		if (!wb_clone_and_queue_work(wb, base_work)) {
			next_blkcg_id = wb->blkcg_css->id + 1;
			rcu_read_unlock();
			wb_wait_for_single_work(bdi, base_work);
			goto restart;
		}
	}
	rcu_read_unlock();
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static struct bdi_writeback *
locked_inode_to_wb_and_lock_list(struct inode *inode)
	__releases(&inode->i_lock)
	__acquires(&wb->list_lock)
{
	struct bdi_writeback *wb = inode_to_wb(inode);

	spin_unlock(&inode->i_lock);
	spin_lock(&wb->list_lock);
	return wb;
}

static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
	__acquires(&wb->list_lock)
{
	struct bdi_writeback *wb = inode_to_wb(inode);

	spin_lock(&wb->list_lock);
	return wb;
}

static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
{
	return nr_pages;
}

static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
				  struct wb_writeback_work *base_work,
				  bool skip_if_busy)
{
	might_sleep();

	if (bdi_has_dirty_io(bdi) &&
	    (!skip_if_busy || !writeback_in_progress(&bdi->wb))) {
		base_work->auto_free = 0;
		base_work->single_wait = 0;
		base_work->single_done = 0;
		wb_queue_work(&bdi->wb, base_work);
	}
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
			bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	if (!wb_has_dirty_io(wb))
		return;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		trace_writeback_nowork(wb->bdi);
		wb_wakeup(wb);
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;
	work->auto_free	= 1;

	wb_queue_work(wb, work);
}

/**
 * wb_start_background_writeback - start background writeback
 * @wb: bdi_writeback to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for the given wb
 *   some IO is happening if we are over the background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void wb_start_background_writeback(struct bdi_writeback *wb)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(wb->bdi);
	wb_wakeup(wb);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct bdi_writeback *wb;

	wb = inode_to_wb_and_lock_list(inode);
	inode_wb_list_del_locked(inode, wb);
	spin_unlock(&wb->list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	inode_wb_list_move_locked(inode, wb, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	inode_wb_list_move_locked(inode, wb, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If inode is clean and unused, put it into LRU now... */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

#define EXPIRE_DIRTY_ATIME 0x0001

/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       int flags,
			       struct wb_writeback_work *work)
{
	unsigned long *older_than_this = NULL;
	unsigned long expire_time;
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	if ((flags & EXPIRE_DIRTY_ATIME) == 0)
		older_than_this = work->older_than_this;
	else if (!work->for_sync) {
		expire_time = jiffies - (dirtytime_expire_interval * HZ);
		older_than_this = &expire_time;
	}
	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
		if (flags & EXPIRE_DIRTY_ATIME)
			set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
		if (sb_is_blkdev_sb(inode->i_sb))
			continue;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;

	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
	moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
				     EXPIRE_DIRTY_ATIME, work);
	if (moved)
		wb_io_lists_populated(wb);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, bit_wait,
			      TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}

/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is aimed for callers not holding any inode reference
 * so once i_lock is dropped, inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}

/*
 * Find proper writeback list for the inode depending on its current state and
 * possibly also change of its state while we were doing writeback.  Here we
 * handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can be called only by the flusher thread - no one
 * else processes all inodes in writeback lists, and requeueing inodes behind
 * the flusher thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention. Each inode is tagged and synced in one
	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bales out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else if (inode->i_state & I_DIRTY_TIME) {
		inode->dirtied_when = jiffies;
		inode_wb_list_move_locked(inode, wb, &wb->b_dirty_time);
	} else {
		/* The inode is clean. Remove from writeback lists. */
		inode_wb_list_del_locked(inode, wb);
	}
}

/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion. We don't do it for sync(2) writeback because it has a
	 * separate, external IO completion path and ->sync_fs for guaranteeing
	 * inode metadata is written back correctly.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);

	dirty = inode->i_state & I_DIRTY;
	if (inode->i_state & I_DIRTY_TIME) {
		if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
		    unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
		    unlikely(time_after(jiffies,
					(inode->dirtied_time_when +
					 dirtytime_expire_interval * HZ)))) {
			dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
			trace_writeback_lazytime(inode);
		}
	} else
		inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
	inode->i_state &= ~dirty;

	/*
	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
	 * either they see the I_DIRTY bits cleared or we see the dirtied
	 * inode.
	 *
	 * I_DIRTY_PAGES is always cleared together above even if @mapping
	 * still has dirty pages.  The flag is reinstated after smp_mb() if
	 * necessary.  This guarantees that either __mark_inode_dirty()
	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
	 */
	smp_mb();

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state |= I_DIRTY_PAGES;

	spin_unlock(&inode->i_lock);

	if (dirty & I_DIRTY_TIME)
		mark_inode_dirty_sync(inode);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & ~I_DIRTY_PAGES) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}

/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed to be called for writing back a single inode,
 * e.g. on behalf of a filesystem. The flusher thread uses __writeback_single_inode()
 * and does more profound writeback list handling in writeback_sb_inodes().
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync. We must wait. Since callers hold
		 * inode reference or inode has I_WILL_FREE set, it cannot go
		 * away under us.
		 */
		__inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip inode if it is clean and we have no outstanding writeback in
	 * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
	 * function since flusher thread may be doing for example sync in
	 * parallel and if we move the inode, it could get skipped. So here we
	 * make sure inode is on some writeback list and leave it there unless
	 * we have completely cleaned the inode.
	 */
	if (!(inode->i_state & I_DIRTY_ALL) &&
	    (wbc->sync_mode != WB_SYNC_ALL ||
	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
		goto out;
	inode->i_state |= I_SYNC;
	wbc_attach_and_unlock_inode(wbc, inode);

	ret = __writeback_single_inode(inode, wbc);

	wbc_detach_inode(wbc);
	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	/*
	 * If inode is clean, remove it from writeback lists. Otherwise don't
	 * touch it. See comment above for explanation.
	 */
	if (!(inode->i_state & I_DIRTY_ALL))
		inode_wb_list_del_locked(inode, wb);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}

static long writeback_chunk_size(struct bdi_writeback *wb,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                   (quickly) tag currently dirty pages
	 *                   (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(wb->avg_write_bandwidth / 2,
			    global_wb_domain.dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}

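/*
 * Illustrative arithmetic (hypothetical numbers, not from the original
 * file): for WB_SYNC_NONE work with plenty of nr_pages left, half the
 * wb's average bandwidth at 12288 pages and dirty_limit / DIRTY_SCOPE at
 * 8192 pages give pages == 8192, which round_down(8192 +
 * MIN_WRITEBACK_PAGES, MIN_WRITEBACK_PAGES) turns into 9216 pages, i.e.
 * a 36MB chunk with 4KiB pages.
 */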
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.for_sync		= work->for_sync,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed, first
		 * kind does not need periodic writeout yet, and for the latter
		 * kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on b_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback. So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC. This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		wbc_attach_and_unlock_inode(&wbc, inode);

		write_chunk = writeback_chunk_size(wb, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory. While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		wbc_detach_inode(&wbc);
		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY_ALL))
			wrote++;
		requeue_inode(inode, wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);
		cond_resched_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!trylock_super(sb)) {
			/*
			 * trylock_super() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		up_read(&sb->s_umount);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !wb_over_bg_thresh(wb))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io))  {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			spin_unlock(&wb->list_lock);
			/* This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			spin_lock(&wb->list_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&wb->work_lock);
	if (!list_empty(&wb->work_list)) {
		work = list_entry(wb->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&wb->work_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (wb_over_bg_thresh(wb)) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
1642
			.reason		= WB_REASON_BACKGROUND,
1643 1644 1645 1646 1647 1648 1649 1650
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}
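
/*
 * Illustrative note (not in the original file): dirty_writeback_interval and
 * dirty_expire_interval correspond to the vm.dirty_writeback_centisecs and
 * vm.dirty_expire_centisecs sysctls and are stored in hundredths of a second,
 * hence the "* 10" conversions to milliseconds above.  With the common
 * default of 500 centisecs, a kupdate-style flush is attempted at most once
 * every five seconds:
 *
 *	expired = wb->last_old_flush + msecs_to_jiffies(500 * 10);
 *	if (time_before(jiffies, expired))
 *		return 0;
 */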

/*
 * Retrieve work items and do the writeback they describe
 */
static long wb_do_writeback(struct bdi_writeback *wb)
{
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(WB_writeback_running, &wb->state);
	while ((work = get_next_work_item(wb)) != NULL) {
		struct wb_completion *done = work->done;
		bool need_wake_up = false;

		trace_writeback_exec(wb->bdi, work);

		wrote += wb_writeback(wb, work);

		if (work->single_wait) {
			WARN_ON_ONCE(work->auto_free);
			/* paired w/ rmb in wb_wait_for_single_work() */
			smp_wmb();
			work->single_done = 1;
			need_wake_up = true;
		} else if (work->auto_free) {
			kfree(work);
		}

		if (done && atomic_dec_and_test(&done->cnt))
			need_wake_up = true;

		if (need_wake_up)
			wake_up_all(&wb->bdi->wb_waitq);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(WB_writeback_running, &wb->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * reschedules periodically and does kupdated style flushing.
 */
void wb_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, dwork);
	long pages_written;

	set_worker_desc("flush-%s", dev_name(wb->bdi->dev));
	current->flags |= PF_SWAPWRITE;

	if (likely(!current_is_workqueue_rescuer() ||
		   !test_bit(WB_registered, &wb->state))) {
		/*
		 * The normal path.  Keep writing back @wb until its
		 * work_list is empty.  Note that this path is also taken
		 * if @wb is shutting down even when we're running off the
		 * rescuer as work_list needs to be drained.
		 */
		do {
			pages_written = wb_do_writeback(wb);
			trace_writeback_pages_written(pages_written);
		} while (!list_empty(&wb->work_list));
	} else {
		/*
		 * bdi_wq can't get enough workers and we're running off
		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
		 * enough for efficient IO.
		 */
		pages_written = writeback_inodes_wb(wb, 1024,
						    WB_REASON_FORKER_THREAD);
		trace_writeback_pages_written(pages_written);
	}

	if (!list_empty(&wb->work_list))
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
		wb_wakeup_delayed(wb);

	current->flags &= ~PF_SWAPWRITE;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages)
		nr_pages = get_nr_dirty_pages();

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		struct bdi_writeback *wb;
		struct wb_iter iter;

		if (!bdi_has_dirty_io(bdi))
			continue;

		bdi_for_each_wb(wb, bdi, &iter, 0)
			wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages),
					   false, reason);
	}
	rcu_read_unlock();
}
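
/*
 * Illustrative sketch (not part of the original file): callers that want to
 * nudge every flusher worker, e.g. when reclaim runs short of clean pages,
 * pass 0 to write back "the whole world" as described above:
 *
 *	wakeup_flusher_threads(0, WB_REASON_TRY_TO_FREE_PAGES);
 *
 * The reason is only recorded for tracing; it does not change behaviour.
 */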

/*
 * Wake up bdis periodically to make sure dirtytime inodes get
 * written back periodically.  We deliberately do *not* check the
 * b_dirtytime list in wb_has_dirty_io(), since this would cause the
 * kernel to be constantly waking up once there are any dirtytime
 * inodes on the system.  So instead we define a separate delayed work
 * function which gets called much more rarely.  (By default, only
 * once every 12 hours.)
 *
 * If there is any other write activity going on in the file system,
 * this function won't be necessary.  But if the only thing that has
 * happened on the file system is a dirtytime inode caused by an atime
 * update, we need this infrastructure below to make sure that inode
 * eventually gets pushed out to disk.
 */
static void wakeup_dirtytime_writeback(struct work_struct *w);
static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);

static void wakeup_dirtytime_writeback(struct work_struct *w)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		struct bdi_writeback *wb;
		struct wb_iter iter;

		bdi_for_each_wb(wb, bdi, &iter, 0)
			if (!list_empty(&bdi->wb.b_dirty_time))
				wb_wakeup(&bdi->wb);
	}
	rcu_read_unlock();
	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
}

static int __init start_dirtytime_writeback(void)
{
	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
	return 0;
}
__initcall(start_dirtytime_writeback);

int dirtytime_interval_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		mod_delayed_work(system_wq, &dirtytime_work, 0);
	return ret;
}
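
/*
 * Illustrative note (not part of the original file): this handler backs the
 * vm.dirtytime_expire_seconds sysctl, so lowering the interval from user
 * space reschedules dirtytime writeback right away, e.g.:
 *
 *	# sysctl vm.dirtytime_expire_seconds=3600
 *
 * which makes lazytime timestamp updates hit the disk roughly every hour
 * instead of the default twelve hours.
 */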

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *  	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	int dirtytime;

	trace_writeback_mark_inode_dirty(inode, flags);

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_TIME)) {
		trace_writeback_dirty_inode_start(inode, flags);

		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);

		trace_writeback_dirty_inode(inode, flags);
	}
	if (flags & I_DIRTY_INODE)
		flags &= ~I_DIRTY_TIME;
	dirtytime = flags & I_DIRTY_TIME;

	/*
	 * Paired with smp_mb() in __writeback_single_inode() for the
	 * following lockless i_state test.  See there for details.
	 */
	smp_mb();

	if (((inode->i_state & flags) == flags) ||
	    (dirtytime && (inode->i_state & I_DIRTY_INODE)))
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if (dirtytime && (inode->i_state & I_DIRTY_INODE))
		goto out_unlock_inode;
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode_attach_wb(inode, NULL);

		if (flags & I_DIRTY_INODE)
			inode->i_state &= ~I_DIRTY_TIME;
		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct bdi_writeback *wb;
			struct list_head *dirty_list;
			bool wakeup_bdi = false;

			wb = locked_inode_to_wb_and_lock_list(inode);

			WARN(bdi_cap_writeback_dirty(wb->bdi) &&
			     !test_bit(WB_registered, &wb->state),
			     "bdi-%s not registered\n", wb->bdi->name);

			inode->dirtied_when = jiffies;
			if (dirtytime)
				inode->dirtied_time_when = jiffies;

			if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES))
				dirty_list = &wb->b_dirty;
			else
				dirty_list = &wb->b_dirty_time;

			wakeup_bdi = inode_wb_list_move_locked(inode, wb,
							       dirty_list);

			spin_unlock(&wb->list_lock);
			trace_writeback_dirty_inode_enqueue(inode);

			/*
			 * If this is the first dirty inode for this bdi,
			 * we have to wake-up the corresponding bdi thread
			 * to make sure background write-back happens
			 * later.
			 */
			if (bdi_cap_writeback_dirty(wb->bdi) && wakeup_bdi)
				wb_wakeup_delayed(wb);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);

}
EXPORT_SYMBOL(__mark_inode_dirty);
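
/*
 * Illustrative sketch (not part of the original file): filesystems normally
 * reach __mark_inode_dirty() through the helpers in <linux/fs.h> rather than
 * calling it directly, for example after updating i_size or timestamps:
 *
 *	mark_inode_dirty(inode);			inode and its data
 *	mark_inode_dirty_sync(inode);			inode only (I_DIRTY_SYNC)
 *	__mark_inode_dirty(inode, I_DIRTY_TIME);	lazytime timestamp update
 *
 * The I_DIRTY_TIME case is what queues an inode on the b_dirty_time list
 * handled above.
 */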

static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}

static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
				     enum wb_reason reason, bool skip_if_busy)
{
	DEFINE_WB_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
		.reason			= reason,
	};
	struct backing_dev_info *bdi = sb->s_bdi;

	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy);
	wb_wait_for_completion(bdi, &done);
}

/**
 * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
			    unsigned long nr,
			    enum wb_reason reason)
{
	__writeback_inodes_sb_nr(sb, nr, reason, false);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);

/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);
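
/*
 * Illustrative sketch (not part of the original file): a filesystem that is
 * running low on space can push dirty data for its own superblock without
 * waiting for completion, provided it already holds s_umount:
 *
 *	writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
 *
 * Callers that cannot guarantee s_umount is held should use the try_to_*
 * variants below, which take the lock with a trylock.
 */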

/**
 * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: the reason for writeback
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns %true if writeback was started, %false if not.
 */
bool try_to_writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
				   enum wb_reason reason)
{
	if (!down_read_trylock(&sb->s_umount))
		return false;

	__writeback_inodes_sb_nr(sb, nr, reason, true);
	up_read(&sb->s_umount);
	return true;
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);

/**
 * try_to_writeback_inodes_sb - try to start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Implemented via try_to_writeback_inodes_sb_nr().
 * Returns %true if writeback was started, %false if not.
 */
bool try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb);
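
/*
 * Illustrative sketch (not part of the original file): unlike
 * writeback_inodes_sb(), the try_to_* variants take s_umount themselves with
 * a trylock, so they are safe to call from contexts that do not already hold
 * the lock and simply report whether writeback could be kicked off:
 *
 *	if (!try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE))
 *		pr_debug("sb busy, skipping opportunistic writeback\n");
 */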

/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DEFINE_WB_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
		.reason		= WB_REASON_SYNC,
		.for_sync	= 1,
	};
	struct backing_dev_info *bdi = sb->s_bdi;

	/* Nothing to do? */
	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_split_work_to_wbs(bdi, &work, false);
	wb_wait_for_completion(bdi, &done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);
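
/*
 * Illustrative note (not part of the original file): this is the data
 * integrity path used by sync(2) and syncfs(2).  sync_filesystem() in
 * fs/sync.c roughly does:
 *
 *	sync_inodes_sb(sb);
 *	if (sb->s_op->sync_fs)
 *		sb->s_op->sync_fs(sb, 1);
 *
 * In contrast, the WB_SYNC_NONE helpers above never wait on I/O completion.
 */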

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);
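
/*
 * Illustrative sketch (not part of the original file): knfsd-style callers
 * use this when an inode must be durable before a reply goes out on the
 * wire, e.g.:
 *
 *	err = write_inode_now(inode, 1);	wait for the write (WB_SYNC_ALL)
 *
 * Passing 0 queues the write without waiting (WB_SYNC_NONE).
 */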

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
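
/*
 * Illustrative sketch (not part of the original file): fsync() implementations
 * that have already written the data pages often finish with just the inode,
 * e.g.:
 *
 *	err = filemap_write_and_wait_range(mapping, start, end);
 *	if (!err)
 *		err = sync_inode_metadata(inode, 1);
 *
 * nr_to_write = 0 in the wbc above is what restricts this to the inode itself.
 */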