/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
#include <linux/pagevec.h>
#include <linux/timer.h>
#include <linux/sched/rt.h>
#include <linux/mm_inline.h>
#include <trace/events/writeback.h>

#include "internal.h"

/*
 * Sleep at most 200ms at a time in balance_dirty_pages().
 */
#define MAX_PAUSE		max(HZ/5, 1)

/*
 * Try to keep balance_dirty_pages() call intervals higher than this many pages
 * by raising pause time to max_pause when it falls below this threshold.
 */
#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))

/*
 * Estimate write bandwidth at 200ms intervals.
 */
#define BANDWIDTH_INTERVAL	max(HZ/5, 1)

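/*
 * Worked example (hypothetical values, assuming HZ == 250 and 4KiB pages,
 * i.e. PAGE_SHIFT == 12): MAX_PAUSE and BANDWIDTH_INTERVAL both evaluate to
 * max(250/5, 1) = 50 jiffies (200ms), and DIRTY_POLL_THRESH to
 * 128 >> (12 - 10) = 32 pages, i.e. 128KiB worth of dirtied data.
 */
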
#define RATELIMIT_CALC_SHIFT	10

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via writeback threads) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

EXPORT_SYMBOL_GPL(dirty_writeback_interval);

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

struct wb_domain global_wb_domain;

/* consolidated parameters for balance_dirty_pages() and its subroutines */
struct dirty_throttle_control {
#ifdef CONFIG_CGROUP_WRITEBACK
	struct wb_domain	*dom;
	struct dirty_throttle_control *gdtc;	/* only set in memcg dtc's */
#endif
	struct bdi_writeback	*wb;
	struct fprop_local_percpu *wb_completions;

	unsigned long		avail;		/* dirtyable */
	unsigned long		dirty;		/* file_dirty + write + nfs */
	unsigned long		thresh;		/* dirty threshold */
	unsigned long		bg_thresh;	/* dirty background threshold */

	unsigned long		wb_dirty;	/* per-wb counterparts */
	unsigned long		wb_thresh;
	unsigned long		wb_bg_thresh;

	unsigned long		pos_ratio;
};

/*
 * Length of period for aging writeout fractions of bdis. This is an
 * arbitrarily chosen number. The longer the period, the more slowly fractions
 * will reflect changes in the current writeout rate.
 */
#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)

#ifdef CONFIG_CGROUP_WRITEBACK

#define GDTC_INIT(__wb)		.wb = (__wb),				\
				.dom = &global_wb_domain,		\
				.wb_completions = &(__wb)->completions

#define GDTC_INIT_NO_WB		.dom = &global_wb_domain

#define MDTC_INIT(__wb, __gdtc)	.wb = (__wb),				\
				.dom = mem_cgroup_wb_domain(__wb),	\
				.wb_completions = &(__wb)->memcg_completions, \
				.gdtc = __gdtc

static bool mdtc_valid(struct dirty_throttle_control *dtc)
{
	return dtc->dom;
}

static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
{
	return dtc->dom;
}

static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
{
	return mdtc->gdtc;
}

static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
{
	return &wb->memcg_completions;
}

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	unsigned long this_bw = wb->avg_write_bandwidth;
	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
	unsigned long long min = wb->bdi->min_ratio;
	unsigned long long max = wb->bdi->max_ratio;

	/*
	 * @wb may already be clean by the time control reaches here and
	 * the total may not include its bw.
	 */
	if (this_bw < tot_bw) {
		if (min) {
			min *= this_bw;
			do_div(min, tot_bw);
		}
		if (max < 100) {
			max *= this_bw;
			do_div(max, tot_bw);
		}
	}

	*minp = min;
	*maxp = max;
}

#else	/* CONFIG_CGROUP_WRITEBACK */

#define GDTC_INIT(__wb)		.wb = (__wb),                           \
				.wb_completions = &(__wb)->completions
#define GDTC_INIT_NO_WB
#define MDTC_INIT(__wb, __gdtc)

static bool mdtc_valid(struct dirty_throttle_control *dtc)
{
	return false;
}

static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
{
	return &global_wb_domain;
}

static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
{
	return NULL;
}

static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
{
	return NULL;
}

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	*minp = wb->bdi->min_ratio;
	*maxp = wb->bdi->max_ratio;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

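/*
 * Illustrative example for wb_min_max_ratio() above (assumed numbers): with
 * CONFIG_CGROUP_WRITEBACK, a bdi with min_ratio = 10 and max_ratio = 50 whose
 * wb currently contributes this_bw = 50MB/s out of tot_bw = 100MB/s gets both
 * ratios scaled by this_bw/tot_bw, i.e. an effective min of 5% and max of
 * 25%. Without cgroup writeback there is a single wb per bdi, so the bdi
 * ratios are returned unscaled.
 */
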
/*
 * In a memory zone, there is a certain amount of pages we consider
 * available for the page cache, which is essentially the number of
 * free and reclaimable pages, minus some zone reserves to protect
 * lowmem and the ability to uphold the zone's watermarks without
 * requiring writeback.
 *
 * This number of dirtyable pages is the base value to which the
 * user-configurable dirty ratio is applied to yield the effective
 * number of pages that are allowed to be actually dirtied; this works
 * per individual zone, or globally by using the sum of dirtyable pages
 * over all zones.
 *
 * Because the user is allowed to specify the dirty limit globally as
 * absolute number of bytes, calculating the per-zone dirty limit can
 * require translating the configured limit into a percentage of
 * global dirtyable memory first.
 */

/**
 * node_dirtyable_memory - number of dirtyable pages in a node
 * @pgdat: the node
 *
 * Returns the node's number of pages potentially available for dirty
 * page cache.  This is the base value for the per-node dirty limits.
 */
static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
{
	unsigned long nr_pages = 0;
	int z;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		nr_pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	/*
	 * Pages reserved for the kernel should not be considered
	 * dirtyable, to prevent a situation where reclaim has to
	 * clean pages in order to balance the zones.
	 */
	nr_pages -= min(nr_pages, pgdat->totalreserve_pages);

	nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
	nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);

	return nr_pages;
}

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;
	int i;

	for_each_node_state(node, N_HIGH_MEMORY) {
		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
			struct zone *z;
			unsigned long dirtyable;

			if (!is_highmem_idx(i))
				continue;

			z = &NODE_DATA(node)->node_zones[i];
			dirtyable = zone_page_state(z, NR_FREE_PAGES) +
				zone_page_state(z, NR_ZONE_LRU_FILE);

			/* watch for underflows */
			dirtyable -= min(dirtyable, high_wmark_pages(z));

			x += dirtyable;
		}
	}

	/*
	 * Unreclaimable memory (kernel memory or anonymous memory
	 * without swap) can bring down the dirtyable pages below
	 * the zone's dirty balance reserve and the above calculation
	 * will underflow.  However we still want to add in nodes
	 * which are below threshold (negative values) to get a more
	 * accurate calculation but make sure that the total never
	 * underflows.
	 */
	if ((long)x < 0)
		x = 0;

	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * global_dirtyable_memory - number of globally dirtyable pages
 *
 * Returns the global number of pages potentially available for dirty
 * page cache.  This is the base value for the global dirty limits.
 */
static unsigned long global_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES);
	/*
	 * Pages reserved for the kernel should not be considered
	 * dirtyable, to prevent a situation where reclaim has to
	 * clean pages in order to balance the zones.
	 */
	x -= min(x, totalreserve_pages);

	x += global_node_page_state(NR_INACTIVE_FILE);
	x += global_node_page_state(NR_ACTIVE_FILE);

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

/**
 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
 * @dtc: dirty_throttle_control of interest
 *
 * Calculate @dtc->thresh and ->bg_thresh considering
 * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}.  The caller
 * must ensure that @dtc->avail is set before calling this function.  The
 * dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 * real-time tasks.
 */
static void domain_dirty_limits(struct dirty_throttle_control *dtc)
{
	const unsigned long available_memory = dtc->avail;
	struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
	unsigned long bytes = vm_dirty_bytes;
	unsigned long bg_bytes = dirty_background_bytes;
	/* convert ratios to per-PAGE_SIZE for higher precision */
	unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
	unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
	unsigned long thresh;
	unsigned long bg_thresh;
	struct task_struct *tsk;

	/* gdtc is !NULL iff @dtc is for memcg domain */
	if (gdtc) {
		unsigned long global_avail = gdtc->avail;

		/*
		 * The byte settings can't be applied directly to memcg
		 * domains.  Convert them to ratios by scaling against
		 * globally available memory.  As the ratios are in
		 * per-PAGE_SIZE, they can be obtained by dividing bytes by
		 * number of pages.
		 */
		if (bytes)
			ratio = min(DIV_ROUND_UP(bytes, global_avail),
				    PAGE_SIZE);
		if (bg_bytes)
			bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
				       PAGE_SIZE);
		bytes = bg_bytes = 0;
	}

	if (bytes)
		thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
	else
		thresh = (ratio * available_memory) / PAGE_SIZE;

	if (bg_bytes)
		bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
	else
		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;

	if (bg_thresh >= thresh)
		bg_thresh = thresh / 2;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
	}
	dtc->thresh = thresh;
	dtc->bg_thresh = bg_thresh;

	/* we should eventually report the domain in the TP */
	if (!gdtc)
		trace_global_dirty_state(bg_thresh, thresh);
}

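/*
 * Worked example for domain_dirty_limits() (assumed numbers): with 4KiB
 * pages, vm_dirty_ratio = 20 gives ratio = (20 * 4096) / 100 = 819
 * per-PAGE_SIZE units, so for avail = 1,000,000 dirtyable pages the throttle
 * threshold is (819 * 1,000,000) / 4096 ~= 199,951 pages, just under the
 * nominal 20%; the small deficit comes from the integer division.
 */
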
/**
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 * @pbackground: out parameter for bg_thresh
 * @pdirty: out parameter for thresh
 *
 * Calculate bg_thresh and thresh for global_wb_domain.  See
 * domain_dirty_limits() for details.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };

	gdtc.avail = global_dirtyable_memory();
	domain_dirty_limits(&gdtc);

	*pbackground = gdtc.bg_thresh;
	*pdirty = gdtc.thresh;
}

/**
 * node_dirty_limit - maximum number of dirty pages allowed in a node
 * @pgdat: the node
 *
 * Returns the maximum number of dirty pages allowed in a node, based
 * on the node's dirtyable memory.
 */
static unsigned long node_dirty_limit(struct pglist_data *pgdat)
{
	unsigned long node_memory = node_dirtyable_memory(pgdat);
	struct task_struct *tsk = current;
	unsigned long dirty;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
			node_memory / global_dirtyable_memory();
	else
		dirty = vm_dirty_ratio * node_memory / 100;

	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
		dirty += dirty / 4;

	return dirty;
}

/**
 * node_dirty_ok - tells whether a node is within its dirty limits
 * @pgdat: the node to check
 *
 * Returns %true when the dirty pages in @pgdat are within the node's
 * dirty limit, %false if the limit is exceeded.
 */
bool node_dirty_ok(struct pglist_data *pgdat)
{
	int z;
	unsigned long limit = node_dirty_limit(pgdat);
	unsigned long nr_pages = 0;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		nr_pages += zone_page_state(zone, NR_FILE_DIRTY);
		nr_pages += zone_page_state(zone, NR_UNSTABLE_NFS);
		nr_pages += zone_page_state(zone, NR_WRITEBACK);
	}

	return nr_pages <= limit;
}

int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}

int dirty_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		writeback_set_ratelimit();
		vm_dirty_bytes = 0;
	}
	return ret;
}

int dirty_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		writeback_set_ratelimit();
		vm_dirty_ratio = 0;
	}
	return ret;
}

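/*
 * Note how the four handlers above make each ratio/bytes pair mutually
 * exclusive: writing one knob zeroes its counterpart. For example (shell
 * session for illustration only):
 *	# echo 500000000 > /proc/sys/vm/dirty_bytes	(vm_dirty_ratio -> 0)
 *	# echo 20 > /proc/sys/vm/dirty_ratio		(vm_dirty_bytes -> 0)
 */
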
static unsigned long wp_next_time(unsigned long cur_time)
{
	cur_time += VM_COMPLETIONS_PERIOD_LEN;
	/* 0 has a special meaning... */
	if (!cur_time)
		return 1;
	return cur_time;
}

static void wb_domain_writeout_inc(struct wb_domain *dom,
				   struct fprop_local_percpu *completions,
				   unsigned int max_prop_frac)
{
	__fprop_inc_percpu_max(&dom->completions, completions,
			       max_prop_frac);
	/* First event after period switching was turned off? */
	if (!unlikely(dom->period_time)) {
		/*
		 * We can race with other wb_domain_writeout_inc calls here but
		 * it does not cause any harm since the resulting time when
		 * timer will fire and what is in writeout_period_time will be
		 * roughly the same.
		 */
		dom->period_time = wp_next_time(jiffies);
		mod_timer(&dom->period_timer, dom->period_time);
	}
}

/*
 * Increment @wb's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __wb_writeout_inc(struct bdi_writeback *wb)
{
	struct wb_domain *cgdom;

	__inc_wb_stat(wb, WB_WRITTEN);
	wb_domain_writeout_inc(&global_wb_domain, &wb->completions,
			       wb->bdi->max_prop_frac);

	cgdom = mem_cgroup_wb_domain(wb);
	if (cgdom)
		wb_domain_writeout_inc(cgdom, wb_memcg_completions(wb),
				       wb->bdi->max_prop_frac);
}

void wb_writeout_inc(struct bdi_writeback *wb)
{
	unsigned long flags;

	local_irq_save(flags);
	__wb_writeout_inc(wb);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(wb_writeout_inc);

/*
 * On an idle system, we can be called long after we scheduled because we use
 * deferred timers, so count in the missed periods.
 */
static void writeout_period(unsigned long t)
{
	struct wb_domain *dom = (void *)t;
	int miss_periods = (jiffies - dom->period_time) /
						 VM_COMPLETIONS_PERIOD_LEN;

	if (fprop_new_period(&dom->completions, miss_periods + 1)) {
		dom->period_time = wp_next_time(dom->period_time +
				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
		mod_timer(&dom->period_timer, dom->period_time);
	} else {
		/*
		 * Aging has zeroed all fractions. Stop wasting CPU on period
		 * updates.
		 */
		dom->period_time = 0;
	}
}

int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
{
	memset(dom, 0, sizeof(*dom));

	spin_lock_init(&dom->lock);

	init_timer_deferrable(&dom->period_timer);
	dom->period_timer.function = writeout_period;
	dom->period_timer.data = (unsigned long)dom;

	dom->dirty_limit_tstamp = jiffies;

	return fprop_global_init(&dom->completions, gfp);
}

#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom)
{
	del_timer_sync(&dom->period_timer);
	fprop_global_destroy(&dom->completions);
}
#endif

/*
 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 * registered backing devices, which, for obvious reasons, cannot
 * exceed 100%.
 */
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);

static unsigned long dirty_freerun_ceiling(unsigned long thresh,
					   unsigned long bg_thresh)
{
	return (thresh + bg_thresh) / 2;
}

static unsigned long hard_dirty_limit(struct wb_domain *dom,
				      unsigned long thresh)
{
	return max(thresh, dom->dirty_limit);
}

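/*
 * Example for dirty_freerun_ceiling() (assumed numbers): with bg_thresh at
 * 10% and thresh at 20% of dirtyable memory, tasks run free of any throttling
 * until dirty pages cross the midpoint at 15%. hard_dirty_limit() conversely
 * never reports less than the domain-wide dirty_limit.
 */
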
/*
 * Memory which can be further allocated to a memcg domain is capped by
 * system-wide clean memory excluding the amount being used in the domain.
 */
static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
			    unsigned long filepages, unsigned long headroom)
{
	struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
	unsigned long clean = filepages - min(filepages, mdtc->dirty);
	unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
	unsigned long other_clean = global_clean - min(global_clean, clean);

	mdtc->avail = filepages + min(headroom, other_clean);
}

/**
 * __wb_calc_thresh - @wb's share of dirty throttling threshold
 * @dtc: dirty_throttle_control of interest
 *
 * Returns @wb's dirty limit in pages. The term "dirty" in the context of
 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
 *
 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 * when sleeping max_pause per page is not enough to keep the dirty pages under
 * control. For example, when the device is completely stalled due to some error
 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
 * In other normal situations, it acts more gently by throttling the tasks more
 * (rather than completely blocking them) when the wb dirty pages go high.
 *
 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 * - starving fast devices
 * - piling up dirty pages (that will take long time to sync) on slow devices
 *
 * The wb's share of the dirty limit adapts to its throughput and is
 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 */
static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
{
	struct wb_domain *dom = dtc_dom(dtc);
	unsigned long thresh = dtc->thresh;
	u64 wb_thresh;
	long numerator, denominator;
	unsigned long wb_min_ratio, wb_max_ratio;

	/*
	 * Calculate this BDI's share of the thresh ratio.
	 */
	fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
			      &numerator, &denominator);

	wb_thresh = (thresh * (100 - bdi_min_ratio)) / 100;
	wb_thresh *= numerator;
	do_div(wb_thresh, denominator);

	wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);

	wb_thresh += (thresh * wb_min_ratio) / 100;
	if (wb_thresh > (thresh * wb_max_ratio) / 100)
		wb_thresh = thresh * wb_max_ratio / 100;

	return wb_thresh;
}

unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
					       .thresh = thresh };
	return __wb_calc_thresh(&gdtc);
}

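/*
 * Worked example for __wb_calc_thresh() (assumed numbers): with thresh = 1000
 * pages, bdi_min_ratio = 0 and this wb having completed 3/5 of the recently
 * aged writeout (numerator/denominator = 3/5), the raw share is
 * 1000 * 3/5 = 600 pages, which is then clipped against this bdi's
 * min_ratio/max_ratio bounds by wb_min_max_ratio().
 */
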
/*
 *                           setpoint - dirty 3
 *        f(dirty) := 1.0 + (----------------)
 *                           limit - setpoint
 *
 * it's a 3rd order polynomial that is subject to
 *
 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
 * (2) f(setpoint) = 1.0 => the balance point
 * (3) f(limit)    = 0   => the hard limit
 * (4) df/dx      <= 0	 => negative feedback control
 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 *     => fast response on large errors; small oscillation near setpoint
 */
static long long pos_ratio_polynom(unsigned long setpoint,
					  unsigned long dirty,
					  unsigned long limit)
{
	long long pos_ratio;
	long x;

	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
		      (limit - setpoint) | 1);
	pos_ratio = x;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
}

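/*
 * Quick numeric check of pos_ratio_polynom() (assumed numbers, with
 * RATELIMIT_CALC_SHIFT == 10 so 1.0 == 1 << 10): for setpoint = 20000,
 * limit = 25000 and dirty = 22500, x = (20000 - 22500) / 5000 = -0.5, so
 * pos_ratio = 1.0 + (-0.5)^3 = 0.875, i.e. 896 in fixed point, mildly
 * slowing dirtiers down; at dirty == limit the same formula reaches 0.
 */
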
/*
 * Dirty position control.
 *
 * (o) global/bdi setpoints
 *
 * We want the dirty pages to be balanced around the global/wb setpoints.
 * When the number of dirty pages is higher/lower than the setpoint, the
 * dirty position control ratio (and hence task dirty ratelimit) will be
 * decreased/increased to bring the dirty pages back to the setpoint.
 *
 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 *
 *     if (dirty < setpoint) scale up   pos_ratio
 *     if (dirty > setpoint) scale down pos_ratio
 *
 *     if (wb_dirty < wb_setpoint) scale up   pos_ratio
 *     if (wb_dirty > wb_setpoint) scale down pos_ratio
 *
 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 *
 * (o) global control line
 *
 *     ^ pos_ratio
 *     |
 *     |            |<===== global dirty control scope ======>|
 * 2.0 .............*
 *     |            .*
 *     |            . *
 *     |            .   *
 *     |            .     *
 *     |            .        *
 *     |            .            *
 * 1.0 ................................*
 *     |            .                  .     *
 *     |            .                  .          *
 *     |            .                  .              *
 *     |            .                  .                 *
 *     |            .                  .                    *
 *   0 +------------.------------------.----------------------*------------->
 *           freerun^          setpoint^                 limit^   dirty pages
 *
 * (o) wb control line
 *
 *     ^ pos_ratio
 *     |
 *     |            *
 *     |              *
 *     |                *
 *     |                  *
 *     |                    * |<=========== span ============>|
 * 1.0 .......................*
 *     |                      . *
 *     |                      .   *
 *     |                      .     *
 *     |                      .       *
 *     |                      .         *
 *     |                      .           *
 *     |                      .             *
 *     |                      .               *
 *     |                      .                 *
 *     |                      .                   *
 *     |                      .                     *
 * 1/4 ...............................................* * * * * * * * * * * *
 *     |                      .                         .
 *     |                      .                           .
 *     |                      .                             .
 *   0 +----------------------.-------------------------------.------------->
 *                wb_setpoint^                    x_intercept^
 *
 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
 * be smoothly throttled down to normal if it starts high in situations like
 * - start writing to a slow SD card and a fast disk at the same time. The SD
 *   card's wb_dirty may rush to many times higher than wb_setpoint.
 * - the wb dirty thresh drops quickly due to change of JBOD workload
 */
static void wb_position_ratio(struct dirty_throttle_control *dtc)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long write_bw = wb->avg_write_bandwidth;
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
	unsigned long wb_thresh = dtc->wb_thresh;
	unsigned long x_intercept;
	unsigned long setpoint;		/* dirty pages' target balance point */
	unsigned long wb_setpoint;
	unsigned long span;
	long long pos_ratio;		/* for scaling up/down the rate limit */
	long x;

	dtc->pos_ratio = 0;

	if (unlikely(dtc->dirty >= limit))
		return;

	/*
	 * global setpoint
	 *
	 * See comment for pos_ratio_polynom().
	 */
	setpoint = (freerun + limit) / 2;
	pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);

	/*
	 * The strictlimit feature is a tool preventing mistrusted filesystems
	 * from growing a large number of dirty pages before throttling. For
	 * such filesystems balance_dirty_pages always checks wb counters
	 * against wb limits, even if the global "nr_dirty" is under "freerun".
	 * This is especially important for fuse which sets bdi->max_ratio to
	 * 1% by default. Without strictlimit feature, fuse writeback may
	 * consume an arbitrary amount of RAM because it is accounted in
	 * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
	 *
	 * Here, in wb_position_ratio(), we calculate pos_ratio based on
	 * two values: wb_dirty and wb_thresh. Let's consider an example:
	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
	 * limits are set by default to 10% and 20% (background and throttle).
	 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
	 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
	 * about ~6K pages (as the average of background and throttle wb
	 * limits). The 3rd order polynomial will provide positive feedback if
	 * wb_dirty is under wb_setpoint and vice versa.
	 *
	 * Note that we cannot use global counters in these calculations
	 * because we want to throttle a process writing to a strictlimit wb
	 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
	 * in the example above).
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		long long wb_pos_ratio;

		if (dtc->wb_dirty < 8) {
			dtc->pos_ratio = min_t(long long, pos_ratio * 2,
					   2 << RATELIMIT_CALC_SHIFT);
			return;
		}

		if (dtc->wb_dirty >= wb_thresh)
			return;

		wb_setpoint = dirty_freerun_ceiling(wb_thresh,
						    dtc->wb_bg_thresh);

		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
			return;

		wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
						 wb_thresh);

		/*
		 * Typically, for the strictlimit case, wb_setpoint << setpoint
		 * and pos_ratio >> wb_pos_ratio. In other words, the global
		 * state ("dirty") is not the limiting factor and we have to
		 * make the decision based on wb counters. But there is an
		 * important case when global pos_ratio should get precedence:
		 * global limits are exceeded (e.g. due to activities on other
		 * wb's) while given strictlimit wb is below limit.
		 *
		 * "pos_ratio * wb_pos_ratio" would work for the case above,
		 * but it would look too unnatural for the case of all
		 * activity in the system coming from a single strictlimit wb
		 * with bdi->max_ratio == 100%.
		 *
		 * Note that min() below somewhat changes the dynamics of the
		 * control system. Normally, pos_ratio value can be well over 3
		 * (when globally we are at freerun and wb is well below wb
		 * setpoint). Now the maximum pos_ratio in the same situation
		 * is 2. We might want to tweak this if we observe the control
		 * system is too slow to adapt.
		 */
		dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
		return;
	}

	/*
	 * We have computed basic pos_ratio above based on global situation. If
	 * the wb is over/under its share of dirty pages, we want to scale
	 * pos_ratio further down/up. That is done by the following mechanism.
	 */

	/*
	 * wb setpoint
	 *
	 *        f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
	 *
	 *                        x_intercept - wb_dirty
	 *                     := --------------------------
	 *                        x_intercept - wb_setpoint
	 *
	 * The main wb control line is a linear function that is subject to
	 *
	 * (1) f(wb_setpoint) = 1.0
	 * (2) k = - 1 / (8 * write_bw)  (in single wb case)
	 *     or equally: x_intercept = wb_setpoint + 8 * write_bw
	 *
	 * For the single wb case, the dirty pages are observed to fluctuate
	 * regularly within range
	 *        [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
	 * for various filesystems, where (2) yields a reasonable 12.5%
	 * fluctuation range for pos_ratio.
	 *
	 * For the JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to
	 * its own size, so move the slope over accordingly and choose a slope
	 * that yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
	 */
	if (unlikely(wb_thresh > dtc->thresh))
		wb_thresh = dtc->thresh;
	/*
	 * It's very possible that wb_thresh is close to 0 not because the
	 * device is slow, but because it has remained inactive for a long
	 * time. Honour such devices with a reasonably good (hopefully IO
	 * efficient) threshold, so that the occasional writes won't be
	 * blocked and active writes can ramp up the threshold quickly.
	 */
	wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
	/*
	 * scale global setpoint to wb's:
	 *	wb_setpoint = setpoint * wb_thresh / thresh
	 */
	x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
	wb_setpoint = setpoint * (u64)x >> 16;
	/*
	 * Use span=(8*write_bw) in single wb case as indicated by
	 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
	 *
	 *        wb_thresh                    thresh - wb_thresh
	 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
	 *         thresh                           thresh
	 */
	span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
	x_intercept = wb_setpoint + span;

	if (dtc->wb_dirty < x_intercept - span / 4) {
		pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
				      (x_intercept - wb_setpoint) | 1);
	} else
		pos_ratio /= 4;

	/*
	 * wb reserve area, a safeguard against dirty pool underrun and disk
	 * idle. It may push the desired control point of global dirty pages
	 * higher than setpoint.
	 */
	x_intercept = wb_thresh / 2;
	if (dtc->wb_dirty < x_intercept) {
		if (dtc->wb_dirty > x_intercept / 8)
			pos_ratio = div_u64(pos_ratio * x_intercept,
					    dtc->wb_dirty);
		else
			pos_ratio *= 8;
	}

	dtc->pos_ratio = pos_ratio;
}

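/*
 * Example for the wb-setpoint scaling in wb_position_ratio() above (assumed
 * numbers): with thresh = 200,000 pages and wb_thresh = 50,000 pages, the
 * 16-bit fixed-point factor is x = (50000 << 16) / 200000 ~= 0.25, so
 * wb_setpoint lands at about a quarter of the global setpoint, and span
 * stretches the control line by (thresh - wb_thresh + 8 * write_bw) * 0.25.
 */
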
static void wb_update_write_bandwidth(struct bdi_writeback *wb,
				      unsigned long elapsed,
				      unsigned long written)
{
	const unsigned long period = roundup_pow_of_two(3 * HZ);
	unsigned long avg = wb->avg_write_bandwidth;
	unsigned long old = wb->write_bandwidth;
	u64 bw;

	/*
	 * bw = written * HZ / elapsed
	 *
	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
	 * write_bandwidth = ---------------------------------------------------
	 *                                          period
	 *
	 * @written may have decreased due to account_page_redirty().
	 * Avoid underflowing @bw calculation.
	 */
	bw = written - min(written, wb->written_stamp);
	bw *= HZ;
	if (unlikely(elapsed > period)) {
		do_div(bw, elapsed);
		avg = bw;
		goto out;
	}
	bw += (u64)wb->write_bandwidth * (period - elapsed);
	bw >>= ilog2(period);

	/*
	 * one more level of smoothing, for filtering out sudden spikes
	 */
	if (avg > old && old >= (unsigned long)bw)
		avg -= (avg - old) >> 3;

	if (avg < old && old <= (unsigned long)bw)
		avg += (old - avg) >> 3;

out:
	/* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
	avg = max(avg, 1LU);
	if (wb_has_dirty_io(wb)) {
		long delta = avg - wb->avg_write_bandwidth;
		WARN_ON_ONCE(atomic_long_add_return(delta,
					&wb->bdi->tot_write_bandwidth) <= 0);
	}
	wb->write_bandwidth = bw;
	wb->avg_write_bandwidth = avg;
}

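/*
 * Worked example for wb_update_write_bandwidth() (assumed numbers, HZ == 250,
 * so period = roundup_pow_of_two(750) = 1024 jiffies): over elapsed = 50
 * jiffies (200ms), written = 2560 pages corresponds to 2560 * 250 / 50 =
 * 12800 pages/s. With a stored write_bandwidth of 12800 pages/s the blend
 * gives (2560 * 250 + 12800 * (1024 - 50)) >> 10 = 12800 again, i.e. when
 * the measured rate equals the stored rate, the estimate is a fixed point.
 */
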
static void update_dirty_limit(struct dirty_throttle_control *dtc)
{
	struct wb_domain *dom = dtc_dom(dtc);
	unsigned long thresh = dtc->thresh;
	unsigned long limit = dom->dirty_limit;

	/*
	 * Follow up in one step.
	 */
	if (limit < thresh) {
		limit = thresh;
		goto update;
	}

	/*
	 * Follow down slowly. Use the higher one as the target, because thresh
	 * may drop below dirty. This is exactly the reason to introduce
	 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
	 */
	thresh = max(thresh, dtc->dirty);
	if (limit > thresh) {
		limit -= (limit - thresh) >> 5;
		goto update;
	}
	return;
update:
	dom->dirty_limit = limit;
}

static void domain_update_bandwidth(struct dirty_throttle_control *dtc,
				    unsigned long now)
{
	struct wb_domain *dom = dtc_dom(dtc);

	/*
	 * check locklessly first to optimize away locking most of the time
	 */
	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
		return;

	spin_lock(&dom->lock);
	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
		update_dirty_limit(dtc);
		dom->dirty_limit_tstamp = now;
	}
	spin_unlock(&dom->lock);
}

/*
 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
 *
 * Normal wb tasks will be curbed at or below it in long term.
 * Obviously it should be around (write_bw / N) when there are N dd tasks.
 */
static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
				      unsigned long dirtied,
				      unsigned long elapsed)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long dirty = dtc->dirty;
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
	unsigned long setpoint = (freerun + limit) / 2;
	unsigned long write_bw = wb->avg_write_bandwidth;
	unsigned long dirty_ratelimit = wb->dirty_ratelimit;
	unsigned long dirty_rate;
	unsigned long task_ratelimit;
	unsigned long balanced_dirty_ratelimit;
	unsigned long step;
	unsigned long x;
	unsigned long shift;

	/*
	 * The dirty rate will match the writeout rate in long term, except
	 * when dirty pages are truncated by userspace or re-dirtied by FS.
	 */
	dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;

	/*
	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
	 */
	task_ratelimit = (u64)dirty_ratelimit *
					dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
	task_ratelimit++; /* it helps ramp up dirty_ratelimit from tiny values */

	/*
	 * A linear estimation of the "balanced" throttle rate. The theory is,
	 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
	 * formula will yield the balanced rate limit (write_bw / N).
	 *
	 * Note that the expanded form is not a pure rate feedback:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
	 * but also takes pos_ratio into account:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
	 *
	 * (1) is not realistic because pos_ratio also takes part in balancing
	 * the dirty rate.  Consider the state
	 *	pos_ratio = 0.5						     (3)
	 *	rate = 2 * (write_bw / N)				     (4)
	 * If (1) is used, it will get stuck in that state! Because each dd
	 * will be throttled at
	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
	 * yielding
	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
	 * put (6) into (1) we get
	 *	rate_(i+1) = rate_(i)					     (7)
	 *
	 * So we end up using (2) to always keep
	 *	rate_(i+1) ~= (write_bw / N)				     (8)
	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
	 * pos_ratio is able to drive itself to 1.0, which is not only where
	 * the dirty count meets the setpoint, but also where the slope of
	 * pos_ratio is most flat and hence task_ratelimit fluctuates least.
	 */
	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
					   dirty_rate | 1);
	/*
	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
	 */
	if (unlikely(balanced_dirty_ratelimit > write_bw))
		balanced_dirty_ratelimit = write_bw;

	/*
	 * We could safely do this and return immediately:
	 *
	 *	wb->dirty_ratelimit = balanced_dirty_ratelimit;
	 *
	 * However to get a more stable dirty_ratelimit, the below elaborated
	 * code makes use of task_ratelimit to filter out singular points and
	 * limit the step size.
	 *
	 * The below code essentially only uses the relative value of
	 *
	 *	task_ratelimit - dirty_ratelimit
	 *	= (pos_ratio - 1) * dirty_ratelimit
	 *
	 * which reflects the direction and size of dirty position error.
	 */

	/*
	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
	 * task_ratelimit is on the same side of dirty_ratelimit, too.
	 * For example, when
	 * - dirty_ratelimit > balanced_dirty_ratelimit
	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
	 * lowering dirty_ratelimit will help meet both the position and rate
	 * control targets. Otherwise, don't update dirty_ratelimit if it will
	 * only help meet the rate target. After all, what users ultimately
	 * feel and care about are a stable dirty rate and small position
	 * error.
	 *
	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
	 * and filter out the singular points of balanced_dirty_ratelimit,
	 * which keeps jumping around randomly and can even leap far away at
	 * times due to the small 200ms estimation period of dirty_rate (we
	 * want to keep that period small to reduce time lags).
	 */
	step = 0;

	/*
	 * For the strictlimit case, calculations above were based on wb
	 * counters and limits (starting from pos_ratio = wb_position_ratio()
	 * and up to balanced_dirty_ratelimit = task_ratelimit * write_bw /
	 * dirty_rate). Hence, to calculate "step" properly, we have to use
	 * wb_dirty as "dirty" and wb_setpoint as "setpoint".
	 *
	 * We ramp up dirty_ratelimit forcibly if wb_dirty is low because
	 * it's possible that wb_thresh is close to zero due to inactivity
	 * of the backing device.
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		dirty = dtc->wb_dirty;
		if (dtc->wb_dirty < 8)
			setpoint = dtc->wb_dirty + 1;
		else
			setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
	}

	if (dirty < setpoint) {
		x = min3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit < x)
			step = x - dirty_ratelimit;
	} else {
		x = max3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit > x)
			step = dirty_ratelimit - x;
	}

	/*
	 * Don't pursue 100% rate matching. It's impossible since the balanced
	 * rate itself is constantly fluctuating. So decrease the track speed
	 * when it gets close to the target. Helps eliminate pointless tremors.
	 */
	shift = dirty_ratelimit / (2 * step + 1);
	if (shift < BITS_PER_LONG)
		step = DIV_ROUND_UP(step >> shift, 8);
	else
		step = 0;

	if (dirty_ratelimit < balanced_dirty_ratelimit)
		dirty_ratelimit += step;
	else
		dirty_ratelimit -= step;

	wb->dirty_ratelimit = max(dirty_ratelimit, 1UL);
	wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;

	trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
}

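/*
 * Worked example for wb_update_dirty_ratelimit() (assumed numbers): with
 * write_bw = 25600 pages/s (100MB/s at 4KiB pages) and four dd tasks each
 * dirtying at task_ratelimit, the measured dirty_rate settles around
 * 4 * task_ratelimit, so balanced_dirty_ratelimit = task_ratelimit *
 * write_bw / dirty_rate ~= write_bw / 4 = 6400 pages/s, matching the
 * (write_bw / N) target described above.
 */
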
static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
				  struct dirty_throttle_control *mdtc,
				  unsigned long start_time,
				  bool update_ratelimit)
{
	struct bdi_writeback *wb = gdtc->wb;
	unsigned long now = jiffies;
	unsigned long elapsed = now - wb->bw_time_stamp;
	unsigned long dirtied;
	unsigned long written;

	lockdep_assert_held(&wb->list_lock);

	/*
	 * rate-limit, only update once every 200ms.
	 */
	if (elapsed < BANDWIDTH_INTERVAL)
		return;

	dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
	written = percpu_counter_read(&wb->stat[WB_WRITTEN]);

	/*
	 * Skip quiet periods when disk bandwidth is under-utilized.
	 * (at least 1s idle time between two flusher runs)
	 */
	if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time))
		goto snapshot;

	if (update_ratelimit) {
		domain_update_bandwidth(gdtc, now);
		wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);

		/*
		 * @mdtc is always NULL if !CGROUP_WRITEBACK but the
		 * compiler has no way to figure that out.  Help it.
		 */
		if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
			domain_update_bandwidth(mdtc, now);
			wb_update_dirty_ratelimit(mdtc, dirtied, elapsed);
		}
	}
	wb_update_write_bandwidth(wb, elapsed, written);

snapshot:
	wb->dirtied_stamp = dirtied;
	wb->written_stamp = written;
	wb->bw_time_stamp = now;
}

void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };

	__wb_update_bandwidth(&gdtc, NULL, start_time, false);
}

/*
 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
 * will look to see if it needs to start dirty throttling.
 *
 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
 * global_page_state() too often. So scale it near-sqrt to the safety margin
 * (the number of pages we may dirty without exceeding the dirty limits).
 */
static unsigned long dirty_poll_interval(unsigned long dirty,
					 unsigned long thresh)
{
	if (thresh > dirty)
		return 1UL << (ilog2(thresh - dirty) >> 1);

	return 1;
}

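/*
 * Example for dirty_poll_interval() (assumed numbers): with a safety margin
 * of thresh - dirty = 4096 pages, ilog2(4096) = 12 and the near-sqrt scaling
 * yields 1 << (12 >> 1) = 64 pages between polls; once the margin is gone,
 * the task re-checks after every page.
 */
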
static unsigned long wb_max_pause(struct bdi_writeback *wb,
				  unsigned long wb_dirty)
{
	unsigned long bw = wb->avg_write_bandwidth;
	unsigned long t;

	/*
	 * Limit pause time for small memory systems. If sleeping for too
	 * long, a small pool of dirty/writeback pages may go empty and disk go
	 * idle.
	 *
	 * 8 serves as the safety ratio.
	 */
	t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
	t++;

	return min_t(unsigned long, t, MAX_PAUSE);
}

static long wb_min_pause(struct bdi_writeback *wb,
			 long max_pause,
			 unsigned long task_ratelimit,
			 unsigned long dirty_ratelimit,
			 int *nr_dirtied_pause)
{
	long hi = ilog2(wb->avg_write_bandwidth);
	long lo = ilog2(wb->dirty_ratelimit);
	long t;		/* target pause */
	long pause;	/* estimated next pause */
	int pages;	/* target nr_dirtied_pause */

	/* target for 10ms pause on 1-dd case */
	t = max(1, HZ / 100);

	/*
	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
	 * overheads.
	 *
	 * (N * 10ms) on 2^N concurrent tasks.
	 */
	if (hi > lo)
		t += (hi - lo) * (10 * HZ) / 1024;

	/*
	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
	 * on the much more stable dirty_ratelimit. However the next pause time
	 * will be computed based on task_ratelimit and the two rate limits may
	 * depart considerably at some time. Especially if task_ratelimit goes
	 * below dirty_ratelimit/2 and the target pause is max_pause, the next
	 * pause time will be max_pause*2 _trimmed down_ to max_pause.  As a
	 * result task_ratelimit won't be executed faithfully, which could
	 * eventually bring down dirty_ratelimit.
	 *
	 * We apply two rules to fix it up:
	 * 1) try to estimate the next pause time and if necessary, use a lower
	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
	 * 2) limit the target pause time to max_pause/2, so that the normal
	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
	 */
	t = min(t, 1 + max_pause / 2);
	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);

	/*
	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
	 * When the 16 consecutive reads are often interrupted by some dirty
	 * throttling pause during the async writes, cfq will go into idles
	 * (deadline is fine). So push nr_dirtied_pause as high as possible
	 * until it reaches DIRTY_POLL_THRESH=32 pages.
	 */
	if (pages < DIRTY_POLL_THRESH) {
		t = max_pause;
		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
		if (pages > DIRTY_POLL_THRESH) {
			pages = DIRTY_POLL_THRESH;
			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
		}
	}

	pause = HZ * pages / (task_ratelimit + 1);
	if (pause > max_pause) {
		t = max_pause;
		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
	}

	*nr_dirtied_pause = pages;
	/*
	 * The minimal pause time will normally be half the target pause time.
	 */
	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
}

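/*
 * Worked example for wb_min_pause() (assumed numbers, HZ == 1000): the base
 * target is t = 10 jiffies (10ms). With dirty_ratelimit = 25600 pages/s and
 * a large max_pause, pages = 25600 * 10 / 1024 = 250 >= DIRTY_POLL_THRESH,
 * and if task_ratelimit == dirty_ratelimit the estimated pause is about
 * 1000 * 250 / 25601 ~= 9 jiffies, so the function returns 1 + t / 2 = 6
 * jiffies as the minimal pause.
 */
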
static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long wb_reclaimable;

	/*
	 * wb_thresh is not treated as a strict limiting factor the way
	 * dirty_thresh is, for these reasons:
	 * - in JBOD setup, wb_thresh can fluctuate a lot
	 * - in a system with HDD and USB key, the USB key may somehow
	 *   go into state (wb_dirty >> wb_thresh) either because
	 *   wb_dirty starts high, or because wb_thresh drops low.
	 *   In this case we don't want to hard throttle the USB key
	 *   dirtiers for 100 seconds until wb_dirty drops under
	 *   wb_thresh. Instead the auxiliary wb control line in
	 *   wb_position_ratio() will let the dirtier task progress
	 *   at some rate <= (write_bw / 2) for bringing down wb_dirty.
	 */
	dtc->wb_thresh = __wb_calc_thresh(dtc);
	dtc->wb_bg_thresh = dtc->thresh ?
		div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;

	/*
	 * In order to avoid the stacked BDI deadlock we need
	 * to ensure we accurately count the 'dirty' pages when
	 * the threshold is low.
	 *
	 * Otherwise it would be possible to get thresh+n pages
	 * reported dirty, even though there are thresh-m pages
	 * actually dirty; with m+n sitting in the percpu
	 * deltas.
	 */
	if (dtc->wb_thresh < 2 * wb_stat_error(wb)) {
		wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
		dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
	} else {
		wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
		dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
	}
}

L
Linus Torvalds 已提交
1561 1562 1563
/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
 * If we're over `background_thresh' then the writeback threads are woken to
 * perform some writeout.
 */
static void balance_dirty_pages(struct address_space *mapping,
				struct bdi_writeback *wb,
				unsigned long pages_dirtied)
{
	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
	struct dirty_throttle_control * const gdtc = &gdtc_stor;
	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
						     &mdtc_stor : NULL;
	struct dirty_throttle_control *sdtc;
	unsigned long nr_reclaimable;	/* = file_dirty + unstable_nfs */
	long period;
	long pause;
	long max_pause;
	long min_pause;
	int nr_dirtied_pause;
	bool dirty_exceeded = false;
	unsigned long task_ratelimit;
	unsigned long dirty_ratelimit;
	struct backing_dev_info *bdi = wb->bdi;
	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
	unsigned long start_time = jiffies;

	for (;;) {
		unsigned long now = jiffies;
		unsigned long dirty, thresh, bg_thresh;
		unsigned long m_dirty = 0;	/* stop bogus uninit warnings */
		unsigned long m_thresh = 0;
		unsigned long m_bg_thresh = 0;

		/*
		 * Unstable writes are a feature of certain networked
		 * filesystems (e.g. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		gdtc->avail = global_dirtyable_memory();
		gdtc->dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);

		domain_dirty_limits(gdtc);

		if (unlikely(strictlimit)) {
			wb_dirty_limits(gdtc);

			dirty = gdtc->wb_dirty;
			thresh = gdtc->wb_thresh;
			bg_thresh = gdtc->wb_bg_thresh;
		} else {
			dirty = gdtc->dirty;
			thresh = gdtc->thresh;
			bg_thresh = gdtc->bg_thresh;
		}

		if (mdtc) {
			unsigned long filepages, headroom, writeback;

			/*
			 * If @wb belongs to !root memcg, repeat the same
			 * basic calculations for the memcg domain.
			 */
			mem_cgroup_wb_stats(wb, &filepages, &headroom,
					    &mdtc->dirty, &writeback);
			mdtc->dirty += writeback;
			mdtc_calc_avail(mdtc, filepages, headroom);

			domain_dirty_limits(mdtc);

			if (unlikely(strictlimit)) {
				wb_dirty_limits(mdtc);
				m_dirty = mdtc->wb_dirty;
				m_thresh = mdtc->wb_thresh;
				m_bg_thresh = mdtc->wb_bg_thresh;
			} else {
				m_dirty = mdtc->dirty;
				m_thresh = mdtc->thresh;
				m_bg_thresh = mdtc->bg_thresh;
			}
		}

		/*
		 * Throttle it only when the background writeback cannot
		 * catch up. This avoids (excessively) small writeouts
		 * when the wb limits are ramping up in case of !strictlimit.
		 *
		 * In the strictlimit case, make the decision based on the wb
		 * counters and limits. Small writeouts when the wb limits are
		 * ramping up are the price we consciously pay for
		 * strictlimit-ing.
		 *
		 * If memcg domain is in effect, @dirty should be under
		 * both global and memcg freerun ceilings.
		 */
		if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) &&
		    (!mdtc ||
		     m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) {
			unsigned long intv = dirty_poll_interval(dirty, thresh);
			unsigned long m_intv = ULONG_MAX;

			current->dirty_paused_when = now;
			current->nr_dirtied = 0;
			if (mdtc)
				m_intv = dirty_poll_interval(m_dirty, m_thresh);
			current->nr_dirtied_pause = min(intv, m_intv);
			break;
		}
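		/*
		 * Worked example (illustrative only): with thresh = 1000 and
		 * bg_thresh = 500 pages, dirty_freerun_ceiling() is the
		 * midpoint (1000 + 500) / 2 = 750 pages; below that, tasks
		 * dirty pages at full speed without ever sleeping here.
		 */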

		if (unlikely(!writeback_in_progress(wb)))
			wb_start_background_writeback(wb);

		/*
		 * Calculate global domain's pos_ratio and select the
		 * global dtc by default.
		 */
		if (!strictlimit)
			wb_dirty_limits(gdtc);

		dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) &&
			((gdtc->dirty > gdtc->thresh) || strictlimit);

		wb_position_ratio(gdtc);
		sdtc = gdtc;

		if (mdtc) {
			/*
			 * If memcg domain is in effect, calculate its
			 * pos_ratio.  @wb should satisfy constraints from
			 * both global and memcg domains.  Choose the one
			 * w/ lower pos_ratio.
			 */
			if (!strictlimit)
				wb_dirty_limits(mdtc);

			dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) &&
				((mdtc->dirty > mdtc->thresh) || strictlimit);

			wb_position_ratio(mdtc);
			if (mdtc->pos_ratio < gdtc->pos_ratio)
				sdtc = mdtc;
		}

		if (dirty_exceeded && !wb->dirty_exceeded)
			wb->dirty_exceeded = 1;

		if (time_is_before_jiffies(wb->bw_time_stamp +
					   BANDWIDTH_INTERVAL)) {
			spin_lock(&wb->list_lock);
			__wb_update_bandwidth(gdtc, mdtc, start_time, true);
			spin_unlock(&wb->list_lock);
		}

		/* throttle according to the chosen dtc */
		dirty_ratelimit = wb->dirty_ratelimit;
		task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
							RATELIMIT_CALC_SHIFT;
		max_pause = wb_max_pause(wb, sdtc->wb_dirty);
		min_pause = wb_min_pause(wb, max_pause,
					 task_ratelimit, dirty_ratelimit,
					 &nr_dirtied_pause);
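		/*
		 * Worked example (illustrative only): pos_ratio is a
		 * fixed-point number with base 1 << RATELIMIT_CALC_SHIFT
		 * == 1024.  With dirty_ratelimit = 2000 pages/s and
		 * pos_ratio = 512 (i.e. 0.5), task_ratelimit becomes
		 * 2000 * 512 >> 10 = 1000 pages/s.
		 */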

		if (unlikely(task_ratelimit == 0)) {
			period = max_pause;
			pause = max_pause;
			goto pause;
		}
		period = HZ * pages_dirtied / task_ratelimit;
		pause = period;
		if (current->dirty_paused_when)
			pause -= now - current->dirty_paused_when;
		/*
		 * For less than 1s think time (ext3/4 may block the dirtier
		 * for up to 800ms from time to time on 1-HDD; xfs does too,
		 * though much less frequently), try to compensate for it in
		 * future periods by updating the virtual time; otherwise just
		 * do a reset, as it may be a light dirtier.
		 */
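		/*
		 * Worked example (illustrative only; assumes HZ=1000): a task
		 * that dirtied pages_dirtied = 16 pages at task_ratelimit =
		 * 200 pages/s owes period = 1000 * 16 / 200 = 80 jiffies; if
		 * it last paused 30 jiffies ago (its think time), only
		 * pause = 50 jiffies of sleep remain.
		 */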
		if (pause < min_pause) {
			trace_balance_dirty_pages(wb,
						  sdtc->thresh,
						  sdtc->bg_thresh,
						  sdtc->dirty,
						  sdtc->wb_thresh,
						  sdtc->wb_dirty,
						  dirty_ratelimit,
						  task_ratelimit,
						  pages_dirtied,
						  period,
						  min(pause, 0L),
						  start_time);
			if (pause < -HZ) {
				current->dirty_paused_when = now;
				current->nr_dirtied = 0;
			} else if (period) {
				current->dirty_paused_when += period;
				current->nr_dirtied = 0;
			} else if (current->nr_dirtied_pause <= pages_dirtied)
				current->nr_dirtied_pause += pages_dirtied;
			break;
		}
		if (unlikely(pause > max_pause)) {
			/* for occasional dropped task_ratelimit */
			now += min(pause - max_pause, max_pause);
			pause = max_pause;
		}

pause:
		trace_balance_dirty_pages(wb,
					  sdtc->thresh,
					  sdtc->bg_thresh,
					  sdtc->dirty,
					  sdtc->wb_thresh,
					  sdtc->wb_dirty,
					  dirty_ratelimit,
					  task_ratelimit,
					  pages_dirtied,
					  period,
					  pause,
					  start_time);
		__set_current_state(TASK_KILLABLE);
		io_schedule_timeout(pause);

		current->dirty_paused_when = now + pause;
		current->nr_dirtied = 0;
		current->nr_dirtied_pause = nr_dirtied_pause;

		/*
		 * This is typically equal to (dirty < thresh) and can also
		 * keep "1000+ dd on a slow USB stick" under control.
		 */
		if (task_ratelimit)
			break;

		/*
		 * In the case of an unresponsive NFS server and the NFS dirty
		 * pages exceeding dirty_thresh, give the other good wb's a pipe
		 * to go through, so that tasks on them still remain responsive.
		 *
		 * In theory 1 page is enough to keep the consumer-producer
		 * pipe going: the flusher cleans 1 page => the task dirties 1
		 * more page. However wb_dirty has accounting errors.  So use
		 * the larger and more IO friendly wb_stat_error.
		 */
		if (sdtc->wb_dirty <= wb_stat_error(wb))
			break;

		if (fatal_signal_pending(current))
			break;
	}

	if (!dirty_exceeded && wb->dirty_exceeded)
		wb->dirty_exceeded = 0;

	if (writeback_in_progress(wb))
		return;

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if (laptop_mode)
		return;

	if (nr_reclaimable > gdtc->bg_thresh)
		wb_start_background_writeback(wb);
}

static DEFINE_PER_CPU(int, bdp_ratelimits);

/*
 * Normal tasks are throttled by
 *	loop {
 *		dirty tsk->nr_dirtied_pause pages;
 *		take a snap in balance_dirty_pages();
 *	}
 * However there is a worst case. If every task exits immediately after
 * dirtying (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will
 * never be called to throttle the page dirties. The solution is to save the
 * not yet throttled page dirties in dirty_throttle_leaks on task exit and
 * charge them randomly into the running tasks. This works well for the above
 * worst case, as the new task will pick up and accumulate the old task's
 * leaked dirty count and eventually get throttled.
 */
DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
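
/*
 * Worked example (illustrative only): with nr_dirtied_pause = 32, a task
 * that dirties 31 pages and exits leaks those un-throttled pages into
 * dirty_throttle_leaks; the next dirtier on this CPU inherits them via
 * balance_dirty_pages_ratelimited() below and therefore reaches its own
 * ratelimit (and enters balance_dirty_pages()) correspondingly sooner.
 */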

/**
 * balance_dirty_pages_ratelimited - balance dirty memory state
 * @mapping: address_space which was dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct bdi_writeback *wb = NULL;
	int ratelimit;
	int *p;

	if (!bdi_cap_account_dirty(bdi))
		return;

	if (inode_cgwb_enabled(inode))
		wb = wb_get_create_current(bdi, GFP_KERNEL);
	if (!wb)
		wb = &bdi->wb;

	ratelimit = current->nr_dirtied_pause;
	if (wb->dirty_exceeded)
		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));

	preempt_disable();
	/*
	 * This prevents one CPU from accumulating too many dirtied pages
	 * without calling into balance_dirty_pages(), which can happen when
	 * there are 1000+ tasks that all start dirtying pages at exactly the
	 * same time, and hence all honour a too-large initial
	 * task->nr_dirtied_pause.
	 */
	p = this_cpu_ptr(&bdp_ratelimits);
	if (unlikely(current->nr_dirtied >= ratelimit))
		*p = 0;
	else if (unlikely(*p >= ratelimit_pages)) {
		*p = 0;
		ratelimit = 0;
	}
	/*
	 * Pick up the dirtied pages left behind by exited tasks. This avoids
	 * lots of short-lived tasks (e.g. gcc invocations in a kernel build)
	 * escaping the dirty throttling and livelocking other long-running
	 * dirtiers.
	 */
	p = this_cpu_ptr(&dirty_throttle_leaks);
	if (*p > 0 && current->nr_dirtied < ratelimit) {
		unsigned long nr_pages_dirtied;
		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
		*p -= nr_pages_dirtied;
		current->nr_dirtied += nr_pages_dirtied;
	}
	preempt_enable();

	if (unlikely(current->nr_dirtied >= ratelimit))
		balance_dirty_pages(mapping, wb, current->nr_dirtied);

	wb_put(wb);
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
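
/*
 * Usage note (illustrative, not part of the original commentary):
 * filesystems rarely call this directly; generic helpers such as
 * generic_perform_write() invoke it once per copied page after marking
 * the page dirty, which is what keeps buffered writers throttled.
 */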

/**
 * wb_over_bg_thresh - does @wb need to be written back?
 * @wb: bdi_writeback of interest
 *
 * Determines whether background writeback should keep writing @wb or it's
 * clean enough.  Returns %true if writeback should continue.
 */
bool wb_over_bg_thresh(struct bdi_writeback *wb)
{
	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
	struct dirty_throttle_control * const gdtc = &gdtc_stor;
	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
						     &mdtc_stor : NULL;

	/*
	 * Similar to balance_dirty_pages() but ignores pages being written
	 * as we're trying to decide whether to put more under writeback.
	 */
	gdtc->avail = global_dirtyable_memory();
	gdtc->dirty = global_page_state(NR_FILE_DIRTY) +
		      global_page_state(NR_UNSTABLE_NFS);
	domain_dirty_limits(gdtc);

	if (gdtc->dirty > gdtc->bg_thresh)
		return true;

	if (wb_stat(wb, WB_RECLAIMABLE) >
	    wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
		return true;

	if (mdtc) {
		unsigned long filepages, headroom, writeback;

		mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
				    &writeback);
		mdtc_calc_avail(mdtc, filepages, headroom);
		domain_dirty_limits(mdtc);	/* ditto, ignore writeback */

		if (mdtc->dirty > mdtc->bg_thresh)
			return true;

		if (wb_stat(wb, WB_RECLAIMABLE) >
		    wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
			return true;
	}

	return false;
}
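
/*
 * Usage note (illustrative, not part of the original commentary): the
 * flusher threads use this check to decide whether background writeback
 * of @wb should keep going; it only returns false once both the domain
 * and the wb itself have dropped below their background thresholds.
 */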

void throttle_vm_writeout(gfp_t gfp_mask)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	for ( ; ; ) {
		global_dirty_limits(&background_thresh, &dirty_thresh);
		dirty_thresh = hard_dirty_limit(&global_wb_domain, dirty_thresh);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;      /* wheeee... */

		if (global_page_state(NR_UNSTABLE_NFS) +
			global_page_state(NR_WRITEBACK) <= dirty_thresh)
			break;
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem.  So we cannot just sit here
		 * waiting for IO to complete.
		 */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
	}
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	return 0;
}

#ifdef CONFIG_BLOCK
void laptop_mode_timer_fn(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	int nr_pages = global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS);
	struct bdi_writeback *wb;

	/*
	 * We want to write everything out, not just down to the dirty
	 * threshold
	 */
	if (!bdi_has_dirty_io(&q->backing_dev_info))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node)
		if (wb_has_dirty_io(wb))
			wb_start_writeback(wb, nr_pages, true,
					   WB_REASON_LAPTOP_TIMER);
	rcu_read_unlock();
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(struct backing_dev_info *info)
{
	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
		del_timer(&bdi->laptop_mode_wb_timer);

	rcu_read_unlock();
}
#endif

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds.
 */

void writeback_set_ratelimit(void)
{
	struct wb_domain *dom = &global_wb_domain;
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);
	dom->dirty_limit = dirty_thresh;
	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
}
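
/*
 * Worked example (illustrative only): with dirty_thresh = 100000 pages
 * and 4 online CPUs, ratelimit_pages = 100000 / (4 * 32) = 781, i.e.
 * each CPU re-examines the dirty state about every 781 dirtied pages,
 * keeping the worst-case overshoot within ~3% of the threshold.
 */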

static int
ratelimit_handler(struct notifier_block *self, unsigned long action,
		  void *hcpu)
{

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DEAD:
		writeback_set_ratelimit();
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages).
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has..
 */
void __init page_writeback_init(void)
{
	BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));

	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);
}

/**
 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
 * @mapping: address space structure to write
 * @start: starting page index
 * @end: ending page index (inclusive)
 *
 * This function scans the page range from @start to @end (inclusive) and tags
 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
 * that write_cache_pages (or whoever calls this function) will then use
 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
 * used to avoid livelocking of writeback by a process steadily creating new
 * dirty pages in the file (thus it is important for this function to be quick
 * so that it can tag pages faster than a dirtying process can create them).
 */
/*
 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
 */
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end)
{
#define WRITEBACK_TAG_BATCH 4096
	unsigned long tagged;

	do {
		spin_lock_irq(&mapping->tree_lock);
		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
				&start, end, WRITEBACK_TAG_BATCH,
				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
		spin_unlock_irq(&mapping->tree_lock);
		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
		cond_resched();
		/* We check 'start' to handle wrapping when end == ~0UL */
	} while (tagged >= WRITEBACK_TAG_BATCH && start);
}
EXPORT_SYMBOL(tag_pages_for_writeback);
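
/*
 * Usage sketch (illustrative only): a data-integrity writer tags first
 * and then walks only the TOWRITE pages, e.g.
 *
 *	tag_pages_for_writeback(mapping, index, end);
 *	nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
 *				      PAGECACHE_TAG_TOWRITE, ...);
 *
 * which is exactly the pattern write_cache_pages() below follows.
 */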

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 *
 * To avoid livelocks (when another process dirties new pages), we first tag
 * pages which should be written back with TOWRITE tag and only then start
 * writing them. For data-integrity sync we have to be careful so that we do
 * not miss some pages (e.g., because some other process has cleared TOWRITE
 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
 * by the process clearing the DIRTY tag (and submitting the page for IO).
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping. However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end) {
				/*
				 * can't be range_cyclic (1st pass) because
				 * end == -1 in that case.
				 */
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			/*
			 * Page truncated or invalidated. We can freely skip it
			 * then, even for data integrity operations: the page
			 * has disappeared concurrently, so there could be no
			 * real expectation of this data integrity operation
			 * even if there is now a new, dirty page at the same
			 * pagecache address.
			 */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					wait_on_page_writeback(page);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					/*
					 * done_index is set past this page,
					 * so media errors will not choke
					 * background writeout for the entire
					 * file. This has consequences for
					 * range_cyclic semantics (ie. it may
					 * not be suitable for data integrity
					 * writeout).
					 */
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			/*
			 * We stop writing back only if we are not doing
			 * integrity sync. In case of integrity sync we have to
			 * keep going until we have written all the pages
			 * we tagged for writeback prior to entering this loop.
			 */
			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
EXPORT_SYMBOL(write_cache_pages);

/*
 * Function used by generic_writepages to call the real writepage
 * function and set the mapping flags on error
 */
static int __writepage(struct page *page, struct writeback_control *wbc,
		       void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/**
 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc)
{
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
	blk_finish_plug(&plug);
	return ret;
}

EXPORT_SYMBOL(generic_writepages);

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	if (mapping->a_ops->writepages)
		ret = mapping->a_ops->writepages(mapping, wbc);
	else
		ret = generic_writepages(mapping, wbc);
	return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		get_page(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		put_page(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);
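
/*
 * Usage sketch (illustrative only): a filesystem can flush one locked
 * metadata page synchronously with
 *
 *	lock_page(page);
 *	err = write_one_page(page, 1);
 *
 * where write_one_page() unlocks the page and err is -EIO if the
 * writeout failed.
 */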

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		return !TestSetPageDirty(page);
	return 0;
}

/*
 * Helper function for set_page_dirty family.
 *
 * Caller must hold lock_page_memcg().
 *
 * NOTE: This relies on being atomic wrt interrupts.
 */
void account_page_dirtied(struct page *page, struct address_space *mapping)
{
	struct inode *inode = mapping->host;

	trace_writeback_dirty_page(page, mapping);

	if (mapping_cap_account_dirty(mapping)) {
		struct bdi_writeback *wb;

		inode_attach_wb(inode, page);
		wb = inode_to_wb(inode);

		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY);
		__inc_zone_page_state(page, NR_FILE_DIRTY);
		__inc_zone_page_state(page, NR_DIRTIED);
		__inc_wb_stat(wb, WB_RECLAIMABLE);
		__inc_wb_stat(wb, WB_DIRTIED);
		task_io_account_write(PAGE_SIZE);
		current->nr_dirtied++;
		this_cpu_inc(bdp_ratelimits);
	}
}
EXPORT_SYMBOL(account_page_dirtied);

/*
 * Helper function for deaccounting dirty page without writeback.
 *
 * Caller must hold lock_page_memcg().
 */
void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct bdi_writeback *wb)
{
	if (mapping_cap_account_dirty(mapping)) {
		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
		dec_zone_page_state(page, NR_FILE_DIRTY);
		dec_wb_stat(wb, WB_RECLAIMABLE);
		task_io_account_cancelled_write(PAGE_SIZE);
	}
}

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * The caller must ensure this doesn't race with truncation.  Most will simply
 * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
 * the pte lock held, which also locks out truncation.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	lock_page_memcg(page);
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		unsigned long flags;

		if (!mapping) {
			unlock_page_memcg(page);
			return 1;
		}

		spin_lock_irqsave(&mapping->tree_lock, flags);
		BUG_ON(page_mapping(page) != mapping);
		WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
		account_page_dirtied(page, mapping);
		radix_tree_tag_set(&mapping->page_tree, page_index(page),
				   PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
		unlock_page_memcg(page);

		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	unlock_page_memcg(page);
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * Call this whenever redirtying a page, to de-account the dirty counters
 * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written
 * counters (NR_WRITTEN, BDI_WRITTEN) in long term. The mismatches will lead to
 * systematic errors in balanced_dirty_ratelimit and the dirty pages position
 * control.
 */
void account_page_redirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping && mapping_cap_account_dirty(mapping)) {
		struct inode *inode = mapping->host;
		struct bdi_writeback *wb;
		bool locked;

		wb = unlocked_inode_to_wb_begin(inode, &locked);
		current->nr_dirtied--;
		dec_zone_page_state(page, NR_DIRTIED);
		dec_wb_stat(wb, WB_DIRTIED);
		unlocked_inode_to_wb_end(inode, locked);
	}
}
EXPORT_SYMBOL(account_page_redirty);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	int ret;

	wbc->pages_skipped++;
	ret = __set_page_dirty_nobuffers(page);
	account_page_redirty(page);
	return ret;
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * Dirty a page.
 *
 * For pages with a mapping this should be done under the page lock
 * for the benefit of asynchronous memory errors which prefer a consistent
 * dirty state. This rule can be broken in some special cases,
 * but it is better not to.
 *
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	page = compound_head(page);
	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
		/*
		 * readahead/lru_deactivate_page could remain
		 * PG_readahead/PG_reclaim due to a race with
		 * end_page_writeback.  For readahead, if the page is
		 * written, the flags would be reset, so no problem.
		 * For lru_deactivate_page, if the page is redirtied,
		 * the flag will be reset, so no problem either.  But if
		 * the page is used by readahead it will confuse readahead
		 * and make it restart the size rampup process.  That is
		 * a trivial problem, though.
		 */
		if (PageReclaim(page))
			ClearPageReclaim(page);
#ifdef CONFIG_BLOCK
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * This cancels just the dirty bit on the kernel page itself, it does NOT
 * actually remove dirty bits on any mmap's that may be around. It also
 * leaves the page tagged dirty, so any sync activity will still find it on
 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
 * look at the dirty bits in the VM.
 *
 * Doing this should *normally* only ever be done when a page is truncated,
 * and is not actually mapped anywhere at all. However, fs/buffer.c does
 * this when it notices that somebody has cleaned out all the buffers on a
 * page without actually doing it through the VM. Can you say "ext3 is
 * horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping_cap_account_dirty(mapping)) {
		struct inode *inode = mapping->host;
		struct bdi_writeback *wb;
		bool locked;

		lock_page_memcg(page);
		wb = unlocked_inode_to_wb_begin(inode, &locked);

		if (TestClearPageDirty(page))
			account_page_cleaned(page, mapping, wb);

		unlocked_inode_to_wb_end(inode, locked);
		unlock_page_memcg(page);
	} else {
		ClearPageDirty(page);
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (mapping && mapping_cap_account_dirty(mapping)) {
		struct inode *inode = mapping->host;
		struct bdi_writeback *wb;
		bool locked;

		/*
		 * Yes, Virginia, this is indeed insane.
		 *
		 * We use this sequence to make sure that
		 *  (a) we account for dirty stats properly
		 *  (b) we tell the low-level filesystem to
		 *      mark the whole page dirty if it was
		 *      dirty in a pagetable. Only to then
		 *  (c) clean the page again and return 1 to
		 *      cause the writeback.
		 *
		 * This way we avoid all nasty races with the
		 * dirty bit in multiple places and clearing
		 * them concurrently from different threads.
		 *
		 * Note! Normally the "set_page_dirty(page)"
		 * has no effect on the actual dirty bit - since
		 * that will already usually be set. But we
		 * need the side effects, and it can help us
		 * avoid races.
		 *
		 * We basically use the page "master dirty bit"
		 * as a serialization point for all the different
		 * threads doing their things.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);
		/*
		 * We carefully synchronise fault handlers against
		 * installing a dirty pte and marking the page dirty
		 * at this point.  We do this by having them hold the
		 * page lock while dirtying the page, and pages are
		 * always locked coming in here, so we get the desired
		 * exclusion.
		 */
		wb = unlocked_inode_to_wb_begin(inode, &locked);
		if (TestClearPageDirty(page)) {
			mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_wb_stat(wb, WB_RECLAIMABLE);
			ret = 1;
		}
		unlocked_inode_to_wb_end(inode, locked);
		return ret;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	lock_page_memcg(page);
	if (mapping) {
		struct inode *inode = mapping->host;
		struct backing_dev_info *bdi = inode_to_bdi(inode);
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi)) {
				struct bdi_writeback *wb = inode_to_wb(inode);

				__dec_wb_stat(wb, WB_WRITEBACK);
				__wb_writeout_inc(wb);
			}
		}

		if (mapping->host && !mapping_tagged(mapping,
						     PAGECACHE_TAG_WRITEBACK))
			sb_clear_inode_writeback(mapping->host);

		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	if (ret) {
		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
		dec_zone_page_state(page, NR_WRITEBACK);
		inc_zone_page_state(page, NR_WRITTEN);
	}
	unlock_page_memcg(page);
	return ret;
}

int __test_set_page_writeback(struct page *page, bool keep_write)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	lock_page_memcg(page);
	if (mapping) {
		struct inode *inode = mapping->host;
		struct backing_dev_info *bdi = inode_to_bdi(inode);
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret) {
			bool on_wblist;

			on_wblist = mapping_tagged(mapping,
						   PAGECACHE_TAG_WRITEBACK);

			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi))
				__inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);

			/*
			 * We can come through here when swapping anonymous
			 * pages, so we don't necessarily have an inode to track
			 * for sync.
			 */
			if (mapping->host && !on_wblist)
				sb_mark_inode_writeback(mapping->host);
		}
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		if (!keep_write)
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_TOWRITE);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret) {
		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
		inc_zone_page_state(page, NR_WRITEBACK);
	}
	unlock_page_memcg(page);
	return ret;

}
EXPORT_SYMBOL(__test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	return radix_tree_tagged(&mapping->page_tree, tag);
}
EXPORT_SYMBOL(mapping_tagged);

/**
 * wait_for_stable_page() - wait for writeback to finish, if necessary.
 * @page:	The page to wait on.
 *
 * This function determines if the given page is related to a backing device
 * that requires page contents to be held stable during writeback.  If so, then
 * it will wait for any pending writeback to complete.
 */
void wait_for_stable_page(struct page *page)
{
	if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host)))
		wait_on_page_writeback(page);
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);