/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
#include <linux/pagevec.h>
#include <linux/timer.h>
#include <linux/sched/rt.h>
#include <linux/mm_inline.h>
#include <trace/events/writeback.h>

#include "internal.h"

/*
 * Sleep at most 200ms at a time in balance_dirty_pages().
 */
#define MAX_PAUSE		max(HZ/5, 1)

/*
 * Try to keep balance_dirty_pages() call intervals higher than this many pages
 * by raising pause time to max_pause when falls below it.
 */
#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))
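/*
 * With 4KB pages (PAGE_SHIFT == 12) this evaluates to 128 >> 2 = 32 pages,
 * i.e. roughly 128KB of newly dirtied data between polls.
 */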

/*
 * Estimate write bandwidth at 200ms intervals.
 */
#define BANDWIDTH_INTERVAL	max(HZ/5, 1)

#define RATELIMIT_CALC_SHIFT	10

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via writeback threads) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

EXPORT_SYMBOL_GPL(dirty_writeback_interval);

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

struct wb_domain global_wb_domain;

/* consolidated parameters for balance_dirty_pages() and its subroutines */
struct dirty_throttle_control {
	struct bdi_writeback	*wb;
	struct fprop_local_percpu *wb_completions;

	unsigned long		dirty;		/* file_dirty + write + nfs */
	unsigned long		thresh;		/* dirty threshold */
	unsigned long		bg_thresh;	/* dirty background threshold */

	unsigned long		wb_dirty;	/* per-wb counterparts */
	unsigned long		wb_thresh;
	unsigned long		wb_bg_thresh;

	unsigned long		pos_ratio;
};

#define GDTC_INIT(__wb)		.wb = (__wb),				\
				.wb_completions = &(__wb)->completions

/*
 * Length of period for aging writeout fractions of bdis. This is an
 * arbitrarily chosen number. The longer the period, the slower fractions will
 * reflect changes in current writeout rate.
 */
#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)

#ifdef CONFIG_CGROUP_WRITEBACK

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	unsigned long this_bw = wb->avg_write_bandwidth;
	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
	unsigned long long min = wb->bdi->min_ratio;
	unsigned long long max = wb->bdi->max_ratio;

	/*
	 * @wb may already be clean by the time control reaches here and
	 * the total may not include its bw.
	 */
	if (this_bw < tot_bw) {
		if (min) {
			min *= this_bw;
			do_div(min, tot_bw);
		}
		if (max < 100) {
			max *= this_bw;
			do_div(max, tot_bw);
		}
	}

	*minp = min;
	*maxp = max;
}
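
/*
 * Rough example: a bdi with min_ratio == 10 and max_ratio == 50 whose wb
 * currently contributes a quarter of the bdi's total write bandwidth
 * (this_bw / tot_bw == 1/4) ends up with an effective range of roughly
 * min 2% .. max 12% of the global dirty threshold.
 */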

#else	/* CONFIG_CGROUP_WRITEBACK */

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	*minp = wb->bdi->min_ratio;
	*maxp = wb->bdi->max_ratio;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * In a memory zone, there is a certain amount of pages we consider
 * available for the page cache, which is essentially the number of
 * free and reclaimable pages, minus some zone reserves to protect
 * lowmem and the ability to uphold the zone's watermarks without
 * requiring writeback.
 *
 * This number of dirtyable pages is the base value of which the
 * user-configurable dirty ratio is the effective number of pages that
 * are allowed to be actually dirtied.  Per individual zone, or
 * globally by using the sum of dirtyable pages over all zones.
 *
 * Because the user is allowed to specify the dirty limit globally as
 * absolute number of bytes, calculating the per-zone dirty limit can
 * require translating the configured limit into a percentage of
 * global dirtyable memory first.
 */

/**
 * zone_dirtyable_memory - number of dirtyable pages in a zone
 * @zone: the zone
 *
 * Returns the zone's number of pages potentially available for dirty
 * page cache.  This is the base value for the per-zone dirty limits.
 */
static unsigned long zone_dirtyable_memory(struct zone *zone)
{
	unsigned long nr_pages;

	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);

	nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
	nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);

	return nr_pages;
}

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

		x += zone_dirtyable_memory(z);
	}
	/*
	 * Unreclaimable memory (kernel memory or anonymous memory
	 * without swap) can bring down the dirtyable pages below
	 * the zone's dirty balance reserve and the above calculation
	 * will underflow.  However we still want to add in nodes
	 * which are below threshold (negative values) to get a more
	 * accurate calculation but make sure that the total never
	 * underflows.
	 */
	if ((long)x < 0)
		x = 0;

	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * global_dirtyable_memory - number of globally dirtyable pages
 *
 * Returns the global number of pages potentially available for dirty
 * page cache.  This is the base value for the global dirty limits.
 */
static unsigned long global_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES);
	x -= min(x, dirty_balance_reserve);

	x += global_page_state(NR_INACTIVE_FILE);
	x += global_page_state(NR_ACTIVE_FILE);

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

/*
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 *
 * Calculate the dirty thresholds based on sysctl parameters
 * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
 * - vm.dirty_ratio             or  vm.dirty_bytes
 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 * real-time tasks.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	const unsigned long available_memory = global_dirtyable_memory();
	unsigned long background;
	unsigned long dirty;
	struct task_struct *tsk;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
	else
		dirty = (vm_dirty_ratio * available_memory) / 100;

	if (dirty_background_bytes)
		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
	else
		background = (dirty_background_ratio * available_memory) / 100;

	if (background >= dirty)
		background = dirty / 2;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;
	trace_global_dirty_state(background, dirty);
}
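
/*
 * Rough worked example with the default vm.dirty_ratio = 20 and
 * vm.dirty_background_ratio = 10: if global_dirtyable_memory() returns
 * 1,000,000 pages, *pdirty becomes 200,000 pages and *pbackground
 * 100,000 pages; a PF_LESS_THROTTLE or real-time task gets both limits
 * lifted by a further 25%.
 */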

/**
 * zone_dirty_limit - maximum number of dirty pages allowed in a zone
 * @zone: the zone
 *
 * Returns the maximum number of dirty pages allowed in a zone, based
 * on the zone's dirtyable memory.
 */
static unsigned long zone_dirty_limit(struct zone *zone)
{
	unsigned long zone_memory = zone_dirtyable_memory(zone);
	struct task_struct *tsk = current;
	unsigned long dirty;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
			zone_memory / global_dirtyable_memory();
	else
		dirty = vm_dirty_ratio * zone_memory / 100;

	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
		dirty += dirty / 4;

	return dirty;
}

/**
 * zone_dirty_ok - tells whether a zone is within its dirty limits
 * @zone: the zone to check
 *
 * Returns %true when the dirty pages in @zone are within the zone's
 * dirty limit, %false if the limit is exceeded.
 */
bool zone_dirty_ok(struct zone *zone)
{
	unsigned long limit = zone_dirty_limit(zone);

	return zone_page_state(zone, NR_FILE_DIRTY) +
	       zone_page_state(zone, NR_UNSTABLE_NFS) +
	       zone_page_state(zone, NR_WRITEBACK) <= limit;
}

int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}

int dirty_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		writeback_set_ratelimit();
		vm_dirty_bytes = 0;
	}
	return ret;
}

int dirty_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		writeback_set_ratelimit();
		vm_dirty_ratio = 0;
	}
	return ret;
}

static unsigned long wp_next_time(unsigned long cur_time)
{
	cur_time += VM_COMPLETIONS_PERIOD_LEN;
	/* 0 has a special meaning... */
	if (!cur_time)
		return 1;
	return cur_time;
}

/*
 * Increment the wb's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __wb_writeout_inc(struct bdi_writeback *wb)
{
	struct wb_domain *dom = &global_wb_domain;

	__inc_wb_stat(wb, WB_WRITTEN);
	__fprop_inc_percpu_max(&dom->completions, &wb->completions,
			       wb->bdi->max_prop_frac);
	/* First event after period switching was turned off? */
	if (!unlikely(dom->period_time)) {
		/*
		 * We can race with other __wb_writeout_inc calls here but
		 * it does not cause any harm since the resulting time when
		 * timer will fire and what is in writeout_period_time will be
		 * roughly the same.
		 */
		dom->period_time = wp_next_time(jiffies);
		mod_timer(&dom->period_timer, dom->period_time);
	}
}

void wb_writeout_inc(struct bdi_writeback *wb)
{
	unsigned long flags;

	local_irq_save(flags);
	__wb_writeout_inc(wb);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(wb_writeout_inc);

/*
 * On idle system, we can be called long after we scheduled because we use
 * deferred timers so count with missed periods.
 */
static void writeout_period(unsigned long t)
{
	struct wb_domain *dom = (void *)t;
	int miss_periods = (jiffies - dom->period_time) /
						 VM_COMPLETIONS_PERIOD_LEN;

	if (fprop_new_period(&dom->completions, miss_periods + 1)) {
		dom->period_time = wp_next_time(dom->period_time +
				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
		mod_timer(&dom->period_timer, dom->period_time);
	} else {
		/*
		 * Aging has zeroed all fractions. Stop wasting CPU on period
		 * updates.
		 */
		dom->period_time = 0;
	}
}

int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
{
	memset(dom, 0, sizeof(*dom));

	spin_lock_init(&dom->lock);

	init_timer_deferrable(&dom->period_timer);
	dom->period_timer.function = writeout_period;
	dom->period_timer.data = (unsigned long)dom;

	dom->dirty_limit_tstamp = jiffies;

	return fprop_global_init(&dom->completions, gfp);
}

/*
 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 * registered backing devices, which, for obvious reasons, can not
 * exceed 100%.
 */
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
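
/*
 * E.g. if one device already holds min_ratio == 60, a later call setting
 * another bdi's min_ratio to 50 fails with -EINVAL, because the sum of
 * minimum shares would no longer stay below 100%.
 */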

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);

static unsigned long dirty_freerun_ceiling(unsigned long thresh,
					   unsigned long bg_thresh)
{
	return (thresh + bg_thresh) / 2;
}

static unsigned long hard_dirty_limit(unsigned long thresh)
{
	struct wb_domain *dom = &global_wb_domain;

	return max(thresh, dom->dirty_limit);
}

/**
 * __wb_calc_thresh - @wb's share of dirty throttling threshold
 * @dtc: dirty_throttle_context of interest
 *
 * Returns @wb's dirty limit in pages. The term "dirty" in the context of
 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
 *
 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 * when sleeping max_pause per page is not enough to keep the dirty pages under
 * control. For example, when the device is completely stalled due to some error
 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
 * In the other normal situations, it acts more gently by throttling the tasks
 * more (rather than completely block them) when the wb dirty pages go high.
 *
 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 * - starving fast devices
 * - piling up dirty pages (that will take long time to sync) on slow devices
 *
 * The wb's share of dirty limit will be adapting to its throughput and
 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 */
static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
{
	struct wb_domain *dom = &global_wb_domain;
	unsigned long thresh = dtc->thresh;
	u64 wb_thresh;
	long numerator, denominator;
	unsigned long wb_min_ratio, wb_max_ratio;

	/*
	 * Calculate this BDI's share of the thresh ratio.
	 */
	fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
			      &numerator, &denominator);

	wb_thresh = (thresh * (100 - bdi_min_ratio)) / 100;
	wb_thresh *= numerator;
	do_div(wb_thresh, denominator);

	wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);

	wb_thresh += (thresh * wb_min_ratio) / 100;
	if (wb_thresh > (thresh * wb_max_ratio) / 100)
		wb_thresh = thresh * wb_max_ratio / 100;

	return wb_thresh;
}
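
/*
 * Rough worked example: with a global thresh of 1000 pages, bdi_min_ratio
 * of 0, and this wb having completed 30% of the recent writeout
 * (numerator/denominator == 3/10), wb_thresh starts out at ~300 pages and
 * is then adjusted by the per-bdi min/max ratios, if any are configured.
 */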

unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
					       .thresh = thresh };
	return __wb_calc_thresh(&gdtc);
}

/*
 *                           setpoint - dirty 3
 *        f(dirty) := 1.0 + (----------------)
 *                           limit - setpoint
 *
 * it's a 3rd order polynomial that subjects to
 *
 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
 * (2) f(setpoint) = 1.0 => the balance point
 * (3) f(limit)    = 0   => the hard limit
 * (4) df/dx      <= 0	 => negative feedback control
 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 *     => fast response on large errors; small oscillation near setpoint
 */
static long long pos_ratio_polynom(unsigned long setpoint,
					  unsigned long dirty,
					  unsigned long limit)
{
	long long pos_ratio;
	long x;

	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
		    limit - setpoint + 1);
	pos_ratio = x;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
}
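
/*
 * Sanity check of the above with RATELIMIT_CALC_SHIFT == 10 (1.0 == 1024):
 * at dirty == setpoint, x == 0 and the result is 1024 (1.0); at
 * dirty == limit, x ~= -1024 and the cubic term cancels the +1.0, giving
 * ~0; at dirty == freerun (with setpoint midway between freerun and limit),
 * x ~= +1024, giving ~2048 (2.0), matching properties (1)-(3) above.
 */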

/*
 * Dirty position control.
 *
 * (o) global/bdi setpoints
 *
 * We want the dirty pages be balanced around the global/wb setpoints.
 * When the number of dirty pages is higher/lower than the setpoint, the
 * dirty position control ratio (and hence task dirty ratelimit) will be
 * decreased/increased to bring the dirty pages back to the setpoint.
 *
 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 *
 *     if (dirty < setpoint) scale up   pos_ratio
 *     if (dirty > setpoint) scale down pos_ratio
 *
 *     if (wb_dirty < wb_setpoint) scale up   pos_ratio
 *     if (wb_dirty > wb_setpoint) scale down pos_ratio
 *
 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 *
 * (o) global control line
 *
 *     ^ pos_ratio
 *     |
 *     |            |<===== global dirty control scope ======>|
 * 2.0 .............*
 *     |            .*
 *     |            . *
 *     |            .   *
 *     |            .     *
 *     |            .        *
 *     |            .            *
 * 1.0 ................................*
 *     |            .                  .     *
 *     |            .                  .          *
 *     |            .                  .              *
 *     |            .                  .                 *
 *     |            .                  .                    *
 *   0 +------------.------------------.----------------------*------------->
 *           freerun^          setpoint^                 limit^   dirty pages
 *
 * (o) wb control line
 *
 *     ^ pos_ratio
 *     |
 *     |            *
 *     |              *
 *     |                *
 *     |                  *
 *     |                    * |<=========== span ============>|
 * 1.0 .......................*
 *     |                      . *
 *     |                      .   *
 *     |                      .     *
 *     |                      .       *
 *     |                      .         *
 *     |                      .           *
 *     |                      .             *
 *     |                      .               *
 *     |                      .                 *
 *     |                      .                   *
 *     |                      .                     *
 * 1/4 ...............................................* * * * * * * * * * * *
 *     |                      .                         .
 *     |                      .                           .
 *     |                      .                             .
 *   0 +----------------------.-------------------------------.------------->
 *                wb_setpoint^                    x_intercept^
 *
 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
 * be smoothly throttled down to normal if it starts high in situations like
 * - start writing to a slow SD card and a fast disk at the same time. The SD
 *   card's wb_dirty may rush to many times higher than wb_setpoint.
 * - the wb dirty thresh drops quickly due to change of JBOD workload
 */
static void wb_position_ratio(struct dirty_throttle_control *dtc)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long write_bw = wb->avg_write_bandwidth;
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = hard_dirty_limit(dtc->thresh);
	unsigned long wb_thresh = dtc->wb_thresh;
	unsigned long x_intercept;
	unsigned long setpoint;		/* dirty pages' target balance point */
	unsigned long wb_setpoint;
	unsigned long span;
	long long pos_ratio;		/* for scaling up/down the rate limit */
	long x;

	dtc->pos_ratio = 0;

	if (unlikely(dtc->dirty >= limit))
		return;

	/*
	 * global setpoint
	 *
	 * See comment for pos_ratio_polynom().
	 */
	setpoint = (freerun + limit) / 2;
	pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);

	/*
	 * The strictlimit feature is a tool preventing mistrusted filesystems
	 * from growing a large number of dirty pages before throttling. For
	 * such filesystems balance_dirty_pages always checks wb counters
	 * against wb limits. Even if global "nr_dirty" is under "freerun".
	 * This is especially important for fuse which sets bdi->max_ratio to
	 * 1% by default. Without strictlimit feature, fuse writeback may
	 * consume arbitrary amount of RAM because it is accounted in
	 * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
	 *
	 * Here, in wb_position_ratio(), we calculate pos_ratio based on
	 * two values: wb_dirty and wb_thresh. Let's consider an example:
	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
	 * limits are set by default to 10% and 20% (background and throttle).
	 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
	 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
	 * about ~6K pages (as the average of background and throttle wb
	 * limits). The 3rd order polynomial will provide positive feedback if
	 * wb_dirty is under wb_setpoint and vice versa.
	 *
	 * Note, that we cannot use global counters in these calculations
	 * because we want to throttle process writing to a strictlimit wb
	 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
	 * in the example above).
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		long long wb_pos_ratio;

		if (dtc->wb_dirty < 8) {
			dtc->pos_ratio = min_t(long long, pos_ratio * 2,
					   2 << RATELIMIT_CALC_SHIFT);
			return;
		}

		if (dtc->wb_dirty >= wb_thresh)
			return;

		wb_setpoint = dirty_freerun_ceiling(wb_thresh,
						    dtc->wb_bg_thresh);

		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
			return;

		wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
						 wb_thresh);

		/*
		 * Typically, for strictlimit case, wb_setpoint << setpoint
		 * and pos_ratio >> wb_pos_ratio. In the other words global
		 * state ("dirty") is not limiting factor and we have to
		 * make decision based on wb counters. But there is an
		 * important case when global pos_ratio should get precedence:
		 * global limits are exceeded (e.g. due to activities on other
		 * wb's) while given strictlimit wb is below limit.
		 *
		 * "pos_ratio * wb_pos_ratio" would work for the case above,
		 * but it would look too non-natural for the case of all
		 * activity in the system coming from a single strictlimit wb
		 * with bdi->max_ratio == 100%.
		 *
		 * Note that min() below somewhat changes the dynamics of the
		 * control system. Normally, pos_ratio value can be well over 3
		 * (when globally we are at freerun and wb is well below wb
		 * setpoint). Now the maximum pos_ratio in the same situation
		 * is 2. We might want to tweak this if we observe the control
		 * system is too slow to adapt.
		 */
		dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
		return;
	}

	/*
	 * We have computed basic pos_ratio above based on global situation. If
	 * the wb is over/under its share of dirty pages, we want to scale
	 * pos_ratio further down/up. That is done by the following mechanism.
	 */

	/*
	 * wb setpoint
	 *
	 *        f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
	 *
	 *                        x_intercept - wb_dirty
	 *                     := --------------------------
	 *                        x_intercept - wb_setpoint
	 *
	 * The main wb control line is a linear function that subjects to
	 *
	 * (1) f(wb_setpoint) = 1.0
	 * (2) k = - 1 / (8 * write_bw)  (in single wb case)
	 *     or equally: x_intercept = wb_setpoint + 8 * write_bw
	 *
	 * For single wb case, the dirty pages are observed to fluctuate
	 * regularly within range
	 *        [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
	 * for various filesystems, where (2) can yield in a reasonable 12.5%
	 * fluctuation range for pos_ratio.
	 *
	 * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its
	 * own size, so move the slope over accordingly and choose a slope that
	 * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
	 */
	if (unlikely(wb_thresh > dtc->thresh))
		wb_thresh = dtc->thresh;
	/*
	 * It's very possible that wb_thresh is close to 0 not because the
	 * device is slow, but that it has remained inactive for long time.
	 * Honour such devices a reasonable good (hopefully IO efficient)
	 * threshold, so that the occasional writes won't be blocked and active
	 * writes can rampup the threshold quickly.
	 */
	wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
	/*
	 * scale global setpoint to wb's:
	 *	wb_setpoint = setpoint * wb_thresh / thresh
	 */
	x = div_u64((u64)wb_thresh << 16, dtc->thresh + 1);
	wb_setpoint = setpoint * (u64)x >> 16;
	/*
	 * Use span=(8*write_bw) in single wb case as indicated by
	 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
	 *
	 *        wb_thresh                    thresh - wb_thresh
	 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
	 *         thresh                           thresh
	 */
	span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
	x_intercept = wb_setpoint + span;

	if (dtc->wb_dirty < x_intercept - span / 4) {
		pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
				      x_intercept - wb_setpoint + 1);
	} else
		pos_ratio /= 4;

	/*
	 * wb reserve area, safeguard against dirty pool underrun and disk idle
	 * It may push the desired control point of global dirty pages higher
	 * than setpoint.
	 */
	x_intercept = wb_thresh / 2;
	if (dtc->wb_dirty < x_intercept) {
		if (dtc->wb_dirty > x_intercept / 8)
			pos_ratio = div_u64(pos_ratio * x_intercept,
					    dtc->wb_dirty);
		else
			pos_ratio *= 8;
	}

	dtc->pos_ratio = pos_ratio;
}

static void wb_update_write_bandwidth(struct bdi_writeback *wb,
				      unsigned long elapsed,
				      unsigned long written)
{
	const unsigned long period = roundup_pow_of_two(3 * HZ);
	unsigned long avg = wb->avg_write_bandwidth;
	unsigned long old = wb->write_bandwidth;
	u64 bw;

	/*
	 * bw = written * HZ / elapsed
	 *
	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
	 * write_bandwidth = ---------------------------------------------------
	 *                                          period
	 *
	 * @written may have decreased due to account_page_redirty().
	 * Avoid underflowing @bw calculation.
	 */
	bw = written - min(written, wb->written_stamp);
	bw *= HZ;
	if (unlikely(elapsed > period)) {
		do_div(bw, elapsed);
		avg = bw;
		goto out;
	}
	bw += (u64)wb->write_bandwidth * (period - elapsed);
	bw >>= ilog2(period);

	/*
	 * one more level of smoothing, for filtering out sudden spikes
	 */
	if (avg > old && old >= (unsigned long)bw)
		avg -= (avg - old) >> 3;

	if (avg < old && old <= (unsigned long)bw)
		avg += (old - avg) >> 3;

out:
	/* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
	avg = max(avg, 1LU);
	if (wb_has_dirty_io(wb)) {
		long delta = avg - wb->avg_write_bandwidth;
		WARN_ON_ONCE(atomic_long_add_return(delta,
					&wb->bdi->tot_write_bandwidth) <= 0);
	}
	wb->write_bandwidth = bw;
	wb->avg_write_bandwidth = avg;
}
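
/*
 * Example of the smoothing above: with HZ == 1000 the period is
 * roundup_pow_of_two(3 * HZ) == 4096 jiffies (~4s), so one 200ms sample
 * (elapsed == BANDWIDTH_INTERVAL) contributes roughly 200/4096 ~= 5% of the
 * new write_bandwidth value, before the additional >> 3 step that damps
 * sudden spikes in avg_write_bandwidth.
 */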

static void update_dirty_limit(struct dirty_throttle_control *dtc)
{
	struct wb_domain *dom = &global_wb_domain;
	unsigned long thresh = dtc->thresh;
	unsigned long limit = dom->dirty_limit;

	/*
	 * Follow up in one step.
	 */
	if (limit < thresh) {
		limit = thresh;
		goto update;
	}

	/*
	 * Follow down slowly. Use the higher one as the target, because thresh
	 * may drop below dirty. This is exactly the reason to introduce
	 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
	 */
	thresh = max(thresh, dtc->dirty);
	if (limit > thresh) {
		limit -= (limit - thresh) >> 5;
		goto update;
	}
	return;
update:
	dom->dirty_limit = limit;
}

static void global_update_bandwidth(struct dirty_throttle_control *dtc,
				    unsigned long now)
{
	struct wb_domain *dom = &global_wb_domain;

	/*
	 * check locklessly first to optimize away locking for the most time
	 */
	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
		return;

	spin_lock(&dom->lock);
	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
		update_dirty_limit(dtc);
		dom->dirty_limit_tstamp = now;
	}
	spin_unlock(&dom->lock);
}

/*
 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
 *
 * Normal wb tasks will be curbed at or below it in long term.
 * Obviously it should be around (write_bw / N) when there are N dd tasks.
 */
static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
				      unsigned long dirtied,
				      unsigned long elapsed)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long dirty = dtc->dirty;
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = hard_dirty_limit(dtc->thresh);
	unsigned long setpoint = (freerun + limit) / 2;
	unsigned long write_bw = wb->avg_write_bandwidth;
	unsigned long dirty_ratelimit = wb->dirty_ratelimit;
	unsigned long dirty_rate;
	unsigned long task_ratelimit;
	unsigned long balanced_dirty_ratelimit;
	unsigned long step;
	unsigned long x;

	/*
	 * The dirty rate will match the writeout rate in long term, except
	 * when dirty pages are truncated by userspace or re-dirtied by FS.
	 */
	dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;

	/*
	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
	 */
	task_ratelimit = (u64)dirty_ratelimit *
					dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */

	/*
	 * A linear estimation of the "balanced" throttle rate. The theory is,
	 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
	 * formula will yield the balanced rate limit (write_bw / N).
	 *
	 * Note that the expanded form is not a pure rate feedback:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
	 * but also takes pos_ratio into account:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
	 *
	 * (1) is not realistic because pos_ratio also takes part in balancing
	 * the dirty rate.  Consider the state
	 *	pos_ratio = 0.5						     (3)
	 *	rate = 2 * (write_bw / N)				     (4)
	 * If (1) is used, it will stuck in that state! Because each dd will
	 * be throttled at
	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
	 * yielding
	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
	 * put (6) into (1) we get
	 *	rate_(i+1) = rate_(i)					     (7)
	 *
	 * So we end up using (2) to always keep
	 *	rate_(i+1) ~= (write_bw / N)				     (8)
	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
	 * pos_ratio is able to drive itself to 1.0, which is not only where
	 * the dirty count meet the setpoint, but also where the slope of
	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
	 */
	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
					   dirty_rate | 1);
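	/*
	 * Numeric example: write_bw == 100MB/s and 4 dd tasks each currently
	 * throttled at task_ratelimit == 50MB/s give a measured dirty_rate of
	 * ~200MB/s, so balanced_dirty_ratelimit == 50 * 100 / 200 == 25MB/s,
	 * i.e. write_bw / N as intended.
	 */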
	/*
	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
	 */
	if (unlikely(balanced_dirty_ratelimit > write_bw))
		balanced_dirty_ratelimit = write_bw;

	/*
	 * We could safely do this and return immediately:
	 *
	 *	wb->dirty_ratelimit = balanced_dirty_ratelimit;
	 *
	 * However to get a more stable dirty_ratelimit, the below elaborated
	 * code makes use of task_ratelimit to filter out singular points and
	 * limit the step size.
	 *
	 * The below code essentially only uses the relative value of
	 *
	 *	task_ratelimit - dirty_ratelimit
	 *	= (pos_ratio - 1) * dirty_ratelimit
	 *
	 * which reflects the direction and size of dirty position error.
	 */

	/*
	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
	 * task_ratelimit is on the same side of dirty_ratelimit, too.
	 * For example, when
	 * - dirty_ratelimit > balanced_dirty_ratelimit
	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
	 * lowering dirty_ratelimit will help meet both the position and rate
	 * control targets. Otherwise, don't update dirty_ratelimit if it will
	 * only help meet the rate target. After all, what the users ultimately
	 * feel and care are stable dirty rate and small position error.
	 *
	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
	 * and filter out the singular points of balanced_dirty_ratelimit. Which
	 * keeps jumping around randomly and can even leap far away at times
	 * due to the small 200ms estimation period of dirty_rate (we want to
	 * keep that period small to reduce time lags).
	 */
	step = 0;

	/*
	 * For strictlimit case, calculations above were based on wb counters
	 * and limits (starting from pos_ratio = wb_position_ratio() and up to
	 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
	 * Hence, to calculate "step" properly, we have to use wb_dirty as
	 * "dirty" and wb_setpoint as "setpoint".
	 *
	 * We rampup dirty_ratelimit forcibly if wb_dirty is low because
	 * it's possible that wb_thresh is close to zero due to inactivity
	 * of backing device.
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		dirty = dtc->wb_dirty;
		if (dtc->wb_dirty < 8)
			setpoint = dtc->wb_dirty + 1;
		else
			setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
	}

	if (dirty < setpoint) {
		x = min3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit < x)
			step = x - dirty_ratelimit;
	} else {
		x = max3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit > x)
			step = dirty_ratelimit - x;
	}

	/*
	 * Don't pursue 100% rate matching. It's impossible since the balanced
	 * rate itself is constantly fluctuating. So decrease the track speed
	 * when it gets close to the target. Helps eliminate pointless tremors.
	 */
	step >>= dirty_ratelimit / (2 * step + 1);
	/*
	 * Limit the tracking speed to avoid overshooting.
	 */
	step = (step + 7) / 8;

	if (dirty_ratelimit < balanced_dirty_ratelimit)
		dirty_ratelimit += step;
	else
		dirty_ratelimit -= step;

	wb->dirty_ratelimit = max(dirty_ratelimit, 1UL);
	wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;

	trace_bdi_dirty_ratelimit(wb->bdi, dirty_rate, task_ratelimit);
}

static void __wb_update_bandwidth(struct dirty_throttle_control *dtc,
				  unsigned long start_time,
				  bool update_ratelimit)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long now = jiffies;
	unsigned long elapsed = now - wb->bw_time_stamp;
	unsigned long dirtied;
	unsigned long written;

	lockdep_assert_held(&wb->list_lock);

	/*
	 * rate-limit, only update once every 200ms.
	 */
	if (elapsed < BANDWIDTH_INTERVAL)
		return;

	dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
	written = percpu_counter_read(&wb->stat[WB_WRITTEN]);

	/*
	 * Skip quiet periods when disk bandwidth is under-utilized.
	 * (at least 1s idle time between two flusher runs)
	 */
	if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time))
		goto snapshot;

	if (update_ratelimit) {
		global_update_bandwidth(dtc, now);
		wb_update_dirty_ratelimit(dtc, dirtied, elapsed);
	}
	wb_update_write_bandwidth(wb, elapsed, written);

snapshot:
	wb->dirtied_stamp = dirtied;
	wb->written_stamp = written;
	wb->bw_time_stamp = now;
}

void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };

	__wb_update_bandwidth(&gdtc, start_time, false);
}

/*
 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
 * will look to see if it needs to start dirty throttling.
 *
 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
 * global_page_state() too often. So scale it near-sqrt to the safety margin
 * (the number of pages we may dirty without exceeding the dirty limits).
 */
static unsigned long dirty_poll_interval(unsigned long dirty,
					 unsigned long thresh)
{
	if (thresh > dirty)
		return 1UL << (ilog2(thresh - dirty) >> 1);

	return 1;
}
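
/*
 * E.g. with a 16384-page gap below the threshold this returns
 * 1 << (ilog2(16384) >> 1) == 1 << 7 == 128 pages between polls.
 */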

static unsigned long wb_max_pause(struct bdi_writeback *wb,
				  unsigned long wb_dirty)
{
	unsigned long bw = wb->avg_write_bandwidth;
	unsigned long t;

	/*
	 * Limit pause time for small memory systems. If sleeping for too long
	 * time, a small pool of dirty/writeback pages may go empty and disk go
	 * idle.
	 *
	 * 8 serves as the safety ratio.
	 */
	t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
	t++;

	return min_t(unsigned long, t, MAX_PAUSE);
}
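
/*
 * Rough example: with HZ == 1000 the divisor rounds up to 128, so for
 * avg_write_bandwidth == 12800 pages/s (~50MB/s with 4KB pages) and
 * wb_dirty == 2000 pages, t ~= 2000 / 101 + 1 ~= 20 jiffies (20ms), well
 * below the 200ms MAX_PAUSE cap.
 */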

static long wb_min_pause(struct bdi_writeback *wb,
			 long max_pause,
			 unsigned long task_ratelimit,
			 unsigned long dirty_ratelimit,
			 int *nr_dirtied_pause)
{
	long hi = ilog2(wb->avg_write_bandwidth);
	long lo = ilog2(wb->dirty_ratelimit);
	long t;		/* target pause */
	long pause;	/* estimated next pause */
	int pages;	/* target nr_dirtied_pause */

	/* target for 10ms pause on 1-dd case */
	t = max(1, HZ / 100);

	/*
	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
	 * overheads.
	 *
	 * (N * 10ms) on 2^N concurrent tasks.
	 */
	if (hi > lo)
		t += (hi - lo) * (10 * HZ) / 1024;

	/*
	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
	 * on the much more stable dirty_ratelimit. However the next pause time
	 * will be computed based on task_ratelimit and the two rate limits may
	 * depart considerably at some time. Especially if task_ratelimit goes
	 * below dirty_ratelimit/2 and the target pause is max_pause, the next
	 * pause time will be max_pause*2 _trimmed down_ to max_pause.  As a
	 * result task_ratelimit won't be executed faithfully, which could
	 * eventually bring down dirty_ratelimit.
	 *
	 * We apply two rules to fix it up:
	 * 1) try to estimate the next pause time and if necessary, use a lower
	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
	 * 2) limit the target pause time to max_pause/2, so that the normal
	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
	 */
	t = min(t, 1 + max_pause / 2);
	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);

	/*
	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
	 * When the 16 consecutive reads are often interrupted by some dirty
	 * throttling pause during the async writes, cfq will go into idles
	 * (deadline is fine). So push nr_dirtied_pause as high as possible
	 * until reaches DIRTY_POLL_THRESH=32 pages.
	 */
	if (pages < DIRTY_POLL_THRESH) {
		t = max_pause;
		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
		if (pages > DIRTY_POLL_THRESH) {
			pages = DIRTY_POLL_THRESH;
			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
		}
	}

	pause = HZ * pages / (task_ratelimit + 1);
	if (pause > max_pause) {
		t = max_pause;
		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
	}

	*nr_dirtied_pause = pages;
	/*
	 * The minimal pause time will normally be half the target pause time.
	 */
	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
}

static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long wb_reclaimable;

	/*
	 * wb_thresh is not treated as some limiting factor as
	 * dirty_thresh, due to reasons
	 * - in JBOD setup, wb_thresh can fluctuate a lot
	 * - in a system with HDD and USB key, the USB key may somehow
	 *   go into state (wb_dirty >> wb_thresh) either because
	 *   wb_dirty starts high, or because wb_thresh drops low.
	 *   In this case we don't want to hard throttle the USB key
	 *   dirtiers for 100 seconds until wb_dirty drops under
	 *   wb_thresh. Instead the auxiliary wb control line in
	 *   wb_position_ratio() will let the dirtier task progress
	 *   at some rate <= (write_bw / 2) for bringing down wb_dirty.
	 */
	dtc->wb_thresh = __wb_calc_thresh(dtc);
	dtc->wb_bg_thresh = dtc->thresh ?
		div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;

	/*
	 * In order to avoid the stacked BDI deadlock we need
	 * to ensure we accurately count the 'dirty' pages when
	 * the threshold is low.
	 *
	 * Otherwise it would be possible to get thresh+n pages
	 * reported dirty, even though there are thresh-m pages
	 * actually dirty; with m+n sitting in the percpu
	 * deltas.
	 */
	if (dtc->wb_thresh < 2 * wb_stat_error(wb)) {
		wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
		dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
	} else {
		wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
		dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
	}
}

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
 * If we're over `background_thresh' then the writeback threads are woken to
 * perform some writeout.
 */
static void balance_dirty_pages(struct address_space *mapping,
				struct bdi_writeback *wb,
				unsigned long pages_dirtied)
{
	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
	struct dirty_throttle_control * const gdtc = &gdtc_stor;
	unsigned long nr_reclaimable;	/* = file_dirty + unstable_nfs */
	long period;
	long pause;
	long max_pause;
	long min_pause;
	int nr_dirtied_pause;
	bool dirty_exceeded = false;
	unsigned long task_ratelimit;
	unsigned long dirty_ratelimit;
	struct backing_dev_info *bdi = wb->bdi;
	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
	unsigned long start_time = jiffies;

	for (;;) {
		unsigned long now = jiffies;
		unsigned long dirty, thresh, bg_thresh;

		/*
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		gdtc->dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);

		global_dirty_limits(&gdtc->bg_thresh, &gdtc->thresh);

		if (unlikely(strictlimit)) {
			wb_dirty_limits(gdtc);

			dirty = gdtc->wb_dirty;
			thresh = gdtc->wb_thresh;
			bg_thresh = gdtc->wb_bg_thresh;
		} else {
			dirty = gdtc->dirty;
			thresh = gdtc->thresh;
			bg_thresh = gdtc->bg_thresh;
		}

		/*
		 * Throttle it only when the background writeback cannot
		 * catch-up. This avoids (excessively) small writeouts
		 * when the wb limits are ramping up in case of !strictlimit.
		 *
		 * In strictlimit case make decision based on the wb counters
		 * and limits. Small writeouts when the wb limits are ramping
		 * up are the price we consciously pay for strictlimit-ing.
		 */
		if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh)) {
			current->dirty_paused_when = now;
			current->nr_dirtied = 0;
			current->nr_dirtied_pause =
				dirty_poll_interval(dirty, thresh);
			break;
		}

		if (unlikely(!writeback_in_progress(wb)))
			wb_start_background_writeback(wb);

		if (!strictlimit)
			wb_dirty_limits(gdtc);

		dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) &&
			((gdtc->dirty > gdtc->thresh) || strictlimit);

		wb_position_ratio(gdtc);

		if (dirty_exceeded && !wb->dirty_exceeded)
			wb->dirty_exceeded = 1;

		if (time_is_before_jiffies(wb->bw_time_stamp +
					   BANDWIDTH_INTERVAL)) {
			spin_lock(&wb->list_lock);
			__wb_update_bandwidth(gdtc, start_time, true);
			spin_unlock(&wb->list_lock);
		}

		dirty_ratelimit = wb->dirty_ratelimit;
		task_ratelimit = ((u64)dirty_ratelimit * gdtc->pos_ratio) >>
							RATELIMIT_CALC_SHIFT;
		max_pause = wb_max_pause(wb, gdtc->wb_dirty);
		min_pause = wb_min_pause(wb, max_pause,
					 task_ratelimit, dirty_ratelimit,
					 &nr_dirtied_pause);

		if (unlikely(task_ratelimit == 0)) {
			period = max_pause;
			pause = max_pause;
			goto pause;
		}
		period = HZ * pages_dirtied / task_ratelimit;
		pause = period;
		if (current->dirty_paused_when)
			pause -= now - current->dirty_paused_when;
		/*
		 * For less than 1s think time (ext3/4 may block the dirtier
		 * for up to 800ms from time to time on 1-HDD; so does xfs,
		 * however at much less frequency), try to compensate it in
		 * future periods by updating the virtual time; otherwise just
		 * do a reset, as it may be a light dirtier.
		 */
		if (pause < min_pause) {
			trace_balance_dirty_pages(bdi,
						  gdtc->thresh,
						  gdtc->bg_thresh,
						  gdtc->dirty,
						  gdtc->wb_thresh,
						  gdtc->wb_dirty,
						  dirty_ratelimit,
						  task_ratelimit,
						  pages_dirtied,
						  period,
						  min(pause, 0L),
						  start_time);
			if (pause < -HZ) {
				current->dirty_paused_when = now;
				current->nr_dirtied = 0;
			} else if (period) {
				current->dirty_paused_when += period;
				current->nr_dirtied = 0;
			} else if (current->nr_dirtied_pause <= pages_dirtied)
				current->nr_dirtied_pause += pages_dirtied;
			break;
		}
		if (unlikely(pause > max_pause)) {
			/* for occasional dropped task_ratelimit */
			now += min(pause - max_pause, max_pause);
			pause = max_pause;
		}

pause:
		trace_balance_dirty_pages(bdi,
					  gdtc->thresh,
					  gdtc->bg_thresh,
					  gdtc->dirty,
					  gdtc->wb_thresh,
					  gdtc->wb_dirty,
					  dirty_ratelimit,
					  task_ratelimit,
					  pages_dirtied,
					  period,
					  pause,
					  start_time);
		__set_current_state(TASK_KILLABLE);
		io_schedule_timeout(pause);

		current->dirty_paused_when = now + pause;
		current->nr_dirtied = 0;
		current->nr_dirtied_pause = nr_dirtied_pause;

		/*
		 * This is typically equal to (dirty < thresh) and can also
		 * keep "1000+ dd on a slow USB stick" under control.
		 */
		if (task_ratelimit)
			break;

		/*
		 * In the case of an unresponsive NFS server whose dirty
		 * pages exceed dirty_thresh, give the other good wb's a pipe
		 * to go through, so that tasks on them still remain responsive.
		 *
		 * In theory 1 page is enough to keep the consumer-producer
		 * pipe going: the flusher cleans 1 page => the task dirties 1
		 * more page. However wb_dirty has accounting errors.  So use
		 * the larger and more IO friendly wb_stat_error.
		 */
		if (gdtc->wb_dirty <= wb_stat_error(wb))
			break;

		if (fatal_signal_pending(current))
			break;
L
Linus Torvalds 已提交
1550 1551
	}

1552 1553
	if (!dirty_exceeded && wb->dirty_exceeded)
		wb->dirty_exceeded = 0;
L
Linus Torvalds 已提交
1554

1555
	if (writeback_in_progress(wb))
1556
		return;
L
Linus Torvalds 已提交
1557 1558 1559 1560 1561 1562 1563 1564 1565

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if (laptop_mode)
		return;

	if (nr_reclaimable > gdtc->bg_thresh)
		wb_start_background_writeback(wb);
}
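
/*
 * Worked example with made-up numbers (not taken from this file): if
 * task_ratelimit works out to 256 pages/s and the task has dirtied
 * pages_dirtied = 32 pages, then period = HZ * 32 / 256 = HZ/8, i.e. a
 * pause of roughly 125ms per trip through balance_dirty_pages(), before
 * the [min_pause, max_pause] clamping applied above.
 */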

static DEFINE_PER_CPU(int, bdp_ratelimits);

/*
 * Normal tasks are throttled by
 *	loop {
 *		dirty tsk->nr_dirtied_pause pages;
 *		take a snap in balance_dirty_pages();
 *	}
 * However there is a worst case. If every task exits immediately after
 * dirtying (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will
 * never be called to throttle the page dirties. The solution is to save
 * the not yet throttled page dirties in dirty_throttle_leaks on task exit
 * and charge them randomly into the running tasks. This works well for
 * the above worst case, as the new task will pick up and accumulate the
 * old task's leaked dirty count and eventually get throttled.
 */
DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
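
/*
 * Illustrative scenario (assumed numbers): a short-lived task with a
 * 32-page nr_dirtied_pause that exits after dirtying only 20 pages never
 * reaches balance_dirty_pages(), so those 20 pages are parked in
 * dirty_throttle_leaks; the next dirtier on this CPU picks them up in
 * balance_dirty_pages_ratelimited() below and is throttled that much
 * sooner.
 */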

/**
 * balance_dirty_pages_ratelimited - balance dirty memory state
 * @mapping: address_space which was dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct bdi_writeback *wb = NULL;
	int ratelimit;
	int *p;

	if (!bdi_cap_account_dirty(bdi))
		return;

	if (inode_cgwb_enabled(inode))
		wb = wb_get_create_current(bdi, GFP_KERNEL);
	if (!wb)
		wb = &bdi->wb;

	ratelimit = current->nr_dirtied_pause;
	if (wb->dirty_exceeded)
		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));

	preempt_disable();
	/*
	 * This prevents one CPU from accumulating too many dirtied pages
	 * without calling into balance_dirty_pages(), which can happen when
	 * there are 1000+ tasks all starting to dirty pages at exactly the
	 * same time, hence all honouring a too-large initial
	 * task->nr_dirtied_pause.
	 */
	p = this_cpu_ptr(&bdp_ratelimits);
	if (unlikely(current->nr_dirtied >= ratelimit))
		*p = 0;
	else if (unlikely(*p >= ratelimit_pages)) {
		*p = 0;
		ratelimit = 0;
	}
	/*
	 * Pick up the dirtied pages by the exited tasks. This avoids lots of
	 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
	 * the dirty throttling and livelocking other long-running dirtiers.
	 */
	p = this_cpu_ptr(&dirty_throttle_leaks);
	if (*p > 0 && current->nr_dirtied < ratelimit) {
		unsigned long nr_pages_dirtied;
		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
		*p -= nr_pages_dirtied;
		current->nr_dirtied += nr_pages_dirtied;
	}
	preempt_enable();

	if (unlikely(current->nr_dirtied >= ratelimit))
		balance_dirty_pages(mapping, wb, current->nr_dirtied);

	wb_put(wb);
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
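
/*
 * Usage sketch (not part of this file): a buffered write path typically
 * dirties one page at a time and then calls in here, roughly as
 * generic_perform_write() in mm/filemap.c does:
 *
 *	status = a_ops->write_end(file, mapping, pos, bytes, copied,
 *				  page, fsdata);
 *	...
 *	balance_dirty_pages_ratelimited(mapping);
 */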

void throttle_vm_writeout(gfp_t gfp_mask)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	for ( ; ; ) {
		global_dirty_limits(&background_thresh, &dirty_thresh);
		dirty_thresh = hard_dirty_limit(dirty_thresh);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;      /* wheeee... */

		if (global_page_state(NR_UNSTABLE_NFS) +
			global_page_state(NR_WRITEBACK) <= dirty_thresh)
			break;
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem.  So we cannot just sit here
		 * waiting for IO to complete.
		 */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
	}
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	return 0;
}

#ifdef CONFIG_BLOCK
void laptop_mode_timer_fn(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	int nr_pages = global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS);
	struct bdi_writeback *wb;
	struct wb_iter iter;

	/*
	 * We want to write everything out, not just down to the dirty
	 * threshold
	 */
	if (!bdi_has_dirty_io(&q->backing_dev_info))
		return;

	bdi_for_each_wb(wb, &q->backing_dev_info, &iter, 0)
		if (wb_has_dirty_io(wb))
			wb_start_writeback(wb, nr_pages, true,
					   WB_REASON_LAPTOP_TIMER);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(struct backing_dev_info *info)
{
	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
		del_timer(&bdi->laptop_mode_wb_timer);

	rcu_read_unlock();
}
#endif

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds.
 */

void writeback_set_ratelimit(void)
{
	struct wb_domain *dom = &global_wb_domain;
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);
	dom->dirty_limit = dirty_thresh;
	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
}
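
/*
 * Worked example (illustrative numbers): with dirty_thresh at 200000
 * pages and 4 online CPUs, ratelimit_pages = 200000 / (4 * 32) = 1562.
 * Even if every CPU overshoots by a full ratelimit_pages before noticing,
 * that is 4 * 1562 = 6248 pages, about 3% of dirty_thresh, matching the
 * 1/32 bound described above.
 */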

static int
ratelimit_handler(struct notifier_block *self, unsigned long action,
		  void *hcpu)
{

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DEAD:
		writeback_set_ratelimit();
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages).
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has..
 */
void __init page_writeback_init(void)
{
	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
}

/**
 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
 * @mapping: address space structure to write
 * @start: starting page index
 * @end: ending page index (inclusive)
 *
 * This function scans the page range from @start to @end (inclusive) and tags
 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
 * that write_cache_pages (or whoever calls this function) will then use
 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
 * used to avoid livelocking of writeback by a process steadily creating new
 * dirty pages in the file (thus it is important for this function to be quick
 * so that it can tag pages faster than a dirtying process can create them).
 */
/*
 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
 */
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end)
{
#define WRITEBACK_TAG_BATCH 4096
	unsigned long tagged;

	do {
		spin_lock_irq(&mapping->tree_lock);
		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
				&start, end, WRITEBACK_TAG_BATCH,
				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
		spin_unlock_irq(&mapping->tree_lock);
		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
		cond_resched();
		/* We check 'start' to handle wrapping when end == ~0UL */
	} while (tagged >= WRITEBACK_TAG_BATCH && start);
}
EXPORT_SYMBOL(tag_pages_for_writeback);

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 *
 * To avoid livelocks (when other process dirties new pages), we first tag
 * pages which should be written back with TOWRITE tag and only then start
 * writing them. For data-integrity sync we have to be careful so that we do
 * not miss some pages (e.g., because some other process has cleared TOWRITE
 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
 * by the process clearing the DIRTY tag (and submitting the page for IO).
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping. However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end) {
				/*
				 * can't be range_cyclic (1st pass) because
				 * end == -1 in that case.
				 */
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			/*
			 * Page truncated or invalidated. We can freely skip it
			 * then, even for data integrity operations: the page
			 * has disappeared concurrently, so there could be no
			 * real expectation of this data integrity operation
			 * even if there is now a new, dirty page at the same
			 * pagecache address.
			 */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					wait_on_page_writeback(page);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					/*
					 * done_index is set past this page,
					 * so media errors will not choke
					 * background writeout for the entire
					 * file. This has consequences for
					 * range_cyclic semantics (ie. it may
					 * not be suitable for data integrity
					 * writeout).
					 */
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			/*
			 * We stop writing back only if we are not doing
			 * integrity sync. In case of integrity sync we have to
			 * keep going until we have written all the pages
			 * we tagged for writeback prior to entering this loop.
			 */
			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
EXPORT_SYMBOL(write_cache_pages);

/*
 * Function used by generic_writepages to call the real writepage
 * function and set the mapping flags on error
 */
static int __writepage(struct page *page, struct writeback_control *wbc,
		       void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/**
 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc)
{
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
	blk_finish_plug(&plug);
	return ret;
}

EXPORT_SYMBOL(generic_writepages);
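
/*
 * Sketch of how a filesystem without special writepages logic would wire
 * this up (hypothetical foo_writepage, not from this file):
 *
 *	static const struct address_space_operations foo_aops = {
 *		.writepage	= foo_writepage,
 *		.writepages	= generic_writepages,
 *	};
 */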

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	if (mapping->a_ops->writepages)
		ret = mapping->a_ops->writepages(mapping, wbc);
	else
		ret = generic_writepages(mapping, wbc);
	return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);
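
/*
 * Typical call pattern (sketch, assuming the caller holds a reference on
 * the page):
 *
 *	lock_page(page);
 *	err = write_one_page(page, 1);
 *
 * The page is unlocked again by the time this returns, and err is 0 or a
 * negative error code.
 */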

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		return !TestSetPageDirty(page);
	return 0;
}

/*
 * Helper function for set_page_dirty family.
 *
 * Caller must hold mem_cgroup_begin_page_stat().
 *
 * NOTE: This relies on being atomic wrt interrupts.
 */
void account_page_dirtied(struct page *page, struct address_space *mapping,
			  struct mem_cgroup *memcg)
{
	struct inode *inode = mapping->host;

	trace_writeback_dirty_page(page, mapping);

	if (mapping_cap_account_dirty(mapping)) {
		struct bdi_writeback *wb;

		inode_attach_wb(inode, page);
		wb = inode_to_wb(inode);

		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
		__inc_zone_page_state(page, NR_FILE_DIRTY);
		__inc_zone_page_state(page, NR_DIRTIED);
		__inc_wb_stat(wb, WB_RECLAIMABLE);
		__inc_wb_stat(wb, WB_DIRTIED);
		task_io_account_write(PAGE_CACHE_SIZE);
		current->nr_dirtied++;
		this_cpu_inc(bdp_ratelimits);
	}
}
EXPORT_SYMBOL(account_page_dirtied);

/*
 * Helper function for deaccounting dirty page without writeback.
 *
 * Caller must hold mem_cgroup_begin_page_stat().
 */
void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct mem_cgroup *memcg)
{
	if (mapping_cap_account_dirty(mapping)) {
		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
		dec_zone_page_state(page, NR_FILE_DIRTY);
		dec_wb_stat(inode_to_wb(mapping->host), WB_RECLAIMABLE);
		task_io_account_cancelled_write(PAGE_CACHE_SIZE);
	}
}

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * The caller must ensure this doesn't race with truncation.  Most will simply
 * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
 * the pte lock held, which also locks out truncation.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = mem_cgroup_begin_page_stat(page);
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		unsigned long flags;

		if (!mapping) {
			mem_cgroup_end_page_stat(memcg);
			return 1;
		}

		spin_lock_irqsave(&mapping->tree_lock, flags);
		BUG_ON(page_mapping(page) != mapping);
		WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
		account_page_dirtied(page, mapping, memcg);
		radix_tree_tag_set(&mapping->page_tree, page_index(page),
				   PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
		mem_cgroup_end_page_stat(memcg);

		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	mem_cgroup_end_page_stat(memcg);
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * Call this whenever redirtying a page, to de-account the dirty counters
 * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written
 * counters (NR_WRITTEN, BDI_WRITTEN) in long term. The mismatches will lead to
 * systematic errors in balanced_dirty_ratelimit and the dirty pages position
 * control.
 */
void account_page_redirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping && mapping_cap_account_dirty(mapping)) {
		struct bdi_writeback *wb = inode_to_wb(mapping->host);

		current->nr_dirtied--;
		dec_zone_page_state(page, NR_DIRTIED);
		dec_wb_stat(wb, WB_DIRTIED);
	}
}
EXPORT_SYMBOL(account_page_redirty);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	int ret;

	wbc->pages_skipped++;
	ret = __set_page_dirty_nobuffers(page);
	account_page_redirty(page);
	return ret;
}
EXPORT_SYMBOL(redirty_page_for_writepage);
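
/*
 * Sketch of the intended pattern in a ->writepage() that backs out
 * (hypothetical foo_writepage, not from this file):
 *
 *	static int foo_writepage(struct page *page, struct writeback_control *wbc)
 *	{
 *		if (foo_cannot_write_now()) {
 *			redirty_page_for_writepage(wbc, page);
 *			unlock_page(page);
 *			return 0;
 *		}
 *		...
 *	}
 */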

/*
 * Dirty a page.
 *
 * For pages with a mapping this should be done under the page lock
 * for the benefit of asynchronous memory errors who prefer a consistent
 * dirty state. This rule can be broken in some special cases,
 * but should be better not to.
 *
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
		/*
		 * readahead/lru_deactivate_page could remain
		 * PG_readahead/PG_reclaim due to a race with end_page_writeback.
		 * About readahead, if the page is written, the flags would be
		 * reset. So no problem.
		 * About lru_deactivate_page, if the page is redirtied, the flag
		 * will be reset. So no problem. But if the page is used by
		 * readahead it will confuse readahead and make it restart the
		 * size rampup process; it's a trivial problem, though.
		 */
		if (PageReclaim(page))
			ClearPageReclaim(page);
#ifdef CONFIG_BLOCK
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * This cancels just the dirty bit on the kernel page itself, it does NOT
 * actually remove dirty bits on any mmap's that may be around. It also
 * leaves the page tagged dirty, so any sync activity will still find it on
 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
 * look at the dirty bits in the VM.
 *
 * Doing this should *normally* only ever be done when a page is truncated,
 * and is not actually mapped anywhere at all. However, fs/buffer.c does
 * this when it notices that somebody has cleaned out all the buffers on a
 * page without actually doing it through the VM. Can you say "ext3 is
 * horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping_cap_account_dirty(mapping)) {
		struct mem_cgroup *memcg;

		memcg = mem_cgroup_begin_page_stat(page);

		if (TestClearPageDirty(page))
			account_page_cleaned(page, mapping, memcg);

		mem_cgroup_end_page_stat(memcg);
	} else {
		ClearPageDirty(page);
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct mem_cgroup *memcg;
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (mapping && mapping_cap_account_dirty(mapping)) {
		/*
		 * Yes, Virginia, this is indeed insane.
		 *
		 * We use this sequence to make sure that
		 *  (a) we account for dirty stats properly
		 *  (b) we tell the low-level filesystem to
		 *      mark the whole page dirty if it was
		 *      dirty in a pagetable. Only to then
		 *  (c) clean the page again and return 1 to
		 *      cause the writeback.
		 *
		 * This way we avoid all nasty races with the
		 * dirty bit in multiple places and clearing
		 * them concurrently from different threads.
		 *
		 * Note! Normally the "set_page_dirty(page)"
		 * has no effect on the actual dirty bit - since
		 * that will already usually be set. But we
		 * need the side effects, and it can help us
		 * avoid races.
		 *
		 * We basically use the page "master dirty bit"
		 * as a serialization point for all the different
		 * threads doing their things.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);
		/*
		 * We carefully synchronise fault handlers against
		 * installing a dirty pte and marking the page dirty
		 * at this point.  We do this by having them hold the
		 * page lock while dirtying the page, and pages are
		 * always locked coming in here, so we get the desired
		 * exclusion.
		 */
		memcg = mem_cgroup_begin_page_stat(page);
		if (TestClearPageDirty(page)) {
			mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_wb_stat(inode_to_wb(mapping->host), WB_RECLAIMABLE);
			ret = 1;
		}
		mem_cgroup_end_page_stat(memcg);
		return ret;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct mem_cgroup *memcg;
	int ret;

	memcg = mem_cgroup_begin_page_stat(page);
	if (mapping) {
		struct inode *inode = mapping->host;
		struct backing_dev_info *bdi = inode_to_bdi(inode);
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi)) {
				struct bdi_writeback *wb = inode_to_wb(inode);

				__dec_wb_stat(wb, WB_WRITEBACK);
				__wb_writeout_inc(wb);
			}
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	if (ret) {
		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
		dec_zone_page_state(page, NR_WRITEBACK);
		inc_zone_page_state(page, NR_WRITTEN);
	}
	mem_cgroup_end_page_stat(memcg);
	return ret;
}

int __test_set_page_writeback(struct page *page, bool keep_write)
{
	struct address_space *mapping = page_mapping(page);
	struct mem_cgroup *memcg;
	int ret;

	memcg = mem_cgroup_begin_page_stat(page);
	if (mapping) {
		struct inode *inode = mapping->host;
		struct backing_dev_info *bdi = inode_to_bdi(inode);
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret) {
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi))
				__inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
		}
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		if (!keep_write)
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_TOWRITE);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret) {
		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
		inc_zone_page_state(page, NR_WRITEBACK);
	}
	mem_cgroup_end_page_stat(memcg);
	return ret;
}
EXPORT_SYMBOL(__test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	return radix_tree_tagged(&mapping->page_tree, tag);
}
EXPORT_SYMBOL(mapping_tagged);

/**
 * wait_for_stable_page() - wait for writeback to finish, if necessary.
 * @page:	The page to wait on.
 *
 * This function determines if the given page is related to a backing device
 * that requires page contents to be held stable during writeback.  If so, then
 * it will wait for any pending writeback to complete.
 */
void wait_for_stable_page(struct page *page)
{
	if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host)))
		wait_on_page_writeback(page);
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);