/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
#include <linux/pagevec.h>
#include <linux/timer.h>
#include <linux/sched/rt.h>
#include <linux/mm_inline.h>
#include <trace/events/writeback.h>

#include "internal.h"

/*
 * Sleep at most 200ms at a time in balance_dirty_pages().
 */
#define MAX_PAUSE		max(HZ/5, 1)

/*
 * Try to keep balance_dirty_pages() call intervals higher than this many pages
 * by raising pause time to max_pause when the interval falls below it.
 */
#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))
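/*
 * Illustrative arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12):
 * DIRTY_POLL_THRESH = 128 >> (12 - 10) = 32 pages, i.e. 128 KiB of newly
 * dirtied data between two balance_dirty_pages() polls.
 */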

/*
 * Estimate write bandwidth at 200ms intervals.
 */
#define BANDWIDTH_INTERVAL	max(HZ/5, 1)

#define RATELIMIT_CALC_SHIFT	10

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via writeback threads) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

EXPORT_SYMBOL_GPL(dirty_writeback_interval);

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

struct wb_domain global_wb_domain;

/* consolidated parameters for balance_dirty_pages() and its subroutines */
struct dirty_throttle_control {
	struct bdi_writeback	*wb;

	unsigned long		dirty;		/* file_dirty + write + nfs */
	unsigned long		thresh;		/* dirty threshold */
	unsigned long		bg_thresh;	/* dirty background threshold */

	unsigned long		wb_dirty;	/* per-wb counterparts */
	unsigned long		wb_thresh;
};

#define GDTC_INIT(__wb)		.wb = (__wb)

/*
 * Length of period for aging writeout fractions of bdis. This is an
 * arbitrarily chosen number. The longer the period, the slower fractions will
 * reflect changes in current writeout rate.
 */
#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)

#ifdef CONFIG_CGROUP_WRITEBACK

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	unsigned long this_bw = wb->avg_write_bandwidth;
	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
	unsigned long long min = wb->bdi->min_ratio;
	unsigned long long max = wb->bdi->max_ratio;

	/*
	 * @wb may already be clean by the time control reaches here and
	 * the total may not include its bw.
	 */
	if (this_bw < tot_bw) {
		if (min) {
			min *= this_bw;
			do_div(min, tot_bw);
		}
		if (max < 100) {
			max *= this_bw;
			do_div(max, tot_bw);
		}
	}

	*minp = min;
	*maxp = max;
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	*minp = wb->bdi->min_ratio;
	*maxp = wb->bdi->max_ratio;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * In a memory zone, there is a certain amount of pages we consider
 * available for the page cache, which is essentially the number of
 * free and reclaimable pages, minus some zone reserves to protect
 * lowmem and the ability to uphold the zone's watermarks without
 * requiring writeback.
 *
 * This number of dirtyable pages is the base value of which the
 * user-configurable dirty ratio is the effective number of pages that
 * are allowed to be actually dirtied.  Per individual zone, or
 * globally by using the sum of dirtyable pages over all zones.
 *
 * Because the user is allowed to specify the dirty limit globally as
 * absolute number of bytes, calculating the per-zone dirty limit can
 * require translating the configured limit into a percentage of
 * global dirtyable memory first.
 */

/**
 * zone_dirtyable_memory - number of dirtyable pages in a zone
 * @zone: the zone
 *
 * Returns the zone's number of pages potentially available for dirty
 * page cache.  This is the base value for the per-zone dirty limits.
 */
static unsigned long zone_dirtyable_memory(struct zone *zone)
{
	unsigned long nr_pages;

	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);

	nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
	nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);

	return nr_pages;
}

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

		x += zone_dirtyable_memory(z);
	}
	/*
	 * Unreclaimable memory (kernel memory or anonymous memory
	 * without swap) can bring down the dirtyable pages below
	 * the zone's dirty balance reserve and the above calculation
	 * will underflow.  However we still want to add in nodes
	 * which are below threshold (negative values) to get a more
	 * accurate calculation but make sure that the total never
	 * underflows.
	 */
	if ((long)x < 0)
		x = 0;

	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * global_dirtyable_memory - number of globally dirtyable pages
 *
 * Returns the global number of pages potentially available for dirty
 * page cache.  This is the base value for the global dirty limits.
 */
static unsigned long global_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES);
	x -= min(x, dirty_balance_reserve);

	x += global_page_state(NR_INACTIVE_FILE);
	x += global_page_state(NR_ACTIVE_FILE);

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

/*
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 *
 * Calculate the dirty thresholds based on sysctl parameters
 * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
 * - vm.dirty_ratio             or  vm.dirty_bytes
 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 * real-time tasks.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	const unsigned long available_memory = global_dirtyable_memory();
	unsigned long background;
	unsigned long dirty;
	struct task_struct *tsk;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
	else
		dirty = (vm_dirty_ratio * available_memory) / 100;

	if (dirty_background_bytes)
		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
	else
		background = (dirty_background_ratio * available_memory) / 100;

	if (background >= dirty)
		background = dirty / 2;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;
	trace_global_dirty_state(background, dirty);
}
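/*
 * Illustrative example, assuming 4 KiB pages and the default ratios
 * (dirty_background_ratio = 10, vm_dirty_ratio = 20): with 1,048,576
 * dirtyable pages (4 GiB), background = 10 * 1048576 / 100 = 104857 pages
 * (~400 MiB) and dirty = 20 * 1048576 / 100 = 209715 pages (~800 MiB);
 * a PF_LESS_THROTTLE or rt task gets both limits lifted by a further 1/4.
 */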

/**
 * zone_dirty_limit - maximum number of dirty pages allowed in a zone
 * @zone: the zone
 *
 * Returns the maximum number of dirty pages allowed in a zone, based
 * on the zone's dirtyable memory.
 */
static unsigned long zone_dirty_limit(struct zone *zone)
{
	unsigned long zone_memory = zone_dirtyable_memory(zone);
	struct task_struct *tsk = current;
	unsigned long dirty;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
			zone_memory / global_dirtyable_memory();
	else
		dirty = vm_dirty_ratio * zone_memory / 100;

	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
		dirty += dirty / 4;

	return dirty;
}

/**
 * zone_dirty_ok - tells whether a zone is within its dirty limits
 * @zone: the zone to check
 *
 * Returns %true when the dirty pages in @zone are within the zone's
 * dirty limit, %false if the limit is exceeded.
 */
bool zone_dirty_ok(struct zone *zone)
{
	unsigned long limit = zone_dirty_limit(zone);

	return zone_page_state(zone, NR_FILE_DIRTY) +
	       zone_page_state(zone, NR_UNSTABLE_NFS) +
	       zone_page_state(zone, NR_WRITEBACK) <= limit;
}

int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}

int dirty_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		writeback_set_ratelimit();
		vm_dirty_bytes = 0;
	}
	return ret;
}

int dirty_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		writeback_set_ratelimit();
		vm_dirty_ratio = 0;
	}
	return ret;
}

static unsigned long wp_next_time(unsigned long cur_time)
{
	cur_time += VM_COMPLETIONS_PERIOD_LEN;
	/* 0 has a special meaning... */
	if (!cur_time)
		return 1;
	return cur_time;
}

/*
 * Increment the wb's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __wb_writeout_inc(struct bdi_writeback *wb)
{
	struct wb_domain *dom = &global_wb_domain;

	__inc_wb_stat(wb, WB_WRITTEN);
	__fprop_inc_percpu_max(&dom->completions, &wb->completions,
			       wb->bdi->max_prop_frac);
	/* First event after period switching was turned off? */
	if (!unlikely(dom->period_time)) {
		/*
		 * We can race with other __bdi_writeout_inc calls here but
		 * it does not cause any harm since the resulting time when
		 * timer will fire and what is in writeout_period_time will be
		 * roughly the same.
		 */
		dom->period_time = wp_next_time(jiffies);
		mod_timer(&dom->period_timer, dom->period_time);
	}
}

void wb_writeout_inc(struct bdi_writeback *wb)
{
	unsigned long flags;

	local_irq_save(flags);
	__wb_writeout_inc(wb);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(wb_writeout_inc);

/*
 * On idle system, we can be called long after we scheduled because we use
 * deferred timers so count with missed periods.
 */
static void writeout_period(unsigned long t)
{
	struct wb_domain *dom = (void *)t;
	int miss_periods = (jiffies - dom->period_time) /
						 VM_COMPLETIONS_PERIOD_LEN;

	if (fprop_new_period(&dom->completions, miss_periods + 1)) {
		dom->period_time = wp_next_time(dom->period_time +
				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
		mod_timer(&dom->period_timer, dom->period_time);
	} else {
		/*
		 * Aging has zeroed all fractions. Stop wasting CPU on period
		 * updates.
		 */
		dom->period_time = 0;
	}
}

int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
{
	memset(dom, 0, sizeof(*dom));

	spin_lock_init(&dom->lock);

	init_timer_deferrable(&dom->period_timer);
	dom->period_timer.function = writeout_period;
	dom->period_timer.data = (unsigned long)dom;

	dom->dirty_limit_tstamp = jiffies;

	return fprop_global_init(&dom->completions, gfp);
}

/*
 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 * registered backing devices, which, for obvious reasons, can not
 * exceed 100%.
 */
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);

static unsigned long dirty_freerun_ceiling(unsigned long thresh,
					   unsigned long bg_thresh)
{
	return (thresh + bg_thresh) / 2;
}

static unsigned long hard_dirty_limit(unsigned long thresh)
{
	struct wb_domain *dom = &global_wb_domain;

	return max(thresh, dom->dirty_limit);
}

/**
 * wb_calc_thresh - @wb's share of dirty throttling threshold
 * @wb: bdi_writeback to query
 * @dirty: global dirty limit in pages
 *
 * Returns @wb's dirty limit in pages. The term "dirty" in the context of
 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
 *
 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 * when sleeping max_pause per page is not enough to keep the dirty pages under
 * control. For example, when the device is completely stalled due to some error
 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
 * In the other normal situations, it acts more gently by throttling the tasks
 * more (rather than completely block them) when the wb dirty pages go high.
 *
 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 * - starving fast devices
 * - piling up dirty pages (that will take long time to sync) on slow devices
 *
 * The wb's share of dirty limit will be adapting to its throughput and
 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 */
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
{
	struct wb_domain *dom = &global_wb_domain;
	u64 wb_thresh;
	long numerator, denominator;
	unsigned long wb_min_ratio, wb_max_ratio;

	/*
	 * Calculate this BDI's share of the thresh ratio.
	 */
	fprop_fraction_percpu(&dom->completions, &wb->completions,
			      &numerator, &denominator);

	wb_thresh = (thresh * (100 - bdi_min_ratio)) / 100;
	wb_thresh *= numerator;
	do_div(wb_thresh, denominator);

	wb_min_max_ratio(wb, &wb_min_ratio, &wb_max_ratio);

	wb_thresh += (thresh * wb_min_ratio) / 100;
	if (wb_thresh > (thresh * wb_max_ratio) / 100)
		wb_thresh = thresh * wb_max_ratio / 100;

	return wb_thresh;
}

/*
 *                           setpoint - dirty 3
 *        f(dirty) := 1.0 + (----------------)
 *                           limit - setpoint
 *
 * it's a 3rd order polynomial that subjects to
 *
 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
 * (2) f(setpoint) = 1.0 => the balance point
 * (3) f(limit)    = 0   => the hard limit
 * (4) df/dx      <= 0	 => negative feedback control
 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 *     => fast response on large errors; small oscillation near setpoint
 */
static long long pos_ratio_polynom(unsigned long setpoint,
					  unsigned long dirty,
					  unsigned long limit)
{
	long long pos_ratio;
	long x;

	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
		    limit - setpoint + 1);
	pos_ratio = x;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
}
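/*
 * Worked example of the polynomial above: with dirty halfway between
 * setpoint and limit, x = -1/2 (in units of limit - setpoint), so
 * pos_ratio = 1 + (-1/2)^3 = 0.875, i.e. 896 in RATELIMIT_CALC_SHIFT
 * fixed point; at dirty == limit the value reaches 0 and at the freerun
 * ceiling it reaches 2.0, matching the properties listed above.
 */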

/*
 * Dirty position control.
 *
 * (o) global/bdi setpoints
 *
 * We want the dirty pages to be balanced around the global/wb setpoints.
 * When the number of dirty pages is higher/lower than the setpoint, the
 * dirty position control ratio (and hence task dirty ratelimit) will be
 * decreased/increased to bring the dirty pages back to the setpoint.
 *
 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 *
 *     if (dirty < setpoint) scale up   pos_ratio
 *     if (dirty > setpoint) scale down pos_ratio
 *
 *     if (wb_dirty < wb_setpoint) scale up   pos_ratio
 *     if (wb_dirty > wb_setpoint) scale down pos_ratio
 *
 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 *
 * (o) global control line
 *
 *     ^ pos_ratio
 *     |
 *     |            |<===== global dirty control scope ======>|
 * 2.0 .............*
 *     |            .*
 *     |            . *
 *     |            .   *
 *     |            .     *
 *     |            .        *
 *     |            .            *
 * 1.0 ................................*
 *     |            .                  .     *
 *     |            .                  .          *
 *     |            .                  .              *
 *     |            .                  .                 *
 *     |            .                  .                    *
 *   0 +------------.------------------.----------------------*------------->
 *           freerun^          setpoint^                 limit^   dirty pages
 *
 * (o) wb control line
 *
 *     ^ pos_ratio
 *     |
 *     |            *
 *     |              *
 *     |                *
 *     |                  *
 *     |                    * |<=========== span ============>|
 * 1.0 .......................*
 *     |                      . *
 *     |                      .   *
 *     |                      .     *
 *     |                      .       *
 *     |                      .         *
 *     |                      .           *
 *     |                      .             *
 *     |                      .               *
 *     |                      .                 *
 *     |                      .                   *
 *     |                      .                     *
 * 1/4 ...............................................* * * * * * * * * * * *
 *     |                      .                         .
 *     |                      .                           .
 *     |                      .                             .
 *   0 +----------------------.-------------------------------.------------->
 *                wb_setpoint^                    x_intercept^
 *
 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
 * be smoothly throttled down to normal if it starts high in situations like
 * - start writing to a slow SD card and a fast disk at the same time. The SD
 *   card's wb_dirty may rush to many times higher than wb_setpoint.
 * - the wb dirty thresh drops quickly due to change of JBOD workload
 */
static unsigned long wb_position_ratio(struct dirty_throttle_control *dtc)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long write_bw = wb->avg_write_bandwidth;
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = hard_dirty_limit(dtc->thresh);
	unsigned long wb_thresh = dtc->wb_thresh;
	unsigned long x_intercept;
	unsigned long setpoint;		/* dirty pages' target balance point */
	unsigned long wb_setpoint;
	unsigned long span;
	long long pos_ratio;		/* for scaling up/down the rate limit */
	long x;

	if (unlikely(dtc->dirty >= limit))
		return 0;

	/*
	 * global setpoint
	 *
	 * See comment for pos_ratio_polynom().
	 */
	setpoint = (freerun + limit) / 2;
	pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);

	/*
	 * The strictlimit feature is a tool preventing mistrusted filesystems
	 * from growing a large number of dirty pages before throttling. For
	 * such filesystems balance_dirty_pages always checks wb counters
	 * against wb limits. Even if global "nr_dirty" is under "freerun".
	 * This is especially important for fuse which sets bdi->max_ratio to
	 * 1% by default. Without strictlimit feature, fuse writeback may
	 * consume arbitrary amount of RAM because it is accounted in
	 * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
	 *
	 * Here, in wb_position_ratio(), we calculate pos_ratio based on
	 * two values: wb_dirty and wb_thresh. Let's consider an example:
	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
	 * limits are set by default to 10% and 20% (background and throttle).
	 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
	 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
	 * about ~6K pages (as the average of background and throttle wb
	 * limits). The 3rd order polynomial will provide positive feedback if
	 * wb_dirty is under wb_setpoint and vice versa.
	 *
	 * Note, that we cannot use global counters in these calculations
	 * because we want to throttle process writing to a strictlimit wb
	 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
	 * in the example above).
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		long long wb_pos_ratio;
		unsigned long wb_bg_thresh;

		if (dtc->wb_dirty < 8)
			return min_t(long long, pos_ratio * 2,
				     2 << RATELIMIT_CALC_SHIFT);

		if (dtc->wb_dirty >= wb_thresh)
			return 0;

		wb_bg_thresh = div_u64((u64)wb_thresh * dtc->bg_thresh,
				       dtc->thresh);
		wb_setpoint = dirty_freerun_ceiling(wb_thresh, wb_bg_thresh);

		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
			return 0;

		wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
						 wb_thresh);

		/*
		 * Typically, for strictlimit case, wb_setpoint << setpoint
		 * and pos_ratio >> wb_pos_ratio. In the other words global
		 * state ("dirty") is not limiting factor and we have to
		 * make decision based on wb counters. But there is an
		 * important case when global pos_ratio should get precedence:
		 * global limits are exceeded (e.g. due to activities on other
		 * wb's) while given strictlimit wb is below limit.
		 *
		 * "pos_ratio * wb_pos_ratio" would work for the case above,
		 * but it would look too non-natural for the case of all
		 * activity in the system coming from a single strictlimit wb
		 * with bdi->max_ratio == 100%.
		 *
		 * Note that min() below somewhat changes the dynamics of the
		 * control system. Normally, pos_ratio value can be well over 3
		 * (when globally we are at freerun and wb is well below wb
		 * setpoint). Now the maximum pos_ratio in the same situation
		 * is 2. We might want to tweak this if we observe the control
		 * system is too slow to adapt.
		 */
		return min(pos_ratio, wb_pos_ratio);
	}

	/*
	 * We have computed basic pos_ratio above based on global situation. If
	 * the wb is over/under its share of dirty pages, we want to scale
	 * pos_ratio further down/up. That is done by the following mechanism.
	 */

	/*
	 * wb setpoint
	 *
	 *        f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
	 *
	 *                        x_intercept - wb_dirty
	 *                     := --------------------------
	 *                        x_intercept - wb_setpoint
	 *
	 * The main wb control line is a linear function that subjects to
	 *
	 * (1) f(wb_setpoint) = 1.0
	 * (2) k = - 1 / (8 * write_bw)  (in single wb case)
	 *     or equally: x_intercept = wb_setpoint + 8 * write_bw
	 *
	 * For single wb case, the dirty pages are observed to fluctuate
	 * regularly within range
	 *        [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
	 * for various filesystems, where (2) can yield in a reasonable 12.5%
	 * fluctuation range for pos_ratio.
	 *
	 * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its
	 * own size, so move the slope over accordingly and choose a slope that
	 * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
	 */
	if (unlikely(wb_thresh > dtc->thresh))
		wb_thresh = dtc->thresh;
	/*
	 * It's very possible that wb_thresh is close to 0 not because the
	 * device is slow, but because it has remained inactive for a long time.
	 * Honour such devices a reasonably good (hopefully IO efficient)
	 * threshold, so that the occasional writes won't be blocked and active
	 * writes can rampup the threshold quickly.
	 */
	wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
	/*
	 * scale global setpoint to wb's:
	 *	wb_setpoint = setpoint * wb_thresh / thresh
	 */
	x = div_u64((u64)wb_thresh << 16, dtc->thresh + 1);
	wb_setpoint = setpoint * (u64)x >> 16;
	/*
	 * Use span=(8*write_bw) in single wb case as indicated by
	 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
	 *
	 *        wb_thresh                    thresh - wb_thresh
	 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
	 *         thresh                           thresh
	 */
	span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
	x_intercept = wb_setpoint + span;

	if (dtc->wb_dirty < x_intercept - span / 4) {
		pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
				      x_intercept - wb_setpoint + 1);
	} else
		pos_ratio /= 4;

	/*
	 * wb reserve area, safeguard against dirty pool underrun and disk idle
	 * It may push the desired control point of global dirty pages higher
	 * than setpoint.
	 */
	x_intercept = wb_thresh / 2;
	if (dtc->wb_dirty < x_intercept) {
		if (dtc->wb_dirty > x_intercept / 8)
			pos_ratio = div_u64(pos_ratio * x_intercept,
					    dtc->wb_dirty);
		else
			pos_ratio *= 8;
	}

	return pos_ratio;
}

static void wb_update_write_bandwidth(struct bdi_writeback *wb,
				      unsigned long elapsed,
				      unsigned long written)
{
	const unsigned long period = roundup_pow_of_two(3 * HZ);
	unsigned long avg = wb->avg_write_bandwidth;
	unsigned long old = wb->write_bandwidth;
	u64 bw;

	/*
	 * bw = written * HZ / elapsed
	 *
	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
	 * write_bandwidth = ---------------------------------------------------
	 *                                          period
	 *
	 * @written may have decreased due to account_page_redirty().
	 * Avoid underflowing @bw calculation.
	 */
	bw = written - min(written, wb->written_stamp);
	bw *= HZ;
	if (unlikely(elapsed > period)) {
		do_div(bw, elapsed);
		avg = bw;
		goto out;
	}
	bw += (u64)wb->write_bandwidth * (period - elapsed);
	bw >>= ilog2(period);

	/*
	 * one more level of smoothing, for filtering out sudden spikes
	 */
	if (avg > old && old >= (unsigned long)bw)
		avg -= (avg - old) >> 3;

	if (avg < old && old <= (unsigned long)bw)
		avg += (old - avg) >> 3;

out:
	/* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
	avg = max(avg, 1LU);
	if (wb_has_dirty_io(wb)) {
		long delta = avg - wb->avg_write_bandwidth;
		WARN_ON_ONCE(atomic_long_add_return(delta,
					&wb->bdi->tot_write_bandwidth) <= 0);
	}
	wb->write_bandwidth = bw;
	wb->avg_write_bandwidth = avg;
}
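/*
 * Illustrative numbers, assuming HZ == 1000 and 4 KiB pages: if 5120 pages
 * (20 MiB) were written during a 200 ms interval, the instantaneous rate is
 * 5120 * HZ / elapsed = 25600 pages/s (~100 MiB/s).  With
 * period = roundup_pow_of_two(3 * HZ) = 4096, that sample is blended in with
 * weight elapsed/period (~5%), so a device already estimated at 25600 pages/s
 * stays there: (5120 * 1000 + 25600 * 3896) >> 12 = 25600.
 */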

static void update_dirty_limit(struct dirty_throttle_control *dtc)
{
	struct wb_domain *dom = &global_wb_domain;
	unsigned long thresh = dtc->thresh;
	unsigned long limit = dom->dirty_limit;

	/*
	 * Follow up in one step.
	 */
	if (limit < thresh) {
		limit = thresh;
		goto update;
	}

	/*
	 * Follow down slowly. Use the higher one as the target, because thresh
	 * may drop below dirty. This is exactly the reason to introduce
	 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
	 */
	thresh = max(thresh, dtc->dirty);
	if (limit > thresh) {
		limit -= (limit - thresh) >> 5;
		goto update;
	}
	return;
update:
	dom->dirty_limit = limit;
}

static void global_update_bandwidth(struct dirty_throttle_control *dtc,
				    unsigned long now)
{
	struct wb_domain *dom = &global_wb_domain;

	/*
	 * check locklessly first to optimize away locking for the most time
	 */
	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
		return;

	spin_lock(&dom->lock);
	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
		update_dirty_limit(dtc);
		dom->dirty_limit_tstamp = now;
	}
	spin_unlock(&dom->lock);
}

/*
 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
 *
 * Normal wb tasks will be curbed at or below it in long term.
 * Obviously it should be around (write_bw / N) when there are N dd tasks.
 */
static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
				      unsigned long dirtied,
				      unsigned long elapsed)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long dirty = dtc->dirty;
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = hard_dirty_limit(dtc->thresh);
	unsigned long setpoint = (freerun + limit) / 2;
	unsigned long write_bw = wb->avg_write_bandwidth;
	unsigned long dirty_ratelimit = wb->dirty_ratelimit;
	unsigned long dirty_rate;
	unsigned long task_ratelimit;
	unsigned long balanced_dirty_ratelimit;
	unsigned long pos_ratio;
	unsigned long step;
	unsigned long x;

	/*
	 * The dirty rate will match the writeout rate in long term, except
	 * when dirty pages are truncated by userspace or re-dirtied by FS.
	 */
	dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;

	pos_ratio = wb_position_ratio(dtc);
	/*
	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
	 */
	task_ratelimit = (u64)dirty_ratelimit *
					pos_ratio >> RATELIMIT_CALC_SHIFT;
	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */

	/*
	 * A linear estimation of the "balanced" throttle rate. The theory is,
	 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
	 * formula will yield the balanced rate limit (write_bw / N).
	 *
	 * Note that the expanded form is not a pure rate feedback:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
	 * but also takes pos_ratio into account:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
	 *
	 * (1) is not realistic because pos_ratio also takes part in balancing
	 * the dirty rate.  Consider the state
	 *	pos_ratio = 0.5						     (3)
	 *	rate = 2 * (write_bw / N)				     (4)
	 * If (1) is used, it will stuck in that state! Because each dd will
	 * be throttled at
	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
	 * yielding
	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
	 * put (6) into (1) we get
	 *	rate_(i+1) = rate_(i)					     (7)
	 *
	 * So we end up using (2) to always keep
	 *	rate_(i+1) ~= (write_bw / N)				     (8)
	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
	 * pos_ratio is able to drive itself to 1.0, which is not only where
	 * the dirty count meet the setpoint, but also where the slope of
	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
	 */
	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
					   dirty_rate | 1);
	/*
	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
	 */
	if (unlikely(balanced_dirty_ratelimit > write_bw))
		balanced_dirty_ratelimit = write_bw;

	/*
	 * We could safely do this and return immediately:
	 *
	 *	wb->dirty_ratelimit = balanced_dirty_ratelimit;
	 *
	 * However to get a more stable dirty_ratelimit, the below elaborated
	 * code makes use of task_ratelimit to filter out singular points and
	 * limit the step size.
	 *
	 * The below code essentially only uses the relative value of
	 *
	 *	task_ratelimit - dirty_ratelimit
	 *	= (pos_ratio - 1) * dirty_ratelimit
	 *
	 * which reflects the direction and size of dirty position error.
	 */

	/*
	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
	 * task_ratelimit is on the same side of dirty_ratelimit, too.
	 * For example, when
	 * - dirty_ratelimit > balanced_dirty_ratelimit
	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
	 * lowering dirty_ratelimit will help meet both the position and rate
	 * control targets. Otherwise, don't update dirty_ratelimit if it will
	 * only help meet the rate target. After all, what the users ultimately
	 * feel and care are stable dirty rate and small position error.
	 *
	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
	 * and filter out the singular points of balanced_dirty_ratelimit. Which
	 * keeps jumping around randomly and can even leap far away at times
	 * due to the small 200ms estimation period of dirty_rate (we want to
	 * keep that period small to reduce time lags).
	 */
	step = 0;

	/*
	 * For strictlimit case, calculations above were based on wb counters
	 * and limits (starting from pos_ratio = wb_position_ratio() and up to
	 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
	 * Hence, to calculate "step" properly, we have to use wb_dirty as
	 * "dirty" and wb_setpoint as "setpoint".
	 *
	 * We rampup dirty_ratelimit forcibly if wb_dirty is low because
	 * it's possible that wb_thresh is close to zero due to inactivity
	 * of backing device (see the implementation of wb_calc_thresh()).
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		dirty = dtc->wb_dirty;
		if (dtc->wb_dirty < 8)
			setpoint = dtc->wb_dirty + 1;
		else
			setpoint = (dtc->wb_thresh +
				    wb_calc_thresh(wb, dtc->bg_thresh)) / 2;
	}

	if (dirty < setpoint) {
		x = min3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit < x)
			step = x - dirty_ratelimit;
	} else {
		x = max3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit > x)
			step = dirty_ratelimit - x;
	}

	/*
	 * Don't pursue 100% rate matching. It's impossible since the balanced
	 * rate itself is constantly fluctuating. So decrease the track speed
	 * when it gets close to the target. Helps eliminate pointless tremors.
	 */
	step >>= dirty_ratelimit / (2 * step + 1);
	/*
	 * Limit the tracking speed to avoid overshooting.
	 */
	step = (step + 7) / 8;

	if (dirty_ratelimit < balanced_dirty_ratelimit)
		dirty_ratelimit += step;
	else
		dirty_ratelimit -= step;

	wb->dirty_ratelimit = max(dirty_ratelimit, 1UL);
	wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;

	trace_bdi_dirty_ratelimit(wb->bdi, dirty_rate, task_ratelimit);
}

static void __wb_update_bandwidth(struct dirty_throttle_control *dtc,
				  unsigned long start_time,
				  bool update_ratelimit)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long now = jiffies;
	unsigned long elapsed = now - wb->bw_time_stamp;
	unsigned long dirtied;
	unsigned long written;

	lockdep_assert_held(&wb->list_lock);

	/*
	 * rate-limit, only update once every 200ms.
	 */
	if (elapsed < BANDWIDTH_INTERVAL)
		return;

	dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
	written = percpu_counter_read(&wb->stat[WB_WRITTEN]);

	/*
	 * Skip quiet periods when disk bandwidth is under-utilized.
	 * (at least 1s idle time between two flusher runs)
	 */
	if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time))
		goto snapshot;

	if (update_ratelimit) {
		global_update_bandwidth(dtc, now);
		wb_update_dirty_ratelimit(dtc, dirtied, elapsed);
	}
	wb_update_write_bandwidth(wb, elapsed, written);

snapshot:
	wb->dirtied_stamp = dirtied;
	wb->written_stamp = written;
	wb->bw_time_stamp = now;
}

void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };

	__wb_update_bandwidth(&gdtc, start_time, false);
}

/*
 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
 * will look to see if it needs to start dirty throttling.
 *
 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
 * global_page_state() too often. So scale it near-sqrt to the safety margin
 * (the number of pages we may dirty without exceeding the dirty limits).
 */
static unsigned long dirty_poll_interval(unsigned long dirty,
					 unsigned long thresh)
{
	if (thresh > dirty)
		return 1UL << (ilog2(thresh - dirty) >> 1);

	return 1;
}
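/*
 * Illustrative example: with a safety margin of thresh - dirty = 1024 pages,
 * ilog2(1024) = 10, so the task may dirty 1 << (10 >> 1) = 32 more pages
 * before the next poll; once dirty >= thresh the interval collapses to a
 * single page.
 */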

static unsigned long wb_max_pause(struct bdi_writeback *wb,
				  unsigned long wb_dirty)
{
	unsigned long bw = wb->avg_write_bandwidth;
	unsigned long t;

	/*
	 * Limit pause time for small memory systems. If we sleep for too long,
	 * a small pool of dirty/writeback pages may go empty and the disk may
	 * go idle.
	 *
	 * 8 serves as the safety ratio.
	 */
	t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
	t++;

	return min_t(unsigned long, t, MAX_PAUSE);
}
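/*
 * Illustrative numbers, assuming HZ == 1000: roundup_pow_of_two(1 + HZ / 8)
 * is 128, so a wb averaging 25600 pages/s (~100 MiB/s with 4 KiB pages)
 * divides wb_dirty by 1 + 25600 / 128 = 201; 2010 dirty pages then allow a
 * pause of about 10 jiffies, and the result is always clamped to MAX_PAUSE
 * (200 ms).
 */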

static long wb_min_pause(struct bdi_writeback *wb,
			 long max_pause,
			 unsigned long task_ratelimit,
			 unsigned long dirty_ratelimit,
			 int *nr_dirtied_pause)
{
	long hi = ilog2(wb->avg_write_bandwidth);
	long lo = ilog2(wb->dirty_ratelimit);
	long t;		/* target pause */
	long pause;	/* estimated next pause */
	int pages;	/* target nr_dirtied_pause */

	/* target for 10ms pause on 1-dd case */
	t = max(1, HZ / 100);

	/*
	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
	 * overheads.
	 *
	 * (N * 10ms) on 2^N concurrent tasks.
	 */
	if (hi > lo)
		t += (hi - lo) * (10 * HZ) / 1024;

	/*
	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
	 * on the much more stable dirty_ratelimit. However the next pause time
	 * will be computed based on task_ratelimit and the two rate limits may
	 * depart considerably at some time. Especially if task_ratelimit goes
	 * below dirty_ratelimit/2 and the target pause is max_pause, the next
	 * pause time will be max_pause*2 _trimmed down_ to max_pause.  As a
	 * result task_ratelimit won't be executed faithfully, which could
	 * eventually bring down dirty_ratelimit.
	 *
	 * We apply two rules to fix it up:
	 * 1) try to estimate the next pause time and if necessary, use a lower
	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
	 * 2) limit the target pause time to max_pause/2, so that the normal
	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
	 */
	t = min(t, 1 + max_pause / 2);
	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);

	/*
	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
	 * When the 16 consecutive reads are often interrupted by some dirty
	 * throttling pause during the async writes, cfq will go into idles
	 * (deadline is fine). So push nr_dirtied_pause as high as possible
	 * until reaches DIRTY_POLL_THRESH=32 pages.
	 */
	if (pages < DIRTY_POLL_THRESH) {
		t = max_pause;
		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
		if (pages > DIRTY_POLL_THRESH) {
			pages = DIRTY_POLL_THRESH;
			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
		}
	}

	pause = HZ * pages / (task_ratelimit + 1);
	if (pause > max_pause) {
		t = max_pause;
		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
	}

	*nr_dirtied_pause = pages;
	/*
	 * The minimal pause time will normally be half the target pause time.
	 */
	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
}

static inline void wb_dirty_limits(struct dirty_throttle_control *dtc,
				   unsigned long *wb_bg_thresh)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long wb_reclaimable;

	/*
	 * wb_thresh is not treated as some limiting factor as
	 * dirty_thresh, due to reasons
	 * - in JBOD setup, wb_thresh can fluctuate a lot
	 * - in a system with HDD and USB key, the USB key may somehow
	 *   go into state (wb_dirty >> wb_thresh) either because
	 *   wb_dirty starts high, or because wb_thresh drops low.
	 *   In this case we don't want to hard throttle the USB key
	 *   dirtiers for 100 seconds until wb_dirty drops under
	 *   wb_thresh. Instead the auxiliary wb control line in
	 *   wb_position_ratio() will let the dirtier task progress
	 *   at some rate <= (write_bw / 2) for bringing down wb_dirty.
	 */
	dtc->wb_thresh = wb_calc_thresh(dtc->wb, dtc->thresh);

	if (wb_bg_thresh)
		*wb_bg_thresh = dtc->thresh ? div_u64((u64)dtc->wb_thresh *
						      dtc->bg_thresh,
						      dtc->thresh) : 0;

	/*
	 * In order to avoid the stacked BDI deadlock we need
	 * to ensure we accurately count the 'dirty' pages when
	 * the threshold is low.
	 *
	 * Otherwise it would be possible to get thresh+n pages
	 * reported dirty, even though there are thresh-m pages
	 * actually dirty; with m+n sitting in the percpu
	 * deltas.
	 */
	if (dtc->wb_thresh < 2 * wb_stat_error(wb)) {
		wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
		dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
	} else {
		wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
		dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
	}
}

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
 * If we're over `background_thresh' then the writeback threads are woken to
 * perform some writeout.
 */
static void balance_dirty_pages(struct address_space *mapping,
				struct bdi_writeback *wb,
				unsigned long pages_dirtied)
{
	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
	struct dirty_throttle_control * const gdtc = &gdtc_stor;
	unsigned long nr_reclaimable;	/* = file_dirty + unstable_nfs */
	long period;
	long pause;
	long max_pause;
	long min_pause;
	int nr_dirtied_pause;
	bool dirty_exceeded = false;
	unsigned long task_ratelimit;
	unsigned long dirty_ratelimit;
	unsigned long pos_ratio;
	struct backing_dev_info *bdi = wb->bdi;
	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
	unsigned long start_time = jiffies;

	for (;;) {
		unsigned long now = jiffies;
		unsigned long dirty, thresh, bg_thresh;

		/*
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		gdtc->dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);

		global_dirty_limits(&gdtc->bg_thresh, &gdtc->thresh);

		if (unlikely(strictlimit)) {
			wb_dirty_limits(gdtc, &bg_thresh);

			dirty = gdtc->wb_dirty;
			thresh = gdtc->wb_thresh;
		} else {
			dirty = gdtc->dirty;
			thresh = gdtc->thresh;
			bg_thresh = gdtc->bg_thresh;
		}

		/*
		 * Throttle it only when the background writeback cannot
		 * catch-up. This avoids (excessively) small writeouts
		 * when the wb limits are ramping up in case of !strictlimit.
		 *
		 * In strictlimit case make decision based on the wb counters
		 * and limits. Small writeouts when the wb limits are ramping
		 * up are the price we consciously pay for strictlimit-ing.
		 */
		if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh)) {
			current->dirty_paused_when = now;
			current->nr_dirtied = 0;
			current->nr_dirtied_pause =
				dirty_poll_interval(dirty, thresh);
			break;
		}

		if (unlikely(!writeback_in_progress(wb)))
			wb_start_background_writeback(wb);

		if (!strictlimit)
			wb_dirty_limits(gdtc, NULL);

		dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) &&
			((gdtc->dirty > gdtc->thresh) || strictlimit);
		if (dirty_exceeded && !wb->dirty_exceeded)
			wb->dirty_exceeded = 1;

		if (time_is_before_jiffies(wb->bw_time_stamp +
					   BANDWIDTH_INTERVAL)) {
			spin_lock(&wb->list_lock);
			__wb_update_bandwidth(gdtc, start_time, true);
			spin_unlock(&wb->list_lock);
		}

		dirty_ratelimit = wb->dirty_ratelimit;
		pos_ratio = wb_position_ratio(gdtc);
		task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >>
							RATELIMIT_CALC_SHIFT;
		max_pause = wb_max_pause(wb, gdtc->wb_dirty);
		min_pause = wb_min_pause(wb, max_pause,
					 task_ratelimit, dirty_ratelimit,
					 &nr_dirtied_pause);

		if (unlikely(task_ratelimit == 0)) {
			period = max_pause;
			pause = max_pause;
			goto pause;
		}
		period = HZ * pages_dirtied / task_ratelimit;
		pause = period;
		if (current->dirty_paused_when)
			pause -= now - current->dirty_paused_when;
		/*
		 * For less than 1s think time (ext3/4 may block the dirtier
		 * for up to 800ms from time to time on 1-HDD; so does xfs,
		 * however at much less frequency), try to compensate it in
		 * future periods by updating the virtual time; otherwise just
		 * do a reset, as it may be a light dirtier.
		 */
		if (pause < min_pause) {
			trace_balance_dirty_pages(bdi,
						  gdtc->thresh,
						  gdtc->bg_thresh,
						  gdtc->dirty,
						  gdtc->wb_thresh,
						  gdtc->wb_dirty,
						  dirty_ratelimit,
						  task_ratelimit,
						  pages_dirtied,
						  period,
						  min(pause, 0L),
						  start_time);
			if (pause < -HZ) {
				current->dirty_paused_when = now;
				current->nr_dirtied = 0;
			} else if (period) {
				current->dirty_paused_when += period;
				current->nr_dirtied = 0;
			} else if (current->nr_dirtied_pause <= pages_dirtied)
				current->nr_dirtied_pause += pages_dirtied;
			break;
		}
		if (unlikely(pause > max_pause)) {
			/* for occasional dropped task_ratelimit */
			now += min(pause - max_pause, max_pause);
			pause = max_pause;
		}

pause:
		trace_balance_dirty_pages(bdi,
					  gdtc->thresh,
					  gdtc->bg_thresh,
					  gdtc->dirty,
					  gdtc->wb_thresh,
					  gdtc->wb_dirty,
					  dirty_ratelimit,
					  task_ratelimit,
					  pages_dirtied,
					  period,
					  pause,
					  start_time);
		__set_current_state(TASK_KILLABLE);
		io_schedule_timeout(pause);

		current->dirty_paused_when = now + pause;
		current->nr_dirtied = 0;
		current->nr_dirtied_pause = nr_dirtied_pause;

		/*
		 * This is typically equal to (dirty < thresh) and can also
		 * keep "1000+ dd on a slow USB stick" under control.
		 */
		if (task_ratelimit)
			break;

		/*
		 * In the case of an unresponsive NFS server and the NFS dirty
		 * pages exceeding dirty_thresh, give the other good wb's a pipe
		 * to go through, so that tasks on them still remain responsive.
		 *
		 * In theory 1 page is enough to keep the consumer-producer
		 * pipe going: the flusher cleans 1 page => the task dirties 1
		 * more page. However wb_dirty has accounting errors.  So use
		 * the larger and more IO friendly wb_stat_error.
		 */
		if (gdtc->wb_dirty <= wb_stat_error(wb))
			break;

		if (fatal_signal_pending(current))
			break;
	}

	if (!dirty_exceeded && wb->dirty_exceeded)
		wb->dirty_exceeded = 0;
L
Linus Torvalds 已提交
1544

1545
	if (writeback_in_progress(wb))
1546
		return;
L
Linus Torvalds 已提交
1547 1548 1549 1550 1551 1552 1553 1554 1555

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
1556 1557 1558
	if (laptop_mode)
		return;

1559
	if (nr_reclaimable > gdtc->bg_thresh)
1560
		wb_start_background_writeback(wb);
L
Linus Torvalds 已提交
1561 1562
}

static DEFINE_PER_CPU(int, bdp_ratelimits);

/*
 * Normal tasks are throttled by
 *	loop {
 *		dirty tsk->nr_dirtied_pause pages;
 *		take a snap in balance_dirty_pages();
 *	}
 * However there is a worst case. If every task exits immediately after
 * dirtying (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will
 * never be called to throttle the page dirties. The solution is to save the
 * not yet throttled page dirties in dirty_throttle_leaks on task exit and
 * charge them randomly into the running tasks. This works well for the above
 * worst case, as the new task will pick up and accumulate the old task's
 * leaked dirty count and eventually get throttled.
 */
DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;

/**
 * balance_dirty_pages_ratelimited - balance dirty memory state
 * @mapping: address_space which was dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct bdi_writeback *wb = NULL;
	int ratelimit;
	int *p;

	if (!bdi_cap_account_dirty(bdi))
		return;

	if (inode_cgwb_enabled(inode))
		wb = wb_get_create_current(bdi, GFP_KERNEL);
	if (!wb)
		wb = &bdi->wb;

	ratelimit = current->nr_dirtied_pause;
	if (wb->dirty_exceeded)
		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));

	preempt_disable();
	/*
	 * This prevents one CPU from accumulating too many dirtied pages
	 * without calling into balance_dirty_pages(), which can happen when
	 * there are 1000+ tasks that all start dirtying pages at exactly the
	 * same time, each of them having honoured a too-large initial
	 * task->nr_dirtied_pause.
	 */
	p = this_cpu_ptr(&bdp_ratelimits);
	if (unlikely(current->nr_dirtied >= ratelimit))
		*p = 0;
	else if (unlikely(*p >= ratelimit_pages)) {
		*p = 0;
		ratelimit = 0;
	}
	/*
	 * Pick up the dirtied pages by the exited tasks. This avoids lots of
	 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
	 * the dirty throttling and livelocking other long-running dirtiers.
	 */
	p = this_cpu_ptr(&dirty_throttle_leaks);
	if (*p > 0 && current->nr_dirtied < ratelimit) {
		unsigned long nr_pages_dirtied;
		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
		*p -= nr_pages_dirtied;
		current->nr_dirtied += nr_pages_dirtied;
	}
	preempt_enable();

	if (unlikely(current->nr_dirtied >= ratelimit))
		balance_dirty_pages(mapping, wb, current->nr_dirtied);

	wb_put(wb);
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
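
/*
 * Illustrative sketch (not part of this file): a typical buffered-write path
 * dirties pages one at a time and then calls balance_dirty_pages_ratelimited()
 * so the dirtier is throttled before it can overshoot the dirty limits.  The
 * helper name example_fs_write_end() is hypothetical; real filesystems do the
 * equivalent in their ->write_end() implementations.
 */
static int example_fs_write_end(struct address_space *mapping,
				struct page *page)
{
	/* mark the page dirty while it is still locked */
	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	/* throttle the current task once per newly dirtied page */
	balance_dirty_pages_ratelimited(mapping);
	return 0;
}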

void throttle_vm_writeout(gfp_t gfp_mask)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	for ( ; ; ) {
		global_dirty_limits(&background_thresh, &dirty_thresh);
		dirty_thresh = hard_dirty_limit(dirty_thresh);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;      /* wheeee... */

		if (global_page_state(NR_UNSTABLE_NFS) +
		    global_page_state(NR_WRITEBACK) <= dirty_thresh)
			break;
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem.  So we cannot just sit here
		 * waiting for IO to complete.
		 */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
	}
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	return 0;
}

#ifdef CONFIG_BLOCK
void laptop_mode_timer_fn(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	int nr_pages = global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS);
	struct bdi_writeback *wb;
	struct wb_iter iter;

	/*
	 * We want to write everything out, not just down to the dirty
	 * threshold
	 */
	if (!bdi_has_dirty_io(&q->backing_dev_info))
		return;

	bdi_for_each_wb(wb, &q->backing_dev_info, &iter, 0)
		if (wb_has_dirty_io(wb))
			wb_start_writeback(wb, nr_pages, true,
					   WB_REASON_LAPTOP_TIMER);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(struct backing_dev_info *info)
{
	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
		del_timer(&bdi->laptop_mode_wb_timer);

	rcu_read_unlock();
}
#endif
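
/*
 * Illustrative sketch (hypothetical caller, not from this file): the block
 * layer pokes the laptop-mode timer when a request completes, so that dirty
 * data is flushed shortly after the disk has already been spun up.  The
 * function name example_complete_request() is made up; the real caller is
 * the request completion path.
 */
static void example_complete_request(struct request_queue *q)
{
	if (laptop_mode)
		laptop_io_completion(&q->backing_dev_info);
}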

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds.
 */

void writeback_set_ratelimit(void)
{
	struct wb_domain *dom = &global_wb_domain;
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);
	dom->dirty_limit = dirty_thresh;
	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
}
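
/*
 * Worked example (illustrative numbers, not from the kernel source): with a
 * dirty threshold of 262144 pages (1 GiB of 4 KiB pages) on a 4-CPU machine,
 * ratelimit_pages = 262144 / (4 * 32) = 2048 pages, i.e. each CPU may dirty
 * about 8 MiB between balance_dirty_pages() calls, so all CPUs together stay
 * within roughly 1/32 (~3%) of the threshold.
 */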

static int
ratelimit_handler(struct notifier_block *self, unsigned long action,
		  void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DEAD:
		writeback_set_ratelimit();
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages).
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHMEM memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has.
 */
void __init page_writeback_init(void)
{
	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
}

/**
 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
 * @mapping: address space structure to write
 * @start: starting page index
 * @end: ending page index (inclusive)
 *
 * This function scans the page range from @start to @end (inclusive) and tags
 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
 * that write_cache_pages (or whoever calls this function) will then use
 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
 * used to avoid livelocking of writeback by a process steadily creating new
 * dirty pages in the file (thus it is important for this function to be quick
 * so that it can tag pages faster than a dirtying process can create them).
 */
/*
 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
 */
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end)
{
#define WRITEBACK_TAG_BATCH 4096
	unsigned long tagged;

	do {
		spin_lock_irq(&mapping->tree_lock);
		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
				&start, end, WRITEBACK_TAG_BATCH,
				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
		spin_unlock_irq(&mapping->tree_lock);
		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
		cond_resched();
		/* We check 'start' to handle wrapping when end == ~0UL */
	} while (tagged >= WRITEBACK_TAG_BATCH && start);
}
EXPORT_SYMBOL(tag_pages_for_writeback);

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 *
 * To avoid livelocks (when another process dirties new pages), we first tag
 * pages which should be written back with TOWRITE tag and only then start
 * writing them. For data-integrity sync we have to be careful so that we do
 * not miss some pages (e.g., because some other process has cleared TOWRITE
 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
 * by the process clearing the DIRTY tag (and submitting the page for IO).
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping. However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end) {
				/*
				 * can't be range_cyclic (1st pass) because
				 * end == -1 in that case.
				 */
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			/*
			 * Page truncated or invalidated. We can freely skip it
			 * then, even for data integrity operations: the page
			 * has disappeared concurrently, so there could be no
			 * real expectation of this data integrity operation
			 * even if there is now a new, dirty page at the same
			 * pagecache address.
			 */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					wait_on_page_writeback(page);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					/*
					 * done_index is set past this page,
					 * so media errors will not choke
					 * background writeout for the entire
					 * file. This has consequences for
					 * range_cyclic semantics (ie. it may
					 * not be suitable for data integrity
					 * writeout).
					 */
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			/*
			 * We stop writing back only if we are not doing
			 * integrity sync. In case of integrity sync we have to
			 * keep going until we have written all the pages
			 * we tagged for writeback prior to entering this loop.
			 */
			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
EXPORT_SYMBOL(write_cache_pages);

/*
 * Function used by generic_writepages to call the real writepage
 * function and set the mapping flags on error
 */
static int __writepage(struct page *page, struct writeback_control *wbc,
		       void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/**
 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc)
{
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
	blk_finish_plug(&plug);
	return ret;
}

EXPORT_SYMBOL(generic_writepages);
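
/*
 * Illustrative sketch (hypothetical filesystem, not from this file): a
 * filesystem whose ->writepage can write any single page may either leave
 * ->writepages unset, letting do_writepages() below fall back to
 * generic_writepages(), or wire it up explicitly.  example_fs_writepage()
 * is an assumed helper supplied by the filesystem.
 */
static int example_fs_writepage(struct page *page, struct writeback_control *wbc);

static const struct address_space_operations example_fs_aops = {
	.writepage	= example_fs_writepage,	/* assumed helper */
	.writepages	= generic_writepages,
};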

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	if (mapping->a_ops->writepages)
		ret = mapping->a_ops->writepages(mapping, wbc);
	else
		ret = generic_writepages(mapping, wbc);
	return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);
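
/*
 * Illustrative sketch (hypothetical caller, not from this file): a filesystem
 * that wants to synchronously flush a single locked page, e.g. a bitmap or
 * directory page, can use write_one_page() with wait == 1:
 */
static int example_fs_flush_page(struct page *page)
{
	int err;

	lock_page(page);
	/* write_one_page() unlocks the page and waits for the I/O */
	err = write_one_page(page, 1);
	if (err)
		pr_warn("example_fs: page %lu write failed: %d\n",
			page->index, err);
	return err;
}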

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		return !TestSetPageDirty(page);
	return 0;
}

/*
 * Helper function for set_page_dirty family.
 *
 * Caller must hold mem_cgroup_begin_page_stat().
 *
 * NOTE: This relies on being atomic wrt interrupts.
 */
void account_page_dirtied(struct page *page, struct address_space *mapping,
			  struct mem_cgroup *memcg)
{
	struct inode *inode = mapping->host;

	trace_writeback_dirty_page(page, mapping);

	if (mapping_cap_account_dirty(mapping)) {
		struct bdi_writeback *wb;

		inode_attach_wb(inode, page);
		wb = inode_to_wb(inode);

		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
		__inc_zone_page_state(page, NR_FILE_DIRTY);
		__inc_zone_page_state(page, NR_DIRTIED);
		__inc_wb_stat(wb, WB_RECLAIMABLE);
		__inc_wb_stat(wb, WB_DIRTIED);
		task_io_account_write(PAGE_CACHE_SIZE);
		current->nr_dirtied++;
		this_cpu_inc(bdp_ratelimits);
	}
}
EXPORT_SYMBOL(account_page_dirtied);

/*
 * Helper function for deaccounting dirty page without writeback.
 *
 * Caller must hold mem_cgroup_begin_page_stat().
 */
void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct mem_cgroup *memcg)
{
	if (mapping_cap_account_dirty(mapping)) {
		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
		dec_zone_page_state(page, NR_FILE_DIRTY);
		dec_wb_stat(inode_to_wb(mapping->host), WB_RECLAIMABLE);
		task_io_account_cancelled_write(PAGE_CACHE_SIZE);
	}
}

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * The caller must ensure this doesn't race with truncation.  Most will simply
 * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
 * the pte lock held, which also locks out truncation.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = mem_cgroup_begin_page_stat(page);
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		unsigned long flags;

		if (!mapping) {
			mem_cgroup_end_page_stat(memcg);
			return 1;
		}

		spin_lock_irqsave(&mapping->tree_lock, flags);
		BUG_ON(page_mapping(page) != mapping);
		WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
		account_page_dirtied(page, mapping, memcg);
		radix_tree_tag_set(&mapping->page_tree, page_index(page),
				   PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
		mem_cgroup_end_page_stat(memcg);

		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	mem_cgroup_end_page_stat(memcg);
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * Call this whenever redirtying a page, to de-account the dirty counters
 * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written
 * counters (NR_WRITTEN, BDI_WRITTEN) in the long term. The mismatches will
 * lead to systematic errors in balanced_dirty_ratelimit and the dirty pages
 * position control.
 */
void account_page_redirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping && mapping_cap_account_dirty(mapping)) {
		struct bdi_writeback *wb = inode_to_wb(mapping->host);

		current->nr_dirtied--;
		dec_zone_page_state(page, NR_DIRTIED);
		dec_wb_stat(wb, WB_DIRTIED);
	}
}
EXPORT_SYMBOL(account_page_redirty);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	int ret;

	wbc->pages_skipped++;
	ret = __set_page_dirty_nobuffers(page);
	account_page_redirty(page);
	return ret;
}
EXPORT_SYMBOL(redirty_page_for_writepage);
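
/*
 * Illustrative sketch (hypothetical filesystem, not from this file): a
 * ->writepage implementation that cannot write the page right now follows the
 * rule described above: redirty the page, unlock it and return 0.  The helpers
 * example_fs_would_block() and example_fs_do_writepage() are assumed to exist.
 */
static int example_fs_writepage(struct page *page, struct writeback_control *wbc)
{
	if (wbc->sync_mode == WB_SYNC_NONE && example_fs_would_block(page)) {
		/* punt: keep the page dirty so later writeback retries it */
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}

	/* normal writeout path */
	return example_fs_do_writepage(page, wbc);
}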

/*
 * Dirty a page.
 *
 * For pages with a mapping this should be done under the page lock
 * for the benefit of asynchronous memory errors which prefer a consistent
 * dirty state. This rule can be broken in some special cases,
 * but it is better not to.
 *
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
		/*
		 * readahead/lru_deactivate_page could remain
		 * PG_readahead/PG_reclaim due to a race with end_page_writeback.
		 * About readahead, if the page is written, the flags would be
		 * reset. So no problem.
		 * About lru_deactivate_page, if the page is redirtied, the flag
		 * will be reset. So no problem. But if the page is used by
		 * readahead it will confuse readahead and make it restart the
		 * size rampup process. That is a trivial problem, though.
		 */
		if (PageReclaim(page))
			ClearPageReclaim(page);
#ifdef CONFIG_BLOCK
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);
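
/*
 * Illustrative sketch (hypothetical driver code, not from this file): a
 * driver that pinned user pages with get_user_pages() and let a device DMA
 * into them holds no page locks at completion time, so it uses
 * set_page_dirty_lock() before dropping the page references:
 */
static void example_drv_complete_dma(struct page **pages, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		set_page_dirty_lock(pages[i]);	/* data was written by DMA */
		page_cache_release(pages[i]);	/* drop get_user_pages() ref */
	}
}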

/*
 * This cancels just the dirty bit on the kernel page itself, it does NOT
 * actually remove dirty bits on any mmap's that may be around. It also
 * leaves the page tagged dirty, so any sync activity will still find it on
 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
 * look at the dirty bits in the VM.
 *
 * Doing this should *normally* only ever be done when a page is truncated,
 * and is not actually mapped anywhere at all. However, fs/buffer.c does
 * this when it notices that somebody has cleaned out all the buffers on a
 * page without actually doing it through the VM. Can you say "ext3 is
 * horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping_cap_account_dirty(mapping)) {
		struct mem_cgroup *memcg;

		memcg = mem_cgroup_begin_page_stat(page);

		if (TestClearPageDirty(page))
			account_page_cleaned(page, mapping, memcg);

		mem_cgroup_end_page_stat(memcg);
	} else {
		ClearPageDirty(page);
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct mem_cgroup *memcg;
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (mapping && mapping_cap_account_dirty(mapping)) {
		/*
		 * Yes, Virginia, this is indeed insane.
		 *
		 * We use this sequence to make sure that
		 *  (a) we account for dirty stats properly
		 *  (b) we tell the low-level filesystem to
		 *      mark the whole page dirty if it was
		 *      dirty in a pagetable. Only to then
		 *  (c) clean the page again and return 1 to
		 *      cause the writeback.
		 *
		 * This way we avoid all nasty races with the
		 * dirty bit in multiple places and clearing
		 * them concurrently from different threads.
		 *
		 * Note! Normally the "set_page_dirty(page)"
		 * has no effect on the actual dirty bit - since
		 * that will already usually be set. But we
		 * need the side effects, and it can help us
		 * avoid races.
		 *
		 * We basically use the page "master dirty bit"
		 * as a serialization point for all the different
		 * threads doing their things.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);
		/*
		 * We carefully synchronise fault handlers against
		 * installing a dirty pte and marking the page dirty
		 * at this point.  We do this by having them hold the
		 * page lock while dirtying the page, and pages are
		 * always locked coming in here, so we get the desired
		 * exclusion.
		 */
		memcg = mem_cgroup_begin_page_stat(page);
		if (TestClearPageDirty(page)) {
			mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_wb_stat(inode_to_wb(mapping->host), WB_RECLAIMABLE);
			ret = 1;
		}
		mem_cgroup_end_page_stat(memcg);
		return ret;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);
L
int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct mem_cgroup *memcg;
	int ret;

	memcg = mem_cgroup_begin_page_stat(page);
	if (mapping) {
		struct inode *inode = mapping->host;
		struct backing_dev_info *bdi = inode_to_bdi(inode);
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi)) {
				struct bdi_writeback *wb = inode_to_wb(inode);

				__dec_wb_stat(wb, WB_WRITEBACK);
				__wb_writeout_inc(wb);
			}
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	if (ret) {
		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
		dec_zone_page_state(page, NR_WRITEBACK);
		inc_zone_page_state(page, NR_WRITTEN);
	}
	mem_cgroup_end_page_stat(memcg);
	return ret;
}

int __test_set_page_writeback(struct page *page, bool keep_write)
{
	struct address_space *mapping = page_mapping(page);
	struct mem_cgroup *memcg;
	int ret;

	memcg = mem_cgroup_begin_page_stat(page);
	if (mapping) {
		struct inode *inode = mapping->host;
		struct backing_dev_info *bdi = inode_to_bdi(inode);
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret) {
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi))
				__inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
		}
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		if (!keep_write)
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_TOWRITE);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret) {
		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
		inc_zone_page_state(page, NR_WRITEBACK);
	}
	mem_cgroup_end_page_stat(memcg);
	return ret;
}
EXPORT_SYMBOL(__test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	return radix_tree_tagged(&mapping->page_tree, tag);
}
EXPORT_SYMBOL(mapping_tagged);

/**
 * wait_for_stable_page() - wait for writeback to finish, if necessary.
 * @page:	The page to wait on.
 *
 * This function determines if the given page is related to a backing device
 * that requires page contents to be held stable during writeback.  If so, then
 * it will wait for any pending writeback to complete.
 */
void wait_for_stable_page(struct page *page)
{
	if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host)))
		wait_on_page_writeback(page);
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);
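
/*
 * Illustrative sketch (hypothetical filesystem, not from this file): a
 * ->page_mkwrite() handler typically calls wait_for_stable_page() on the
 * locked page before letting userspace write to it, so devices that compute
 * checksums or DIF/DIX data see stable page contents:
 */
static int example_fs_page_mkwrite(struct vm_area_struct *vma,
				   struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	/* filesystem-specific checks and block allocation would go here */
	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
}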