/*
 * include/linux/writeback.h
 */
#ifndef WRITEBACK_H
#define WRITEBACK_H

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/flex_proportions.h>
#include <linux/backing-dev-defs.h>
#include <linux/blk_types.h>

struct bio;

DECLARE_PER_CPU(int, dirty_throttle_leaks);

/*
 * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
 *
 *	(thresh - thresh/DIRTY_FULL_SCOPE, thresh)
 *
 * Further beyond, all dirtier tasks will enter a loop waiting (possibly long
 * time) for the dirty pages to drop, unless written enough pages.
 *
 * The global dirty threshold is normally equal to the global dirty limit,
 * except when the system suddenly allocates a lot of anonymous memory and
 * knocks down the global dirty threshold quickly, in which case the global
 * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
 */
#define DIRTY_SCOPE		8
#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)

struct backing_dev_info;

/*
 * fs/fs-writeback.c
 */
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
};

/*
 * why some writeback work was initiated
 */
enum wb_reason {
	WB_REASON_BACKGROUND,
	WB_REASON_TRY_TO_FREE_PAGES,
	WB_REASON_SYNC,
	WB_REASON_PERIODIC,
	WB_REASON_LAPTOP_TIMER,
	WB_REASON_FREE_MORE_MEM,
	WB_REASON_FS_FREE_SPACE,
	/*
	 * There is no bdi forker thread any more and its work is now
	 * done by an emergency worker; however, this name is visible
	 * to userland via tracepoints and we must keep exposing
	 * exactly the same information, so the mismatched name stays.
	 */
	WB_REASON_FORKER_THREAD,

	WB_REASON_MAX,
};

/*
 * A control structure which tells the writeback code what to do.  These are
 * always on the stack, and hence need no locking.  They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if start or end are non-zero then this is
	 * a hint that the filesystem need only write out the pages inside that
	 * byterange.  The byte at `end' is included in the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;

	enum writeback_sync_modes sync_mode;

	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_background:1;	/* A background writeback */
	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned range_cyclic:1;	/* range_start is cyclic */
	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct bdi_writeback *wb;	/* wb this writeback is issued under */
	struct inode *inode;		/* inode being written out */

	/* foreign inode detection, see wbc_detach_inode() */
	int wb_id;			/* current wb id */
	int wb_lcand_id;		/* last foreign candidate wb id */
	int wb_tcand_id;		/* this foreign candidate wb id */
	size_t wb_bytes;		/* bytes written by current wb */
	size_t wb_lcand_bytes;		/* bytes written by last candidate */
	size_t wb_tcand_bytes;		/* bytes written by this candidate */
#endif
};

J
Jens Axboe 已提交
106 107 108 109
static inline int wbc_to_write_flags(struct writeback_control *wbc)
{
	if (wbc->sync_mode == WB_SYNC_ALL)
		return REQ_SYNC;
110 111
	else if (wbc->for_kupdate || wbc->for_background)
		return REQ_BACKGROUND;
J
Jens Axboe 已提交
112 113 114 115

	return 0;
}

/*
 * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
 * and are measured against each other in.  There always is one global
 * domain, global_wb_domain, that every wb in the system is a member of.
 * This allows measuring the relative bandwidth of each wb to distribute
 * dirtyable memory accordingly.
 */
struct wb_domain {
	spinlock_t lock;

	/*
	 * Scale the writeback cache size proportional to the relative
	 * writeout speed.
	 *
	 * We do this by keeping a floating proportion between BDIs, based
	 * on page writeback completions [end_page_writeback()]. Those
	 * devices that write out pages fastest will get the larger share,
	 * while the slower will get a smaller share.
	 *
	 * We use page writeout completions because we are interested in
	 * getting rid of dirty pages. Having them written out is the
	 * primary goal.
	 *
	 * We introduce a concept of time, a period over which we measure
	 * these events, because demand can/will vary over time. The length
	 * of this period itself is measured in page writeback completions.
	 */
	struct fprop_global completions;
	struct timer_list period_timer;	/* timer for aging of completions */
	unsigned long period_time;

	/*
	 * The dirtyable memory and dirty threshold could be suddenly
	 * knocked down by a large amount (eg. on the startup of KVM in a
	 * swapless system). This may throw the system into deep dirty
	 * exceeded state and throttle heavy/light dirtiers alike. To
	 * retain good responsiveness, maintain global_dirty_limit for
	 * tracking slowly down to the knocked down dirty threshold.
	 *
	 * Both fields are protected by ->lock.
	 */
	unsigned long dirty_limit_tstamp;
	unsigned long dirty_limit;
};

/**
 * wb_domain_size_changed - memory available to a wb_domain has changed
 * @dom: wb_domain of interest
 *
 * This function should be called when the amount of memory available to
 * @dom has changed.  It resets @dom's dirty limit parameters to prevent
 * the past values which don't match the current configuration from skewing
 * dirty throttling.  Without this, when memory size of a wb_domain is
 * greatly reduced, the dirty throttling logic may allow too many pages to
 * be dirtied leading to consecutive unnecessary OOMs and may get stuck in
 * that situation.
 */
static inline void wb_domain_size_changed(struct wb_domain *dom)
{
	/* both fields below are protected by ->lock (see struct wb_domain) */
	spin_lock(&dom->lock);
	dom->dirty_limit_tstamp = jiffies;
	dom->dirty_limit = 0;
	spin_unlock(&dom->lock);
}

/*
 * fs/fs-writeback.c
 */
struct bdi_writeback;
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
							enum wb_reason reason);
bool try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
bool try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
				   enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);

/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
	might_sleep();
	/* sleep uninterruptibly until the __I_NEW bit of i_state clears */
	wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/cgroup.h>
#include <linux/bio.h>

void __inode_attach_wb(struct inode *inode, struct page *page);
void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
				 struct inode *inode)
	__releases(&inode->i_lock);
void wbc_detach_inode(struct writeback_control *wbc);
void wbc_account_io(struct writeback_control *wbc, struct page *page,
		    size_t bytes);
void cgroup_writeback_umount(void);
215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244

/**
 * inode_attach_wb - associate an inode with its wb
 * @inode: inode of interest
 * @page: page being dirtied (may be NULL)
 *
 * If @inode doesn't have its wb, associate it with the wb matching the
 * memcg of @page or, if @page is NULL, %current.  May be called w/ or w/o
 * @inode->i_lock.
 */
static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
	if (!inode->i_wb)
		__inode_attach_wb(inode, page);
}

/**
 * inode_detach_wb - disassociate an inode from its wb
 * @inode: inode of interest
 *
 * @inode is being freed.  Detach from its wb.
 */
static inline void inode_detach_wb(struct inode *inode)
{
	struct bdi_writeback *wb = inode->i_wb;

	if (wb) {
		inode->i_wb = NULL;
		wb_put(wb);
	}
}

/**
 * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
 * @wbc: writeback_control of interest
 * @inode: target inode
 *
 * This function is to be used by __filemap_fdatawrite_range(), which is an
 * alternative entry point into writeback code, and first ensures @inode is
 * associated with a bdi_writeback and attaches it to @wbc.
 */
static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
					       struct inode *inode)
{
	spin_lock(&inode->i_lock);
	inode_attach_wb(inode, NULL);
	/* i_lock is released inside, per its __releases() annotation */
	wbc_attach_and_unlock_inode(wbc, inode);
}

/**
 * wbc_init_bio - writeback specific initialization of bio
 * @wbc: writeback_control for the writeback in progress
 * @bio: bio to be initialized
 *
 * @bio is a part of the writeback in progress controlled by @wbc.  Perform
 * writeback specific initialization.  This is used to apply the cgroup
 * writeback context.
 */
static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
	/*
	 * pageout() path doesn't attach @wbc to the inode being written
	 * out.  This is intentional as we don't want the function to block
	 * behind a slow cgroup.  Ultimately, we want pageout() to kick off
	 * regular writeback instead of writing things out itself.
	 */
	if (!wbc->wb)
		return;

	bio_associate_blkcg(bio, wbc->wb->blkcg_css);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

/* cgroup writeback disabled: the helpers below are no-op stubs */

static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
}

static inline void inode_detach_wb(struct inode *inode)
{
}

static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
					       struct inode *inode)
	__releases(&inode->i_lock)
{
	/* still honors the "unlocks i_lock" contract of the real version */
	spin_unlock(&inode->i_lock);
}

static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
					       struct inode *inode)
{
}

static inline void wbc_detach_inode(struct writeback_control *wbc)
{
}

static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
}

static inline void wbc_account_io(struct writeback_control *wbc,
				  struct page *page, size_t bytes)
{
}

static inline void cgroup_writeback_umount(void)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * mm/page-writeback.c
 */
#ifdef CONFIG_BLOCK
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_sync(struct work_struct *work);
void laptop_mode_timer_fn(unsigned long data);
#else
static inline void laptop_sync_completion(void) { }
#endif
bool node_dirty_ok(struct pglist_data *pgdat);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom);
#endif

extern struct wb_domain global_wb_domain;

/* These are exported to sysctl. */
extern int dirty_background_ratio;
extern unsigned long dirty_background_bytes;
extern int vm_dirty_ratio;
extern unsigned long vm_dirty_bytes;
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern unsigned int dirtytime_expire_interval;
extern int vm_highmem_is_dirtyable;
extern int block_dump;
extern int laptop_mode;

extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int dirty_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int dirty_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
int dirtytime_interval_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp, loff_t *ppos);

L
Linus Torvalds 已提交
370
struct ctl_table;
371
int dirty_writeback_centisecs_handler(struct ctl_table *, int,
L
Linus Torvalds 已提交
372 373
				      void __user *, size_t *, loff_t *);

374
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
T
Tejun Heo 已提交
375
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
376

377
void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
L
Linus Torvalds 已提交
378
void page_writeback_init(void);
379
void balance_dirty_pages_ratelimited(struct address_space *mapping);
380
bool wb_over_bg_thresh(struct bdi_writeback *wb);
381

382 383 384 385 386
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
				void *data);

int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc);
387 388
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end);
389 390 391
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data);
L
Linus Torvalds 已提交
392
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
393
void writeback_set_ratelimit(void);
N
Namhyung Kim 已提交
394 395
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end);
L
Linus Torvalds 已提交
396

397 398
void account_page_redirty(struct page *page);

399 400 401
void sb_mark_inode_writeback(struct inode *inode);
void sb_clear_inode_writeback(struct inode *inode);

L
Linus Torvalds 已提交
402
#endif		/* WRITEBACK_H */