/*
 * include/linux/writeback.h
 */
#ifndef WRITEBACK_H
#define WRITEBACK_H

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/flex_proportions.h>
#include <linux/backing-dev-defs.h>

DECLARE_PER_CPU(int, dirty_throttle_leaks);

/*
 * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
 *
 *	(thresh - thresh/DIRTY_FULL_SCOPE, thresh)
 *
 * Further beyond, all dirtier tasks will enter a loop, waiting (possibly for a
 * long time) for the dirty pages to drop, unless they have written enough pages.
 *
 * The global dirty threshold is normally equal to the global dirty limit,
 * except when the system suddenly allocates a lot of anonymous memory and
 * knocks down the global dirty threshold quickly, in which case the global
 * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
 */
#define DIRTY_SCOPE		8
#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)

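/*
 * Worked example (illustrative, added for clarity): with DIRTY_SCOPE = 8
 * and therefore DIRTY_FULL_SCOPE = 4, a global dirty threshold of 1000
 * pages puts the smooth-throttling region at
 *
 *	(1000 - 1000/4, 1000) = (750, 1000) pages
 *
 * i.e. the top quarter just below the threshold.
 */
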
struct backing_dev_info;

/*
 * fs/fs-writeback.c
 */
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
};

/*
 * why some writeback work was initiated
 */
enum wb_reason {
	WB_REASON_BACKGROUND,
	WB_REASON_TRY_TO_FREE_PAGES,
	WB_REASON_SYNC,
	WB_REASON_PERIODIC,
	WB_REASON_LAPTOP_TIMER,
	WB_REASON_FREE_MORE_MEM,
	WB_REASON_FS_FREE_SPACE,
	/*
	 * There is no bdi forker thread any more and the work is done
	 * by an emergency worker.  However, this reason is visible to
	 * userland through tracepoints and we keep exposing exactly
	 * the same information, so the mismatched name is retained.
	 */
	WB_REASON_FORKER_THREAD,

	WB_REASON_MAX,
};

/*
 * A control structure which tells the writeback code what to do.  These are
 * always on the stack, and hence need no locking.  They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if start or end are non-zero then this is
	 * a hint that the filesystem need only write out the pages inside that
	 * byterange.  The byte at `end' is included in the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;

	enum writeback_sync_modes sync_mode;

	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_background:1;	/* A background writeback */
	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned range_cyclic:1;	/* range_start is cyclic */
	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct bdi_writeback *wb;	/* wb this writeback is issued under */
	struct inode *inode;		/* inode being written out */

	/* foreign inode detection, see wbc_detach_inode() */
	int wb_id;			/* current wb id */
	int wb_lcand_id;		/* last foreign candidate wb id */
	int wb_tcand_id;		/* this foreign candidate wb id */
	size_t wb_bytes;		/* bytes written by current wb */
	size_t wb_lcand_bytes;		/* bytes written by last candidate */
	size_t wb_tcand_bytes;		/* bytes written by this candidate */
#endif
};

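/*
 * Illustrative sketch (not part of the kernel API): a typical on-stack
 * initialisation for a WB_SYNC_ALL writeback over the full byte range,
 * in the style of __filemap_fdatawrite_range().  Unspecified fields are
 * left zero as required above.  Kept under #if 0, example only.
 */
#if 0
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
#endif
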
/*
 * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
 * and are measured against each other in.  There is always one global
 * domain, global_wb_domain, that every wb in the system is a member of.
 * This allows measuring the relative bandwidth of each wb to distribute
 * dirtyable memory accordingly.
 */
struct wb_domain {
	spinlock_t lock;

	/*
	 * Scale the writeback cache size proportional to the relative
	 * writeout speed.
	 *
	 * We do this by keeping a floating proportion between BDIs, based
	 * on page writeback completions [end_page_writeback()]. Those
	 * devices that write out pages fastest will get the larger share,
	 * while the slower will get a smaller share.
	 *
	 * We use page writeout completions because we are interested in
	 * getting rid of dirty pages. Having them written out is the
	 * primary goal.
	 *
	 * We introduce a concept of time, a period over which we measure
	 * these events, because demand can/will vary over time. The length
	 * of this period itself is measured in page writeback completions.
	 */
	struct fprop_global completions;
	struct timer_list period_timer;	/* timer for aging of completions */
	unsigned long period_time;

	/*
	 * The dirtyable memory and dirty threshold could be suddenly
	 * knocked down by a large amount (eg. on the startup of KVM in a
	 * swapless system). This may throw the system into deep dirty
	 * exceeded state and throttle heavy/light dirtiers alike. To
	 * retain good responsiveness, maintain global_dirty_limit for
	 * tracking slowly down to the knocked down dirty threshold.
	 *
	 * Both fields are protected by ->lock.
	 */
	unsigned long dirty_limit_tstamp;
	unsigned long dirty_limit;
};

/**
 * wb_domain_size_changed - memory available to a wb_domain has changed
 * @dom: wb_domain of interest
 *
 * This function should be called when the amount of memory available to
 * @dom has changed.  It resets @dom's dirty limit parameters to prevent
 * the past values which don't match the current configuration from skewing
 * dirty throttling.  Without this, when memory size of a wb_domain is
 * greatly reduced, the dirty throttling logic may allow too many pages to
 * be dirtied leading to consecutive unnecessary OOMs and may get stuck in
 * that situation.
 */
static inline void wb_domain_size_changed(struct wb_domain *dom)
{
	spin_lock(&dom->lock);
	dom->dirty_limit_tstamp = jiffies;
	dom->dirty_limit = 0;
	spin_unlock(&dom->lock);
}

/*
 * fs/fs-writeback.c
 */	
struct bdi_writeback;
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
							enum wb_reason reason);
bool try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
bool try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
				   enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
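
/*
 * Illustrative sketch (example only, assumes a filesystem holding a
 * struct super_block *sb): to kick asynchronous writeback when the fs
 * runs low on free space,
 *
 *	try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
 *
 * which silently does nothing if s_umount is contended.  Callers that
 * must not return before the data hits disk (sync(2)) use
 * sync_inodes_sb(sb) instead.
 */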

/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
	might_sleep();
	wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/cgroup.h>
#include <linux/bio.h>

void __inode_attach_wb(struct inode *inode, struct page *page);
void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
				 struct inode *inode)
	__releases(&inode->i_lock);
void wbc_detach_inode(struct writeback_control *wbc);
void wbc_account_io(struct writeback_control *wbc, struct page *page,
		    size_t bytes);

/**
 * inode_attach_wb - associate an inode with its wb
 * @inode: inode of interest
 * @page: page being dirtied (may be NULL)
 *
 * If @inode doesn't have its wb, associate it with the wb matching the
 * memcg of @page or, if @page is NULL, %current.  May be called w/ or w/o
 * @inode->i_lock.
 */
static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
	if (!inode->i_wb)
		__inode_attach_wb(inode, page);
}

/**
 * inode_detach_wb - disassociate an inode from its wb
 * @inode: inode of interest
 *
 * @inode is being freed.  Detach from its wb.
 */
static inline void inode_detach_wb(struct inode *inode)
{
	if (inode->i_wb) {
		wb_put(inode->i_wb);
		inode->i_wb = NULL;
	}
}

/**
 * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
 * @wbc: writeback_control of interest
 * @inode: target inode
 *
 * This function is to be used by __filemap_fdatawrite_range(), which is an
 * alternative entry point into writeback code, and first ensures @inode is
 * associated with a bdi_writeback and attaches it to @wbc.
 */
static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
					       struct inode *inode)
{
	spin_lock(&inode->i_lock);
	inode_attach_wb(inode, NULL);
	wbc_attach_and_unlock_inode(wbc, inode);
}

/**
 * wbc_init_bio - writeback-specific initialization of bio
 * @wbc: writeback_control for the writeback in progress
 * @bio: bio to be initialized
 *
 * @bio is a part of the writeback in progress controlled by @wbc.  Perform
 * writeback specific initialization.  This is used to apply the cgroup
 * writeback context.
 */
static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
	/*
	 * pageout() path doesn't attach @wbc to the inode being written
	 * out.  This is intentional as we don't want the function to block
	 * behind a slow cgroup.  Ultimately, we want pageout() to kick off
	 * regular writeback instead of writing things out itself.
	 */
	if (wbc->wb)
		bio_associate_blkcg(bio, wbc->wb->blkcg_css);
}

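/*
 * Illustrative sketch (not part of this header): the usual per-inode
 * sequence a writeback path follows with the helpers above, roughly
 * what wbc_attach_fdatawrite_inode() + do_writepages() amount to.  The
 * function name is hypothetical; kept under #if 0, example only.
 */
#if 0
static int example_write_inode_pages(struct inode *inode,
				     struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode->i_lock);
	inode_attach_wb(inode, NULL);		/* ensure inode->i_wb exists */
	wbc_attach_and_unlock_inode(wbc, inode);	/* drops i_lock */

	ret = do_writepages(inode->i_mapping, wbc);

	wbc_detach_inode(wbc);		/* settle foreign inode detection */
	return ret;
}
#endif
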
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
}

static inline void inode_detach_wb(struct inode *inode)
{
}

static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
					       struct inode *inode)
	__releases(&inode->i_lock)
{
	spin_unlock(&inode->i_lock);
}

static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
					       struct inode *inode)
{
}

static inline void wbc_detach_inode(struct writeback_control *wbc)
{
}

static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
}

static inline void wbc_account_io(struct writeback_control *wbc,
				  struct page *page, size_t bytes)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * mm/page-writeback.c
 */
#ifdef CONFIG_BLOCK
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_sync(struct work_struct *work);
void laptop_mode_timer_fn(unsigned long data);
#else
static inline void laptop_sync_completion(void) { }
#endif
void throttle_vm_writeout(gfp_t gfp_mask);
bool zone_dirty_ok(struct zone *zone);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom);
#endif

extern struct wb_domain global_wb_domain;

/* These are exported to sysctl. */
extern int dirty_background_ratio;
extern unsigned long dirty_background_bytes;
extern int vm_dirty_ratio;
extern unsigned long vm_dirty_bytes;
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern unsigned int dirtytime_expire_interval;
extern int vm_highmem_is_dirtyable;
extern int block_dump;
extern int laptop_mode;

extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int dirty_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int dirty_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
int dirtytime_interval_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp, loff_t *ppos);

struct ctl_table;
int dirty_writeback_centisecs_handler(struct ctl_table *, int,
				      void __user *, size_t *, loff_t *);

void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);

void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
void page_writeback_init(void);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
bool wb_over_bg_thresh(struct bdi_writeback *wb);

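/*
 * Illustrative sketch (example only, mirrors generic_perform_write()):
 * a buffered write loop calls balance_dirty_pages_ratelimited() once
 * per dirtied page so that heavy dirtiers are throttled against the
 * limits above.  The example_* helpers are hypothetical.
 */
#if 0
	while (example_have_more_data(iter)) {
		example_copy_and_dirty_one_page(mapping, iter);
		balance_dirty_pages_ratelimited(mapping);
	}
#endif
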
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
				void *data);

int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc);
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end);
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data);
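
/*
 * Illustrative sketch (not part of this header): a minimal ->writepages()
 * built on write_cache_pages(), essentially what generic_writepages()
 * does internally.  The example_* names are hypothetical.
 */
#if 0
static int example_writepage(struct page *page, struct writeback_control *wbc,
			     void *data)
{
	struct address_space *mapping = data;

	return mapping->a_ops->writepage(page, wbc);
}

static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	return write_cache_pages(mapping, wbc, example_writepage, mapping);
}
#endif
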
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
void writeback_set_ratelimit(void);

void account_page_redirty(struct page *page);

#endif		/* WRITEBACK_H */