backing-dev.h 9.7 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/flex_proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/writeback.h>
#include <linux/atomic.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>

struct page;
struct device;
struct dentry;

L
Linus Torvalds 已提交
27 28 29 30
/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
	BDI_async_congested,	/* The async (write) queue is getting full */
	BDI_sync_congested,	/* The sync queue is getting full */
	BDI_registered,		/* bdi_register() was done */
	BDI_writeback_running,	/* Writeback is in progress */
};

/* Callback a stacked driver (md/dm) supplies to report congestion. */
typedef int (congested_fn)(void *, int);

39
/* Per-bdi statistics, kept as percpu counters in backing_dev_info. */
enum bdi_stat_item {
	BDI_RECLAIMABLE,
	BDI_WRITEBACK,
	BDI_DIRTIED,
	BDI_WRITTEN,
	NR_BDI_STAT_ITEMS
};

/* Per-CPU batch before a counter update is folded into the global sum. */
#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

49
struct bdi_writeback {
50
	struct backing_dev_info *bdi;	/* our parent bdi */
51

52
	unsigned long last_old_flush;	/* last old data flush */
53

54
	struct delayed_work dwork;	/* work item used for writeback */
55 56 57
	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
58
	spinlock_t list_lock;		/* protects the b_* lists */
59 60
};

L
Linus Torvalds 已提交
61
struct backing_dev_info {
62
	struct list_head bdi_list;
L
Linus Torvalds 已提交
63 64 65 66 67
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;	/* Always use atomic bitops on this */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */
68

69 70
	char *name;

71
	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
P
Peter Zijlstra 已提交
72

73
	unsigned long bw_time_stamp;	/* last time write bw is updated */
W
Wu Fengguang 已提交
74
	unsigned long dirtied_stamp;
75 76 77 78
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw */

W
Wu Fengguang 已提交
79 80 81
	/*
	 * The base dirty throttle rate, re-calculated on every 200ms.
	 * All the bdi tasks' dirty rate will be curbed under it.
82 83
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much more smooth/stable than the latter.
W
Wu Fengguang 已提交
84 85
	 */
	unsigned long dirty_ratelimit;
86
	unsigned long balanced_dirty_ratelimit;
W
Wu Fengguang 已提交
87

88
	struct fprop_local_percpu completions;
P
Peter Zijlstra 已提交
89
	int dirty_exceeded;
90

91
	unsigned int min_ratio;
92
	unsigned int max_ratio, max_prop_frac;
93

94
	struct bdi_writeback wb;  /* default writeback info for this bdi */
J
Jan Kara 已提交
95
	spinlock_t wb_lock;	  /* protects work_list & wb.dwork scheduling */
96

97 98 99
	struct list_head work_list;

	struct device *dev;
100

101 102
	struct timer_list laptop_mode_wb_timer;

103 104 105 106
#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
L
Linus Torvalds 已提交
107 108
};

M
Mikulas Patocka 已提交
109
int __must_check bdi_init(struct backing_dev_info *bdi);
110 111
void bdi_destroy(struct backing_dev_info *bdi);

112
__printf(3, 4)
113 114 115 116
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
M
Mikulas Patocka 已提交
117
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
118 119
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason);
120
void bdi_start_background_writeback(struct backing_dev_info *bdi);
121
void bdi_writeback_workfn(struct work_struct *work);
122
int bdi_has_dirty_io(struct backing_dev_info *bdi);
123
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
124

125
extern spinlock_t bdi_lock;
126 127
extern struct list_head bdi_list;

128 129
extern struct workqueue_struct *bdi_wq;

130 131 132 133 134 135 136
/* True if this writeback context still has inodes queued on any list. */
static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
	if (!list_empty(&wb->b_dirty))
		return 1;
	if (!list_empty(&wb->b_io))
		return 1;
	return !list_empty(&wb->b_more_io);
}

137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188
/*
 * Add @amount to a per-bdi counter with per-CPU batching.
 * Double-underscore variant: callers such as inc_bdi_stat() disable
 * interrupts around it themselves.
 */
static inline void __add_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item, s64 amount)
{
	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

/* Increment a per-bdi counter; irq handling is the caller's job. */
static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, 1);
}

/* Increment a per-bdi counter, with interrupts disabled around the update. */
static inline void inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long irq_flags;

	local_irq_save(irq_flags);
	__add_bdi_stat(bdi, item, 1);
	local_irq_restore(irq_flags);
}

/* Decrement a per-bdi counter; irq handling is the caller's job. */
static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, -1);
}

/* Decrement a per-bdi counter, with interrupts disabled around the update. */
static inline void dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long irq_flags;

	local_irq_save(irq_flags);
	__add_bdi_stat(bdi, item, -1);
	local_irq_restore(irq_flags);
}

/*
 * Cheap, possibly stale read of a per-bdi counter; clamped to be
 * non-negative (see percpu_counter_read_positive()).
 */
static inline s64 bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

/*
 * Precise sum of a per-bdi counter across CPUs; irq handling is the
 * caller's job (see bdi_stat_sum() below).
 */
static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

/* Precise sum of a per-bdi counter, with interrupts disabled. */
static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __bdi_stat_sum(bdi, item);
	local_irq_restore(flags);

	return sum;
}

200 201
extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
	/* each CPU may hold up to BDI_STAT_BATCH unflushed counts */
	return nr_cpu_ids * BDI_STAT_BATCH;
#else
	return 1;
#endif
}
L
Linus Torvalds 已提交
213

214
/* Set this bdi's minimum/maximum share of the dirty limit (percent). */
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
216

L
Linus Torvalds 已提交
217 218
/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs)
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 *
 * These flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:       Can be mapped for reading
 * BDI_CAP_WRITE_MAP:      Can be mapped for writing
 * BDI_CAP_EXEC_MAP:       Can be mapped for execution
 *
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_MAP_COPY	0x00000004
#define BDI_CAP_MAP_DIRECT	0x00000008
#define BDI_CAP_READ_MAP	0x00000010
#define BDI_CAP_WRITE_MAP	0x00000020
#define BDI_CAP_EXEC_MAP	0x00000040
#define BDI_CAP_NO_ACCT_WB	0x00000080
#define BDI_CAP_STABLE_WRITES	0x00000200
#define BDI_CAP_STRICTLIMIT	0x00000400
253

L
Linus Torvalds 已提交
254 255 256
#define BDI_CAP_VMFLAGS \
	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

/* The three map capabilities must track the corresponding VM_MAY* bits. */
#if defined(VM_MAYREAD) && \
	(BDI_CAP_READ_MAP != VM_MAYREAD || \
	 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
	 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif

extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;
L
Linus Torvalds 已提交
269 270 271 272 273 274 275 276 277 278 279 280

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
281
	return bdi_congested(bdi, 1 << BDI_sync_congested);
L
Linus Torvalds 已提交
282 283 284 285
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
286
	return bdi_congested(bdi, 1 << BDI_async_congested);
L
Linus Torvalds 已提交
287 288 289 290
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
291 292
	return bdi_congested(bdi, (1 << BDI_sync_congested) |
				  (1 << BDI_async_congested));
L
Linus Torvalds 已提交
293 294
}

295 296 297 298 299
/*
 * NOTE(review): these appear to be the values passed as the "sync"
 * argument of set/clear_bdi_congested() below — confirm against callers.
 */
enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

300 301 302
void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
303
long wait_iff_congested(struct zone *zone, int sync, long timeout);
304 305
int pdflush_proc_obsolete(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos);
L
Linus Torvalds 已提交
306

307 308 309 310 311
/* True if the device set BDI_CAP_STABLE_WRITES in its capabilities. */
static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return (bdi->capabilities & BDI_CAP_STABLE_WRITES) != 0;
}

312 313 314 315 316 317 318 319 320
/* True unless the device opted out of writeback (BDI_CAP_NO_WRITEBACK). */
static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return (bdi->capabilities & BDI_CAP_NO_WRITEBACK) == 0;
}

/* True unless dirty-page accounting is disabled (BDI_CAP_NO_ACCT_DIRTY). */
static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return (bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY) == 0;
}
L
Linus Torvalds 已提交
321

322 323 324 325 326 327
/* True when writeback pages should be accounted for this device. */
static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	unsigned int no_acct = BDI_CAP_NO_ACCT_WB | BDI_CAP_NO_WRITEBACK;

	return (bdi->capabilities & no_acct) == 0;
}
L
Linus Torvalds 已提交
328

329 330 331 332
/* Whether dirty pages of @mapping's backing device get written back. */
static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}
L
Linus Torvalds 已提交
333

334 335 336 337
/* Whether dirty pages of @mapping count toward VM dirty accounting. */
static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(mapping->backing_dev_info);
}
L
Linus Torvalds 已提交
338

339 340 341 342 343 344
/*
 * Yield the CPU and report success (0).
 * NOTE(review): the (void *word) signature looks like a wait_on_bit()
 * action callback — confirm against callers; @word is ignored here.
 */
static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

L
Linus Torvalds 已提交
345
#endif		/* _LINUX_BACKING_DEV_H */