#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
};

enum wb_congested_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
};

typedef int (congested_fn)(void *, int);
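
/*
 * Hedged sketch (names are illustrative, not from this header): a
 * stacked driver such as md/dm can install a congested_fn so that
 * congestion queries are answered by the driver itself.  The void *
 * argument receives ->congested_data and the int argument is a mask
 * of 1 << WB_[a]sync_congested bits to test:
 *
 *	static int example_congested(void *data, int bdi_bits)
 *	{
 *		struct example_dev *dev = data;		// hypothetical type
 *
 *		return dev->backlogged ? bdi_bits : 0;	// congested on all bits
 *	}
 *
 *	bdi->congested_fn = example_congested;
 *	bdi->congested_data = dev;
 */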

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};
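
/*
 * Each wb_stat_item is backed by a percpu_counter embedded in
 * bdi_writeback below; an approximate total can be read with the
 * regular percpu_counter API, e.g.:
 *
 *	s64 nr_wb = percpu_counter_read_positive(&wb->stat[WB_WRITEBACK]);
 */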

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
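
/*
 * Worked example: with nr_cpu_ids == 16, ilog2(16) == 4, so
 * WB_STAT_BATCH == 8 * (1 + 4) == 40.  Per-cpu deltas accumulate up to
 * the batch before being folded into the global count, trading a bit
 * of accuracy for less cacheline bouncing on larger machines.
 */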

/*
 * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
 * wb's can operate mostly independently but should share the congested
 * state.  To facilitate such sharing, the congested state is tracked using
 * the following struct which is created on demand, indexed by blkcg ID on
 * its bdi, and refcounted.
 */
struct bdi_writeback_congested {
	unsigned long state;		/* WB_[a]sync_congested flags */
	atomic_t refcnt;		/* nr of attached wb's and blkg */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct backing_dev_info *bdi;	/* the associated bdi */
	int blkcg_id;			/* ID of the associated blkcg */
	struct rb_node rb_node;		/* on bdi->cgwb_congested_tree */
#endif
};
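
/*
 * Hedged sketch of the on-demand sharing described above (the helper
 * names are hypothetical): a wb needing the congested state for its
 * blkcg looks it up by blkcg ID on its bdi and takes a reference,
 * allocating and inserting a new node only on a miss:
 *
 *	congested = find_congested(bdi, blkcg_id);	// rb-tree walk
 *	if (congested)
 *		atomic_inc(&congested->refcnt);		// another attached wb
 *	else
 *		congested = alloc_and_insert(bdi, blkcg_id);
 */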

/*
 * Each wb (bdi_writeback) performs writeback operations and is measured
 * and throttled independently.  Without cgroup writeback, each bdi
 * (backing_dev_info) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg.  This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up to
 * the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg.  As the corresponding blkcg for a memcg may
 * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
 * is tested for blkcg after lookup and removed from the index on mismatch so
 * that a new wb for the combination can be created.
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	struct bdi_writeback_congested *congested;

	unsigned long bw_time_stamp;	/* last time write bw is updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.
	 * All the bdi tasks' dirty rates will be curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother/more stable than the latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */

	unsigned long dirty_sleep;	/* last wait */

	struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct fprop_local_percpu memcg_completions;
	struct cgroup_subsys_state *memcg_css; /* the associated memcg */
	struct cgroup_subsys_state *blkcg_css; /* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */

	union {
		struct work_struct release_work;
		struct rcu_head rcu;
	};
#endif
};
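
/*
 * Hedged sketch of the lookup-and-validate rule from the comment above
 * (variable names are illustrative): a cgroup wb found by memcg ID is
 * reused only while its blkcg association still matches; otherwise it
 * is dropped from the index so a fresh wb can be created for the pair:
 *
 *	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css_id);
 *	if (wb && wb->blkcg_css != cur_blkcg_css) {
 *		// blkcg changed higher up in the hierarchy
 *		radix_tree_delete(&bdi->cgwb_tree, memcg_css_id);
 *		wb = NULL;	// caller allocates a replacement wb
 *	}
 */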

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_SIZE units */
	unsigned long io_pages;	/* max allowed IO size */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */

	char *name;

	struct kref refcnt;	/* Reference counter for the structure */
	unsigned int capabilities; /* Device capabilities */
	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	/*
	 * Sum of avg_write_bandwidth of wbs with dirty inodes.  > 0 if there
	 * are any dirty wbs, which bdi_has_dirty_io() depends on.
	 */
	atomic_long_t tot_write_bandwidth;

	struct bdi_writeback wb;  /* the root writeback info for this bdi */
	struct list_head wb_list; /* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct rb_root cgwb_congested_tree; /* their congested states */
	atomic_t usage_cnt; /* counts both cgwbs and cgwb_congested's */
#else
	struct bdi_writeback_congested *wb_congested;
#endif
	wait_queue_head_t wb_waitq;

	struct device *dev;
	struct device *owner;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};
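
/*
 * As the comment on tot_write_bandwidth notes, the sum stays above
 * zero exactly while some wb on this bdi has dirty inodes, so
 * bdi_has_dirty_io() can be a single atomic read (a minimal sketch of
 * how the backing-dev.h helper can be implemented):
 *
 *	static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
 *	{
 *		return atomic_long_read(&bdi->tot_write_bandwidth);
 *	}
 */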

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
void set_wb_congested(struct bdi_writeback_congested *congested, int sync);

static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	clear_wb_congested(bdi->wb.congested, sync);
}

static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	set_wb_congested(bdi->wb.congested, sync);
}
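
/*
 * Usage sketch: a driver whose queue is filling up flags the bdi
 * congested so writeback and throttling back off, and clears the flag
 * once the backlog drains; BLK_RW_ASYNC/BLK_RW_SYNC select which
 * wb_congested_state bit is affected:
 *
 *	set_bdi_congested(bdi, BLK_RW_ASYNC);	// writes are backing up
 *	...
 *	clear_bdi_congested(bdi, BLK_RW_ASYNC);	// pressure relieved
 */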

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_put(&wb->refcnt);
}
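
/*
 * Usage sketch: cgroup wbs are typically found under RCU, so a lookup
 * is paired with wb_tryget() before the wb is used outside the RCU
 * section; a wb whose percpu ref is already draining is skipped:
 *
 *	rcu_read_lock();
 *	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css_id);
 *	if (wb && !wb_tryget(wb))
 *		wb = NULL;		// wb is being destroyed
 *	rcu_read_unlock();
 *
 *	if (wb) {
 *		...			// use @wb
 *		wb_put(wb);		// drop the wb_tryget() ref
 *	}
 */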

/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb is unlinked and being drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return percpu_ref_is_dying(&wb->refcnt);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
	return false;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

#endif	/* __LINUX_BACKING_DEV_DEFS_H */