#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
};
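
/*
 * These bits are manipulated only with atomic bitops, per the rule on
 * bdi_writeback.state below.  An illustrative sketch of claiming and
 * releasing a writeback pass (not a helper defined in this header):
 *
 *	if (!test_and_set_bit(WB_writeback_running, &wb->state)) {
 *		... do one writeback pass ...
 *		clear_bit(WB_writeback_running, &wb->state);
 *	}
 */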

enum wb_congested_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
};

typedef int (congested_fn)(void *, int);
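
/*
 * Stacked devices (md/dm) have no request queue with congestion state
 * of its own, so the bdi carries a congested_fn instead: it is passed
 * bdi->congested_data and a mask of (1 << WB_async_congested) /
 * (1 << WB_sync_congested) bits and returns which of them are set.
 * A caller consults it roughly as wb_congested() in linux/backing-dev.h
 * does:
 *
 *	if (bdi->congested_fn)
 *		return bdi->congested_fn(bdi->congested_data, cong_bits);
 *	return wb->congested->state & cong_bits;
 */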

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
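
/*
 * WB_STAT_BATCH is the percpu_counter batch size for the stats above:
 * each CPU may accumulate up to that many events locally before the
 * delta is folded into the global count, keeping updates cheap while
 * bounding the error of the approximate total.  A stat update then
 * looks like the following sketch (the real helpers, e.g.
 * __add_wb_stat(), live in linux/backing-dev.h):
 *
 *	__percpu_counter_add(&wb->stat[WB_RECLAIMABLE], 1, WB_STAT_BATCH);
 */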

/*
 * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
 * wb's can operate mostly independently but should share the congested
 * state.  To facilitate such sharing, the congested state is tracked using
 * the following struct which is created on demand, indexed by blkcg ID on
 * its bdi, and refcounted.
 */
struct bdi_writeback_congested {
	unsigned long state;		/* WB_[a]sync_congested flags */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct backing_dev_info *bdi;	/* the associated bdi */
	atomic_t refcnt;		/* nr of attached wb's and blkg */
	int blkcg_id;			/* ID of the associated blkcg */
	struct rb_node rb_node;		/* on bdi->cgwb_congestion_tree */
#endif
};
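
/*
 * Finding the shared congested state for a blkcg is a plain rb-tree
 * walk keyed on blkcg_id.  A simplified sketch of the lookup step
 * (modeled on wb_congested_get_create() in mm/backing-dev.c; locking
 * and the create-on-miss path are omitted):
 *
 *	struct rb_node *node = bdi->cgwb_congested_tree.rb_node;
 *
 *	while (node) {
 *		struct bdi_writeback_congested *congested =
 *			rb_entry(node, struct bdi_writeback_congested, rb_node);
 *
 *		if (blkcg_id < congested->blkcg_id)
 *			node = node->rb_left;
 *		else if (blkcg_id > congested->blkcg_id)
 *			node = node->rb_right;
 *		else
 *			return congested;
 *	}
 */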

/*
 * Each wb (bdi_writeback) can perform writeback operations and is
 * measured and throttled independently.  Without cgroup writeback,
 * each bdi (backing_dev_info) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg.  This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up
 * to the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg.  As the corresponding blkcg for a memcg may
 * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
 * is tested for blkcg after lookup and removed from index on mismatch so
 * that a new wb for the combination can be created.
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	struct bdi_writeback_congested *congested;

	unsigned long bw_time_stamp;	/* last time write bw is updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.  All
	 * the bdi tasks' dirty rates are curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable than the
	 * latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct cgroup_subsys_state *memcg_css; /* the associated memcg */
	struct cgroup_subsys_state *blkcg_css; /* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */

	union {
		struct work_struct release_work;
		struct rcu_head rcu;
	};
#endif
};
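
/*
 * The lookup-and-verify scheme described above amounts to: look the wb
 * up by memcg ID, then check that its blkcg_css still matches the
 * memcg's effective blkcg.  A simplified sketch (modeled on
 * wb_find_current() in linux/backing-dev.h; refcounting and RCU
 * omitted):
 *
 *	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 *	if (wb && wb->blkcg_css != task_css(current, blkio_cgrp_id))
 *		wb = NULL;
 *
 * A NULL result tells the caller that the pairing went stale and a new
 * wb must be created for the memcg - blkcg combination.
 */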

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */

	char *name;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;  /* the root writeback info for this bdi */
	struct bdi_writeback_congested wb_congested; /* its congested state */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct rb_root cgwb_congested_tree; /* their congested states */
	atomic_t usage_cnt; /* counts both cgwbs and cgwb_congested's */
#endif
	struct device *dev;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
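
/*
 * Users without their own request queue mark the bdi congested
 * directly, selecting the queue with BLK_RW_SYNC / BLK_RW_ASYNC, e.g.:
 *
 *	set_bdi_congested(bdi, BLK_RW_ASYNC);
 *	...
 *	clear_bdi_congested(bdi, BLK_RW_ASYNC);
 */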

#endif	/* __LINUX_BACKING_DEV_DEFS_H */