/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

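/* Translate a generic cgroup into its embedded blkio-specific state. */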
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

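/* Accumulate the disk time used by this group. */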
void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
						unsigned long time)
{
	blkg->time += time;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_stats);

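/*
 * Hash a blkio_group into its cgroup's list under blkcg->lock and record
 * the scheduler's opaque key, the css id and the device number for later
 * lookups.
 */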
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

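/* Unhash a group from its cgroup's list. Caller must hold blkcg->lock. */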
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that the blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

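/* Generates a read handler for a simple per-cgroup u64 field. */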
#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				       struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

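/*
 * Update the cgroup weight and propagate the new value to every group in
 * this cgroup through each registered IO policy.
 */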
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
					blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

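/*
 * Generates a seq_file read handler that prints one "major:minor value"
 * line per blkio_group hashed in the cgroup.
 */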
#define SHOW_FUNCTION_PER_GROUP(__VAR)					\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
			struct cftype *cftype, struct seq_file *m)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev)						\
			seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev),	\
				 MINOR(blkg->dev), blkg->__VAR);	\
	}								\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time);
SHOW_FUNCTION_PER_GROUP(sectors);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue);
#endif
#undef SHOW_FUNCTION_PER_GROUP

#ifdef CONFIG_DEBUG_BLK_CGROUP
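/* Debug-only counter of how many times this group has been dequeued. */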
void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_dequeue_stats);
#endif

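/* Control files exposed in every blkio cgroup directory. */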
struct cftype blkio_files[] = {
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_seq_string = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_seq_string = blkiocg_sectors_read,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "dequeue",
		.read_seq_string = blkiocg_dequeue_read,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

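/*
 * Cgroup teardown: unhash every remaining blkio_group, let each registered
 * policy unlink its private data, then free the per-cgroup state.
 */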
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);
	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked as the associated cgroup is
	 * going away. Let all the IO controlling policies know about this
	 * event.
	 *
	 * Currently this is a static call to one io controlling policy. Once
	 * we have more policies in place, we need some dynamic registration
	 * of callback functions.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);
	goto remove_entry;
done:
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

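/*
 * Allocate per-cgroup state on cgroup creation. The root cgroup reuses the
 * statically allocated blkio_root_cgroup; hierarchies deeper than one level
 * below the root are rejected.
 */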
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support a hierarchy deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

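/* Flag the io_context so the IO scheduler notices the cgroup change. */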
static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

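/*
 * IO control policies register here to receive weight-update and group
 * unlink callbacks.
 */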
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

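/* Register the blkio subsystem with the cgroup core at module load. */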
static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");