/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkcg, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkcg);

static struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkcg, css);
}

struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkcg, css);
	return task_blkcg(current);
}
EXPORT_SYMBOL_GPL(bio_blkcg);

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

static size_t blkg_pd_size(const struct blkcg_policy *pol)
{
	return sizeof(struct blkg_policy_data) + pol->pdata_size;
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->ops.pd_exit_fn)
			pol->ops.pd_exit_fn(blkg);

		kfree(pd);
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(blkg_pd_size(pol), GFP_ATOMIC, q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}
		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkcg_policy_enabled(blkg->q, pol))
			pol->ops.pd_init_fn(blkg);
	}

	return blkg;
}

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
				      struct request_queue *q)
{
	struct blkcg_gq *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
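
/*
 * Usage sketch (illustrative, not part of the original file): lookups
 * must happen inside an RCU read-side critical section, and a reference
 * should be taken (blkg_get() from blk-cgroup.h) before the blkg is
 * used outside of it:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(bio_blkcg(bio), q);
 *	if (blkg)
 *		blkg_get(blkg);
 *	rcu_read_unlock();
 */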

static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
					     struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	blkg = __blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);
out:
	return blkg;
}

struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
	return __blkg_lookup_create(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkcg *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something is wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, the group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in an RCU manner.  But having an RCU lock does
	 * not mean that one can access all the fields of blkg and assume
	 * these are valid.  For example, don't try to follow throtl_data
	 * and request queue links.
	 *
	 * Having a reference to blkg under an RCU read lock allows access
	 * only to values local to groups, like group stats and group rate
	 * limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
			     u64 val)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->ops.pd_reset_stats_fn)
				pol->ops.pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * the cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *, void *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid]->pdata, data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pdata.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, void *pdata, u64 v)
{
	const char *dname = blkg_dev_name(pdata_to_blkg(pdata));

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pdata.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, void *pdata,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pdata_to_blkg(pdata));
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @off: offset to the blkg_stat in @pdata
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, void *pdata, int off)
{
	return __blkg_prfill_u64(sf, pdata, blkg_stat_read(pdata + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @off: offset to the blkg_rwstat in @pdata
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, void *pdata, int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read(pdata + off);

	return __blkg_prfill_rwstat(sf, pdata, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

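/*
 * Putting the prfill helpers together (illustrative sketch, not part of
 * the original file): a policy typically implements the
 * cftype->read_seq_string callback by pairing blkcg_print_blkgs() with
 * one of the prfill helpers above.  Assuming a hypothetical policy
 * "foo" whose pdata contains a struct blkg_rwstat named "serviced":
 *
 *	static int foo_print_serviced(struct cgroup *cgrp,
 *				      struct cftype *cft, struct seq_file *sf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *
 *		blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat,
 *				  &blkcg_policy_foo,
 *				  offsetof(struct foo_pdata, serviced), true);
 *		return 0;
 *	}
 */
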
/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If the queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but the queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
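
/*
 * A sketch of the intended prep/finish pairing (illustrative, assuming
 * a hypothetical policy "foo"; the helper names are placeholders): a
 * cftype write handler parses "MAJ:MIN VAL" input with blkg_conf_prep(),
 * updates the group while holding the locks it returns with, and drops
 * them through blkg_conf_finish():
 *
 *	static int foo_set_conf(struct cgroup *cgrp, struct cftype *cft,
 *				const char *buf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		(update ctx.blkg's policy data using ctx.v here)
 *
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 */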

struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is responsible
 * for shooting down all blkgs associated with @cgroup.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkcg_pre_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkcg_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkcg *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();

	return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkcg_create,
	.can_attach = blkcg_can_attach,
	.pre_destroy = blkcg_pre_destroy,
	.destroy = blkcg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkcg_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	blk_queue_bypass_start(q);

	/* make sure the root blkg exists and count the existing blkgs */
	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkg = __blkg_lookup_create(&blkcg_root, q);
	rcu_read_unlock();

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}
	q->root_blkg = blkg;

	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;

	spin_unlock_irq(q->queue_lock);

	/* allocate policy_data for all existing blkgs */
	while (cnt--) {
		pd = kzalloc_node(blkg_pd_size(pol), GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds.  With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pol->ops.pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
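
/*
 * A policy normally activates itself while initializing for a queue,
 * the way blk_throtl_init() does for the throttle policy.  A minimal
 * sketch (assuming a hypothetical policy "foo"):
 *
 *	int foo_init_queue(struct request_queue *q)
 *	{
 *		return blkcg_activate_policy(q, &blkcg_policy_foo);
 *	}
 */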

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	/* if no policy is left, no need for blkgs - shoot them down */
	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
		blkg_destroy_all(q);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->ops.pd_exit_fn)
			pol->ops.pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
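
/*
 * A sketch of how a policy module plugs into the registration hooks
 * above (illustrative; "foo" and all of its helpers are placeholders):
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.ops = {
 *			.pd_init_fn	= foo_pd_init,
 *			.pd_exit_fn	= foo_pd_exit,
 *		},
 *		.pdata_size	= sizeof(struct foo_pdata),
 *		.cftypes	= foo_files,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_foo);
 *	}
 */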