/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(pd);
	}

	blk_exit_rl(&blkg->rl);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;

		/* invoke per-policy init */
		if (blkcg_policy_enabled(blkg->q, pol))
			pol->pd_init_fn(blkg);
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
				      struct request_queue *q)
{
	struct blkcg_gq *blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that we
	 * may not be holding queue_lock and thus are not sure whether
	 * @blkg from blkg_tree has already been removed or not, so we
	 * can't update hint to the lookup result.  Leave it to the caller.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q)
		return blkg;

	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
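
/*
 * Illustrative usage sketch (not part of the original file): a hot-path
 * caller holds the RCU read lock across the lookup and any use of the
 * result.  bio_blkcg() is assumed to resolve the issuing bio's blkcg:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(bio_blkcg(bio), q);
 *	if (blkg)
 *		pd = blkg->pd[pol->plid];	(access per-policy data)
 *	rcu_read_unlock();
 */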

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
					     struct request_queue *q,
					     struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* lookup and update hint on success, see __blkg_lookup() for details */
	blkg = __blkg_lookup(blkcg, q);
	if (blkg) {
		rcu_assign_pointer(blkcg->blkg_hint, blkg);
		goto out_free;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css)) {
		blkg = ERR_PTR(-EINVAL);
		goto out_free;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
		if (unlikely(!new_blkg)) {
			blkg = ERR_PTR(-ENOMEM);
			goto out_put;
		}
	}
	blkg = new_blkg;

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);
	}
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	blkg = ERR_PTR(ret);
out_put:
	css_put(&blkcg->css);
out_free:
	blkg_free(new_blkg);
	return blkg;
}

struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
	return __blkg_lookup_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something is wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in RCU manner.  But having an RCU lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid.  For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an RCU read lock allows access
	 * only to values local to groups, like group stats and group rate
	 * limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

/*
 * The "next" function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}
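
/*
 * Illustrative sketch (assumption: blk.h defines a blk_queue_for_each_rl()
 * macro wrapping this iterator roughly as below):
 *
 *	#define blk_queue_for_each_rl(rl, q)	\
 *		for ((rl) = &(q)->root_rl; (rl); \
 *		     (rl) = __blk_queue_next_rl((rl), (q)))
 *
 * i.e. iteration starts from @q->root_rl and then steps through every
 * blkg's rl until this function returns NULL.
 */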

static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
			     u64 val)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
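
/*
 * Illustrative sketch (not part of the original file): a policy would
 * typically wrap blkcg_print_blkgs() in its read_seq_string method.  The
 * "foo" names are hypothetical and assume struct foo_pd embeds its
 * struct blkg_policy_data as the first member:
 *
 *	static int foo_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *				  struct seq_file *sf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *
 *		blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
 *				  &blkcg_policy_foo,
 *				  offsetof(struct foo_pd, stat), false);
 *		return 0;
 *	}
 */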

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
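
/*
 * Illustrative sketch (not part of the original file): a per-device config
 * write would bracket its update with the two helpers above.  The "foo"
 * names and the weight field are hypothetical:
 *
 *	static int foo_set_weight_device(struct cgroup *cgrp,
 *					 struct cftype *cft, const char *buf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *		struct blkg_conf_ctx ctx;
 *		struct foo_pd *pd;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		pd = (struct foo_pd *)ctx.blkg->pd[blkcg_policy_foo.plid];
 *		pd->weight = ctx.v;
 *
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 */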

struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.  blkgs
 * should be removed while holding both q and blkcg locks.  As blkcg lock
 * is nested inside q lock, this function performs reverse double lock
 * dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkcg_pre_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkcg_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkcg *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();

	return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkcg_create,
	.can_attach = blkcg_can_attach,
	.pre_destroy = blkcg_pre_destroy,
	.destroy = blkcg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkcg_files,
	.module = THIS_MODULE,

	/*
	 * blkio subsystem is utterly broken in terms of hierarchy support.
	 * It treats all cgroups equally regardless of where they're
	 * located in the hierarchy - all cgroups are treated as if they're
	 * right below the root.  Fix it and remove the following.
	 */
	.broken_hierarchy = true,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;
	bool preloaded;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* preallocations for root blkg */
	blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	blk_queue_bypass_start(q);

	/* make sure the root blkg exists and count the existing blkgs */
	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}
	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;

	spin_unlock_irq(q->queue_lock);

	/* allocate policy_data for all existing blkgs */
	while (cnt--) {
		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds.  With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
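
/*
 * Illustrative sketch (not part of the original file): an elevator or
 * other policy user would typically activate the policy while setting up
 * a queue and deactivate it on teardown.  "foo" is hypothetical:
 *
 *	static int foo_init_queue(struct request_queue *q)
 *	{
 *		int ret;
 *
 *		ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *		if (ret)
 *			return ret;
 *		(set up elevator-private data here)
 *		return 0;
 *	}
 */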

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	/* if no policy is left, no need for blkgs - shoot them down */
	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
		blkg_destroy_all(q);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
		return -EINVAL;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
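
/*
 * Illustrative sketch (not part of the original file): a modular policy
 * registers itself on module init and unregisters on exit.  All "foo"
 * names are hypothetical; the fields shown are the ones this file
 * actually consults (pd_size, cftypes, pd_init_fn, pd_exit_fn):
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size	= sizeof(struct foo_pd),
 *		.cftypes	= foo_files,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_exit_fn	= foo_pd_exit,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_foo);
 *	}
 */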

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);