/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		kfree(blkg->pd[i]);

	blk_exit_rl(&blkg->rl);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	atomic_set(&blkg->refcnt, 1);

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint)
{
	struct blkcg_gq *blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
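
/*
 * Example (illustrative sketch, not part of the original code): a typical
 * caller resolves a blkg inside an RCU read-side critical section; to use
 * the blkg past rcu_read_unlock() it must take a reference first:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		blkg_get(blkg);
 *	rcu_read_unlock();
 */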

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	struct bdi_writeback_congested *wb_congested;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -EINVAL;
		goto err_free_blkg;
	}

	wb_congested = wb_congested_get_create(&q->backing_dev_info,
					       blkcg->css.id, GFP_ATOMIC);
	if (!wb_congested) {
		ret = -ENOMEM;
		goto err_put_css;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_congested;
		}
	}
	blkg = new_blkg;
	blkg->wb_congested = wb_congested;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -EINVAL;
			goto err_put_congested;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_congested:
	wb_congested_put(wb_congested);
err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-EINVAL).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
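
/*
 * Example (illustrative sketch): creation-time lookup as the IO path might
 * do it, following the locking rules documented above.  Error handling is
 * elided and use_blkg() is a hypothetical placeholder:
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q);
 *	if (!IS_ERR(blkg))
 *		use_blkg(blkg);
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 */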

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something is wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg);
	}
	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
	int i;

	/* tell policies that this one is being freed */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);
	}

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	wb_congested_put(blkg->wb_congested);

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);

/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}
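
/*
 * Example (sketch): this iterator backs the blk_queue_for_each_rl() macro
 * defined alongside these helpers in blk-cgroup.h, which visits
 * @q->root_rl first and then every non-root blkg's request_list:
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		drain_or_inspect(rl);
 *
 * drain_or_inspect() is a hypothetical callback for illustration; callers
 * hold q->queue_lock while walking.
 */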

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
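
/*
 * Example (illustrative sketch): a policy's seq_file show method built on
 * blkcg_print_blkgs() and the blkg_prfill_stat() helper defined below.
 * "blkcg_policy_foo" and "struct foo_group_data" are hypothetical:
 *
 *	static int foo_print_time(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  blkg_prfill_stat, &blkcg_policy_foo,
 *				  offsetof(struct foo_group_data, time_stat),
 *				  false);
 *		return 0;
 *	}
 */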

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
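
/*
 * Example (illustrative sketch): dumping a per-device rwstat by pairing
 * blkg_prfill_rwstat() with blkcg_print_blkgs(); the offsetof() target is
 * hypothetical, and @show_total set to %true appends the "Total" line:
 *
 *	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
 *			  &blkcg_policy_foo,
 *			  offsetof(struct foo_group_data, serviced_rwstat),
 *			  true);
 */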
/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * Collect the blkg_stat specified by @off from @pd and all its online
 * descendants and return the sum.  The caller must be holding the queue
 * lock for online tests.
 */
u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
{
	struct blkcg_policy *pol = blkcg_policy[pd->plid];
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(pd->blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
		struct blkg_stat *stat = (void *)pos_pd + off;

		if (pos_blkg->online)
			sum += blkg_stat_read(stat);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * Collect the blkg_rwstat specified by @off from @pd and all its online
 * descendants and return the sum.  The caller must be holding the queue
 * lock for online tests.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off)
{
	struct blkcg_policy *pol = blkcg_policy[pd->plid];
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	struct blkg_rwstat sum = { };
	int i;

	lockdep_assert_held(pd->blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
		struct blkg_rwstat *rwstat = (void *)pos_pd + off;
		struct blkg_rwstat tmp;

		if (!pos_blkg->online)
			continue;

		tmp = blkg_rwstat_read(rwstat);

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			sum.cnt[i] += tmp.cnt[i];
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
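
/*
 * Example (illustrative sketch): a prfill callback printing a hierarchical
 * total by feeding blkg_stat_recursive_sum() into __blkg_prfill_u64().
 * The function name is hypothetical:
 *
 *	static u64 foo_prfill_time_recursive(struct seq_file *sf,
 *					     struct blkg_policy_data *pd,
 *					     int off)
 *	{
 *		return __blkg_prfill_u64(sf, pd,
 *					 blkg_stat_recursive_sum(pd, off));
 *	}
 */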

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);
/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
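
/*
 * Example (illustrative sketch): a policy's per-device write handler using
 * the prep/finish pair.  The handler signature, "blkcg_policy_foo" and
 * apply_weight() are hypothetical:
 *
 *	static int foo_set_weight_device(struct cgroup_subsys_state *css,
 *					 struct cftype *cft, const char *buf)
 *	{
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(css_to_blkcg(css), &blkcg_policy_foo,
 *				     buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		apply_weight(ctx.blkg, ctx.v);
 *
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 */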

struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and is responsible
 * for shooting down all blkgs associated with @css.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);

	wb_blkcg_offline(blkcg);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	if (blkcg != &blkcg_root) {
		int i;

		for (i = 0; i < BLKCG_MAX_POLS; i++)
			kfree(blkcg->pd[i]);
		kfree(blkcg);
	}
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	if (!parent_css) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg) {
		ret = ERR_PTR(-ENOMEM);
		goto free_blkcg;
	}

	mutex_lock(&blkcg_pol_mutex);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_size)
			continue;

		BUG_ON(blkcg->pd[i]);
		cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
		if (!cpd) {
			mutex_unlock(&blkcg_pol_mutex);
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->pd[i] = cpd;
		cpd->plid = i;
		pol->cpd_init_fn(blkcg);
	}

	mutex_unlock(&blkcg_pol_mutex);
done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		kfree(blkcg->pd[i]);

free_blkcg:
	kfree(blkcg);
	return ret;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);
	/*
	 * Make sure the root blkg exists and count the existing blkgs.  As
	 * @q is bypassing at this point, blkg_lookup_create() can't be
	 * used.  Open code insertion.
	 */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg)) {
		kfree(new_blkg);
		return PTR_ERR(blkg);
	}

	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;
	ret = blk_throtl_init(q);
	if (ret) {
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
	}
	return ret;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/*
	 * @q could be exiting and already have destroyed all blkgs as
	 * indicated by NULL root_blkg.  If so, don't confuse policies.
	 */
	if (!q->root_blkg)
		return;

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_subsys_state *css,
			    struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.legacy_cftypes = blkcg_files,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	LIST_HEAD(cpds);
	struct blkcg_gq *blkg;
	struct blkg_policy_data *pd, *nd;
	struct blkcg_policy_data *cpd, *cnd;
	int cnt = 0, ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* count and allocate policy_data for all existing blkgs */
	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;
	spin_unlock_irq(q->queue_lock);

	/*
	 * Allocate per-blkg and per-blkcg policy data
	 * for all existing blkgs.
	 */
	while (cnt--) {
		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);

		if (!pol->cpd_size)
			continue;
		cpd = kzalloc_node(pol->cpd_size, GFP_KERNEL, q->node);
		if (!cpd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&cpd->alloc_node, &cpds);
	}

	/*
	 * Install the allocated pds and cpds. With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds)) ||
		    WARN_ON(pol->cpd_size && list_empty(&cpds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		cpd = list_first_entry(&cpds, struct blkcg_policy_data,
				       alloc_node);
		list_del_init(&cpd->alloc_node);
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (!pol->cpd_size)
			goto no_cpd;
		if (!blkg->blkcg->pd[pol->plid]) {
			/* Per-policy per-blkcg data */
			blkg->blkcg->pd[pol->plid] = cpd;
			cpd->plid = pol->plid;
			pol->cpd_init_fn(blkg->blkcg);
		} else { /* must free it as it has already been extracted */
			kfree(cpd);
		}
no_cpd:
		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, nd, &pds, alloc_node)
		kfree(pd);
	list_for_each_entry_safe(cpd, cnd, &cpds, alloc_node)
		kfree(cpd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->pd_offline_fn)
			pol->pd_offline_fn(blkg);
		if (pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
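
/*
 * Example (sketch): a policy-using component typically pairs these two
 * calls around its lifetime, e.g. with a hypothetical "foo" policy:
 *
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *	if (ret)
 *		return ret;
 *	...
 *	blkcg_deactivate_policy(q, &blkcg_policy_foo);
 */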

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
		return -EINVAL;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto err_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;
	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys,
						  pol->cftypes));
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

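/*
 * Example (illustrative sketch): module-level registration of a policy.
 * All "foo" names are hypothetical; @pd_size must cover a structure that
 * embeds struct blkg_policy_data, as checked by the WARN_ON above:
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size	= sizeof(struct foo_group_data),
 *		.cftypes	= foo_files,
 *		.pd_init_fn	= foo_pd_init,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *	module_init(foo_init);
 */
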
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(pol->cftypes);

	/* unregister and update blkgs */
	mutex_lock(&blkcg_pol_mutex);
	blkcg_policy[pol->plid] = NULL;
	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);