/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT,
			    .cfq_leaf_weight = 2 * CFQ_WEIGHT_DEFAULT, };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		kfree(blkg->pd[i]);

	blk_exit_rl(&blkg->rl);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint)
{
	struct blkcg_gq *blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
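
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * one plausible way to look up a blkg and pin it beyond the RCU read
 * section.  In this version blkg refcounting is protected by the queue
 * lock, so blkg_get()/blkg_put() need @q->queue_lock held.
 */
static struct blkcg_gq *example_blkg_lookup_and_get(struct blkcg *blkcg,
						    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup(blkcg, q);	/* NULL if @q is bypassing */
	if (blkg)
		blkg_get(blkg);		/* pin before dropping the locks */

	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	return blkg;	/* caller releases with blkg_put() under queue lock */
}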

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css)) {
		ret = -EINVAL;
		goto err_free_blkg;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_css;
		}
	}
	blkg = new_blkg;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -EINVAL;
			goto err_put_css;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns a pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-EINVAL).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of the blkcg implementation
	 * and we shouldn't allow anything to go through for a bypassing
	 * queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
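
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * blkg_lookup_create() must be entered with both the RCU read lock and
 * @q->queue_lock held, and its ERR_PTR() returns must be handled.
 */
static struct blkcg_gq *example_blkg_lookup_create(struct request_queue *q,
						   struct blkcg *blkcg)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_create(blkcg, q);
	if (IS_ERR(blkg))	/* -EINVAL if dying, -EBUSY if bypassing */
		blkg = NULL;
	else
		blkg_get(blkg);	/* pin before dropping the locks */

	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	return blkg;
}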

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something is wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg);
	}
	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	/*
	 * root blkg is destroyed.  Just clear the pointer since
	 * root_rl does not take a reference on the root blkg.
	 */
	q->root_blkg = NULL;
	q->root_rl.blkg = NULL;
}

/*
 * A group is RCU protected, but holding the RCU read lock does not mean
 * that one can access all the fields of a blkg and assume they are valid.
 * For example, don't try to follow throtl_data and request queue links.
 *
 * Holding a reference to a blkg under RCU only allows access to values
 * local to the group, such as group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
	int i;

	/* tell policies that this one is being freed */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);
	}

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent) {
		spin_lock_irq(blkg->q->queue_lock);
		blkg_put(blkg->parent);
		spin_unlock_irq(blkg->q->queue_lock);
	}

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);

/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}
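
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * blk_queue_for_each_rl() in blk.h is built on __blk_queue_next_rl().
 * A hypothetical helper counting every request_list on a queue, root_rl
 * included, assuming the caller holds @q->queue_lock to keep the blkg
 * list stable:
 */
static unsigned int example_count_request_lists(struct request_queue *q)
{
	struct request_list *rl;
	unsigned int nr = 0;

	blk_queue_for_each_rl(rl, q)	/* starts from &q->root_rl */
		nr++;

	return nr;
}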

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
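
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * wiring the prfill helpers above into a cftype read_seq_string handler.
 * "example_policy" and "struct example_pd" are hypothetical; the policy's
 * pd_size would be sizeof(struct example_pd), and blkg_policy_data must
 * be the first member so that blkg->pd[plid] points at the whole struct.
 */
struct example_pd {
	struct blkg_policy_data pd;	/* must come first */
	struct blkg_rwstat rwstat;	/* per-blkg I/O statistics */
};

static struct blkcg_policy example_policy;	/* defined in a later sketch */

static int example_print_rwstat(struct cgroup_subsys_state *css,
				struct cftype *cft, struct seq_file *sf)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	/* print one "MAJ:MIN Read/Write/Sync/Async" block per blkg */
	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &example_policy,
			  offsetof(struct example_pd, rwstat), true);
	return 0;
}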

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * Collect the blkg_stat specified by @off from @pd and all its online
 * descendants and return the sum.  The caller must be holding the queue
 * lock for online tests.
 */
u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
{
	struct blkcg_policy *pol = blkcg_policy[pd->plid];
	struct blkcg_gq *pos_blkg;
	struct cgroup *pos_cgrp;
	u64 sum;

	lockdep_assert_held(pd->blkg->q->queue_lock);

	sum = blkg_stat_read((void *)pd + off);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
		struct blkg_stat *stat = (void *)pos_pd + off;

		if (pos_blkg->online)
			sum += blkg_stat_read(stat);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
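
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a hypothetical prfill callback exposing a hierarchical stat by pairing
 * blkg_stat_recursive_sum() with __blkg_prfill_u64().  blkcg_print_blkgs()
 * invokes prfill with the queue lock held, satisfying the assertion above.
 */
static u64 example_prfill_stat_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	u64 sum = blkg_stat_recursive_sum(pd, off);

	return __blkg_prfill_u64(sf, pd, sum);
}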

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * Collect the blkg_rwstat specified by @off from @pd and all its online
 * descendants and return the sum.  The caller must be holding the queue
 * lock for online tests.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off)
{
	struct blkcg_policy *pol = blkcg_policy[pd->plid];
	struct blkcg_gq *pos_blkg;
	struct cgroup *pos_cgrp;
	struct blkg_rwstat sum;
	int i;

	lockdep_assert_held(pd->blkg->q->queue_lock);

	sum = blkg_rwstat_read((void *)pd + off);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
		struct blkg_rwstat *rwstat = (void *)pos_pd + off;
		struct blkg_rwstat tmp;

		if (!pos_blkg->online)
			continue;

		tmp = blkg_rwstat_read(rwstat);

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			sum.cnt[i] += tmp.cnt[i];
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
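
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the rwstat counterpart of the previous sketch; again, the queue lock
 * held by blkcg_print_blkgs() covers the online test in the sum helper.
 */
static u64 example_prfill_rwstat_recursive(struct seq_file *sf,
					   struct blkg_policy_data *pd,
					   int off)
{
	struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd, off);

	return __blkg_prfill_rwstat(sf, pd, &sum);
}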

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
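
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the intended blkg_conf_prep()/blkg_conf_finish() pairing in a policy's
 * cftype write_string handler, parsing "MAJ:MIN VAL" input.  It reuses
 * the hypothetical example_policy declared in an earlier sketch.
 */
static int example_set_limit(struct cgroup_subsys_state *css,
			     struct cftype *cft, const char *buf)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
	if (ret)
		return ret;	/* may be restart_syscall() after -EBUSY */

	/*
	 * ctx.blkg and ctx.v are valid here, with the RCU read lock and
	 * the queue lock held; apply ctx.v to ctx.blkg's policy data.
	 */

	blkg_conf_finish(&ctx);
	return 0;
}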

struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and is responsible
 * for shooting down all blkgs associated with @css.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkcg *blkcg;

	if (!parent_css) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();

	return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_subsys_state *css,
			    struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, css->cgroup, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkcg_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg, *new_blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;
	bool preloaded;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* preallocations for root blkg */
	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	blk_queue_bypass_start(q);

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/*
	 * Make sure the root blkg exists and count the existing blkgs.  As
	 * @q is bypassing at this point, blkg_lookup_create() can't be
	 * used.  Open code it.
	 */
	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkg = __blkg_lookup(&blkcg_root, q, false);
	if (blkg)
		blkg_free(new_blkg);
	else
		blkg = blkg_create(&blkcg_root, q, new_blkg);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}
	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;

	spin_unlock_irq(q->queue_lock);

	/* allocate policy_data for all existing blkgs */
	while (cnt--) {
		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds.  With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	/* if no policy is left, no need for blkgs - shoot them down */
	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
		blkg_destroy_all(q);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->pd_offline_fn)
			pol->pd_offline_fn(blkg);
		if (pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
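
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * activation and deactivation are typically driven from the code that
 * uses the policy, e.g. an elevator's init/exit paths.  Hypothetical
 * wrappers around the example_policy declared earlier:
 */
static int example_init_queue(struct request_queue *q)
{
	/* requires %GFP_KERNEL context; @q cycles through bypass mode */
	return blkcg_activate_policy(q, &example_policy);
}

static void example_exit_queue(struct request_queue *q)
{
	blkcg_deactivate_policy(q, &example_policy);
}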

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
		return -EINVAL;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
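
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * filling in and registering the hypothetical example_policy from the
 * earlier sketches.  Note that in this version blkcg_activate_policy()
 * calls pd_init_fn() unconditionally, so even a trivial init is needed.
 */
static void example_pd_init(struct blkcg_gq *blkg)
{
	/* pd is kzalloc'd, so the embedded rwstat starts out zeroed */
}

static struct blkcg_policy example_policy = {
	.pd_size	= sizeof(struct example_pd),
	.pd_init_fn	= example_pd_init,
	/* .cftypes would list files using the example read/write handlers */
};

static int __init example_policy_setup(void)
{
	/* a real policy would do this from its module init */
	return blkcg_policy_register(&example_policy);
}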

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);