/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT,
			    .cfq_leaf_weight = 2 * CFQ_WEIGHT_DEFAULT, };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(pd);
	}

	blk_exit_rl(&blkg->rl);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;

		/* invoke per-policy init */
		if (pol->pd_init_fn)
			pol->pd_init_fn(blkg);
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint)
{
	struct blkcg_gq *blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
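
/*
 * Illustrative usage sketch (not part of the original file): hot-path
 * lookups are done entirely under the RCU read lock, and the result is
 * only valid inside the read-side critical section.  @blkcg and @q are
 * assumed to come from the caller, e.g. bio_blkcg() and the bio's queue.
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		pd = blkg->pd[pol->plid];
 *	rcu_read_unlock();
 */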

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css)) {
		ret = -EINVAL;
		goto err_free_blkg;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_css;
		}
	}
	blkg = new_blkg;

	/* link parent and insert */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -EINVAL;
			goto err_put_css;
		}
		blkg_get(blkg->parent);
	}

	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-EINVAL).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
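
/*
 * Illustrative usage sketch (an assumption about typical callers, not
 * part of the original file): lookup-or-create needs both the RCU read
 * lock and the queue lock.
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q);
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 *
 * On success @blkg and all of its ancestors on @q now exist; on failure
 * the ERR_PTR() encodes -EBUSY (bypassing) or -EINVAL (dying), which is
 * what the retry logic in blkg_conf_prep() below keys off of.
 */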

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg);
	}
	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	/*
	 * root blkg is destroyed.  Just clear the pointer since
	 * root_rl does not take reference on root blkg.
	 */
	q->root_blkg = NULL;
	q->root_rl.blkg = NULL;
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	/*
	 * A group is freed in an RCU manner.  But having an RCU lock does
	 * not mean that one can access all the fields of blkg and assume
	 * these are valid.  For example, don't try to follow throtl_data
	 * and request queue links.
	 *
	 * Having a reference to blkg under an RCU read lock allows access
	 * only to values local to the group, like group stats and group
	 * rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

/*
 * Helper used by blk_queue_for_each_rl() to advance to the next
 * request_list.  It's a bit tricky because the root blkg uses
 * @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}
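
/*
 * Sketch of how the helper above is consumed.  blk_queue_for_each_rl()
 * (defined in blk.h) starts from @q->root_rl and repeatedly calls
 * __blk_queue_next_rl(), visiting every request_list of @q.  For
 * example, waking all sleepers on every rl of a draining queue:
 *
 *	struct request_list *rl;
 *	int i;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
 *			wake_up_all(&rl->wait[i]);
 *
 * The caller is expected to hold @q->queue_lock so the blkg list can't
 * change under the iteration.
 */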

static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
			     u64 val)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
	struct blkcg_gq *blkg;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);
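
/*
 * Illustrative sketch of wiring the prfill helpers into a cftype read
 * function.  The policy, its pd layout and the stat field below are
 * hypothetical, not part of this file:
 *
 *	static int example_print_time(struct cgroup *cgrp, struct cftype *cft,
 *				      struct seq_file *sf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *
 *		blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
 *				  &blkcg_policy_example,
 *				  offsetof(struct example_pd, time), true);
 *		return 0;
 *	}
 *
 * Each per-device line is emitted by blkg_prfill_stat() through
 * __blkg_prfill_u64(), and the trailing "Total" line comes from passing
 * %true as @show_total.
 */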

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * Collect the blkg_stat specified by @off from @pd and all its online
 * descendants and return the sum.  The caller must be holding the queue
 * lock for online tests.
 */
u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
{
	struct blkcg_policy *pol = blkcg_policy[pd->plid];
	struct blkcg_gq *pos_blkg;
	struct cgroup *pos_cgrp;
	u64 sum;

	lockdep_assert_held(pd->blkg->q->queue_lock);

	sum = blkg_stat_read((void *)pd + off);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
		struct blkg_stat *stat = (void *)pos_pd + off;

		if (pos_blkg->online)
			sum += blkg_stat_read(stat);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * Collect the blkg_rwstat specified by @off from @pd and all its online
 * descendants and return the sum.  The caller must be holding the queue
 * lock for online tests.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off)
{
	struct blkcg_policy *pol = blkcg_policy[pd->plid];
	struct blkcg_gq *pos_blkg;
	struct cgroup *pos_cgrp;
	struct blkg_rwstat sum;
	int i;

	lockdep_assert_held(pd->blkg->q->queue_lock);

	sum = blkg_rwstat_read((void *)pd + off);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
		struct blkg_rwstat *rwstat = (void *)pos_pd + off;
		struct blkg_rwstat tmp;

		if (!pos_blkg->online)
			continue;

		tmp = blkg_rwstat_read(rwstat);

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			sum.cnt[i] += tmp.cnt[i];
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
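
/*
 * Illustrative sketch (hypothetical policy names): hierarchical stats
 * are exposed by pairing the recursive sums above with a prfill
 * callback.
 *
 *	static u64 example_prfill_time_recursive(struct seq_file *sf,
 *						 struct blkg_policy_data *pd,
 *						 int off)
 *	{
 *		return __blkg_prfill_u64(sf, pd,
 *					 blkg_stat_recursive_sum(pd, off));
 *	}
 *
 * Because the recursive sums require the queue lock for their online
 * tests, such callbacks should only run via blkcg_print_blkgs(), which
 * holds the matching queue lock around each @prfill invocation.
 */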

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
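
/*
 * Illustrative sketch of the prep/finish pairing from a policy's config
 * write handler.  The policy, its pd container and the per-device field
 * are hypothetical:
 *
 *	static int example_set_limit(struct cgroup *cgrp, struct cftype *cft,
 *				     const char *buf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *		struct blkg_conf_ctx ctx;
 *		struct example_pd *epd;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_example, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		epd = container_of(blkg_to_pd(ctx.blkg,
 *					      &blkcg_policy_example),
 *				   struct example_pd, pd);
 *		epd->limit = ctx.v;
 *
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 *
 * Between the two calls the RCU read lock and the device's queue lock
 * are held, so the update is safe against the blkg going away.
 */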

struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks.  As
 * blkcg lock is nested inside q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkcg *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();

	return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkcg_files,
	.module = THIS_MODULE,

	/*
	 * blkio subsystem is utterly broken in terms of hierarchy support.
	 * It treats all cgroups equally regardless of where they're
	 * located in the hierarchy - all cgroups are treated as if they're
	 * right below the root.  Fix it and remove the following.
	 */
	.broken_hierarchy = true,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg, *new_blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;
	bool preloaded;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* preallocations for root blkg */
	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	blk_queue_bypass_start(q);

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/*
	 * Make sure the root blkg exists and count the existing blkgs.  As
	 * @q is bypassing at this point, blkg_lookup_create() can't be
	 * used.  Open code it.
	 */
	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkg = __blkg_lookup(&blkcg_root, q, false);
	if (blkg)
		blkg_free(new_blkg);
	else
		blkg = blkg_create(&blkcg_root, q, new_blkg);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}
	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;

	spin_unlock_irq(q->queue_lock);

	/* allocate policy_data for all existing blkgs */
	while (cnt--) {
		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds.  With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	/* if no policy is left, no need for blkgs - shoot them down */
	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
		blkg_destroy_all(q);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->pd_offline_fn)
			pol->pd_offline_fn(blkg);
		if (pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
		return -EINVAL;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
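
/*
 * Illustrative registration sketch from a hypothetical policy module;
 * the struct, callbacks and files below are assumptions, not part of
 * this file:
 *
 *	static struct blkcg_policy blkcg_policy_example = {
 *		.pd_size	= sizeof(struct example_pd),
 *		.cftypes	= example_files,
 *		.pd_init_fn	= example_pd_init,
 *		.pd_exit_fn	= example_pd_exit,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_example);
 *	}
 *
 * ->plid is assigned by blkcg_policy_register(); a policy then enables
 * itself on individual queues with blkcg_activate_policy(), typically
 * from its elevator or queue init path.
 */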

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);