/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include <linux/tracehook.h>
#include "blk.h"

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

static bool blkcg_debug_stats = false;

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	if (blkg->blkcg != &blkcg_root)
		blk_exit_rl(blkg->q, &blkg->rl);

	blkg_rwstat_exit(&blkg->stat_ios);
	blkg_rwstat_exit(&blkg->stat_bytes);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	atomic_set(&blkg->refcnt, 1);

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
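
/*
 * Example (illustrative sketch, not part of this file): read-side callers
 * normally go through the inline blkg_lookup() helper in blk-cgroup.h,
 * which is assumed here to try the blkcg->blkg_hint fast path and only
 * fall back to the slowpath above on a miss:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		... use the per-(blkcg, queue) state attached to blkg ...
 *	rcu_read_unlock();
 */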

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	struct bdi_writeback_congested *wb_congested;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	wb_congested = wb_congested_get_create(q->backing_dev_info,
					       blkcg->css.id,
					       GFP_NOWAIT | __GFP_NOWARN);
	if (!wb_congested) {
		ret = -ENOMEM;
		goto err_put_css;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_congested;
		}
	}
	blkg = new_blkg;
	blkg->wb_congested = wb_congested;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_congested;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_congested:
	wb_congested_put(wb_congested);
err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-ENODEV).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}
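
/*
 * Example (illustrative sketch, not part of this file): a caller on the IO
 * submission side holds both the RCU read lock and the queue lock, roughly
 * as below; the foo_account_io() helper is hypothetical:
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(bio_blkcg(bio), q);
 *	if (!IS_ERR(blkg))
 *		foo_account_io(blkg, bio);
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 */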

static void blkg_pd_offline(struct blkcg_gq *blkg)
{
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkg->blkcg->lock);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && !blkg->pd[i]->offline &&
		    pol->pd_offline_fn) {
			pol->pd_offline_fn(blkg->pd[i]);
			blkg->pd[i]->offline = true;
		}
	}
}

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	struct blkcg_gq *parent = blkg->parent;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	if (parent) {
		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_pd_offline(blkg);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	q->root_blkg = NULL;
	q->root_rl.blkg = NULL;
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	wb_congested_put(blkg->wb_congested);

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);

/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		blkg_rwstat_reset(&blkg->stat_bytes);
		blkg_rwstat_reset(&blkg->stat_ios);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info->dev)
		return dev_name(blkg->q->backing_dev_info->dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_dev_name);

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
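
/*
 * Example (illustrative sketch, not part of this file): a policy's
 * cftype->seq_show callback typically wraps blkcg_print_blkgs() with a
 * prfill callback.  foo_*, pd_to_foo() and blkcg_policy_foo are
 * hypothetical:
 *
 *	static u64 foo_prfill_weight(struct seq_file *sf,
 *				     struct blkg_policy_data *pd, int off)
 *	{
 *		return __blkg_prfill_u64(sf, pd, pd_to_foo(pd)->weight);
 *	}
 *
 *	static int foo_print_weight(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  foo_prfill_weight, &blkcg_policy_foo, 0, false);
 *		return 0;
 *	}
 */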

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));

	v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
				    struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_bytes.
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);

/**
 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 * must be set to the blkcg_policy.
 */
int blkg_print_stat_ios(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios);

static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
					      struct blkg_policy_data *pd,
					      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
							      NULL, off);
	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);

/**
 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_stat
 * @off: offset to the blkg_stat in blkg_policy_data or @blkg
 *
 * Collect the blkg_stat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
 * at @off bytes into @blkg's blkg_policy_data of the policy.
 */
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_stat *stat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			stat = (void *)blkg + off;

		sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_rwstat
 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 *
 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	struct blkg_rwstat sum = { };
	int i;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_rwstat *rwstat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			rwstat = (void *)pos_blkg + off;

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
				percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
				&sum.aux_cnt[i]);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

/* Performs queue bypass and policy enabled checks then looks up blkg. */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
					  const struct blkcg_policy *pol,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	if (!blkcg_policy_enabled(q, pol))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

	return __blkg_lookup(blkcg, q, true /* update_hint */);
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	int key_len, part, ret;
	char *body;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return -EINVAL;

	body = input + key_len;
	if (!isspace(*body))
		return -EINVAL;
	body = skip_spaces(body);

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk)
		return -ENODEV;
	if (part) {
		ret = -ENODEV;
		goto fail;
	}

	q = disk->queue;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_check(blkcg, pol, q);
	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto fail_unlock;
	}

	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		parent = blkcg_parent(blkcg);
		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(q->queue_lock);
		rcu_read_unlock();

		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail;
		}

		rcu_read_lock();
		spin_lock_irq(q->queue_lock);

		blkg = blkg_lookup_check(pos, pol, q);
		if (IS_ERR(blkg)) {
			ret = PTR_ERR(blkg);
			goto fail_unlock;
		}

		if (blkg) {
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, q, new_blkg);
			if (unlikely(IS_ERR(blkg))) {
				ret = PTR_ERR(blkg);
				goto fail_unlock;
			}
		}

		if (pos == blkcg)
			goto success;
	}
success:
	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->body = body;
	return 0;

fail_unlock:
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
fail:
	put_disk_and_module(disk);
	/*
	 * If queue was bypassing, we should retry.  Do so after a
	 * short msleep().  It isn't strictly necessary but queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk_and_module(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
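
/*
 * Example (illustrative sketch, not part of this file): a policy's cftype
 * write handler pairs blkg_conf_prep() with blkg_conf_finish().  The foo_*
 * names, blkcg_policy_foo and foo_apply_weight() are hypothetical:
 *
 *	static ssize_t foo_set_weight(struct kernfs_open_file *of, char *buf,
 *				      size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		u64 v;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = kstrtoull(ctx.body, 0, &v);
 *		if (!ret)
 *			ret = foo_apply_weight(ctx.blkg, v);
 *
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */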

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	rcu_read_lock();

	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		const char *dname;
		char *buf;
		struct blkg_rwstat rwstat;
		u64 rbytes, wbytes, rios, wios;
		size_t size = seq_get_buf(sf, &buf), off = 0;
		int i;
		bool has_stats = false;

		dname = blkg_dev_name(blkg);
		if (!dname)
			continue;

		/*
		 * Hooray string manipulation, count is the size written NOT
		 * INCLUDING THE \0, so size is now count+1 less than what we
		 * had before, but we want to start writing the next bit from
		 * the \0 so we only add count to buf.
		 */
		off += scnprintf(buf+off, size-off, "%s ", dname);

		spin_lock_irq(blkg->q->queue_lock);

		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
					offsetof(struct blkcg_gq, stat_bytes));
		rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
		wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
					offsetof(struct blkcg_gq, stat_ios));
		rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
		wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

		spin_unlock_irq(blkg->q->queue_lock);

		if (rbytes || wbytes || rios || wios) {
			has_stats = true;
			off += scnprintf(buf+off, size-off,
					 "rbytes=%llu wbytes=%llu rios=%llu wios=%llu",
					 rbytes, wbytes, rios, wios);
		}

		if (!blkcg_debug_stats)
			goto next;

		if (atomic_read(&blkg->use_delay)) {
			has_stats = true;
			off += scnprintf(buf+off, size-off,
					 " use_delay=%d delay_nsec=%llu",
					 atomic_read(&blkg->use_delay),
					(unsigned long long)atomic64_read(&blkg->delay_nsec));
		}

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];
			size_t written;

			if (!blkg->pd[i] || !pol->pd_stat_fn)
				continue;

			written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
			if (written)
				has_stats = true;
			off += written;
		}
next:
		if (has_stats) {
			off += scnprintf(buf+off, size-off, "\n");
			seq_commit(sf, off);
		}
	}

	rcu_read_unlock();
	return 0;
}

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and is responsible
 * for offlining all blkgs pd and killing all wbs associated with @css.
 * blkgs pd offline should be done while holding both q and blkcg locks.
 * As blkcg lock is nested inside q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;

	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_pd_offline(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);

	wb_blkcg_offline(blkcg);
}

/**
 * blkcg_destroy_all_blkgs - destroy all blkgs associated with a blkcg
 * @blkcg: blkcg of interest
 *
 * This function is called when the blkcg css is about to be freed and is
 * responsible for destroying all blkgs associated with @blkcg.
 * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
 * is nested inside q lock, this function performs reverse double lock dancing.
 */
static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)
{
	spin_lock_irq(&blkcg->lock);
	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq,
						    blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}
	spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	blkcg_destroy_all_blkgs(blkcg);

T
1126

T

1129
	for (i = 0; i < BLKCG_MAX_POLS; i++)
1130 1131 1132 1133 1134
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg) {
			ret = ERR_PTR(-ENOMEM);
			goto unlock;
		}
	}

	for (i = 0; i < BLKCG_MAX_POLS ; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
		if (pol->cpd_init_fn)
			pol->cpd_init_fn(cpd);
	}

	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/* Make sure the root blkg exists. */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;
	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	ret = blk_throtl_init(q);
	if (ret) {
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
	}
	return ret;

err_unlock:
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
	if (preloaded)
		radix_tree_preload_end();
	return PTR_ERR(blkg);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/*
	 * @q could be exiting and already have destroyed all blkgs as
	 * indicated by NULL root_blkg.  If so, don't confuse policies.
	 */
	if (!q->root_blkg)
		return;

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no mean to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *dst_css;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, dst_css, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
	int i;

	mutex_lock(&blkcg_pol_mutex);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg *blkcg;

		if (!pol || !pol->cpd_bind_fn)
			continue;

		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
			if (blkcg->cpd[pol->plid])
				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
	}
	mutex_unlock(&blkcg_pol_mutex);
}

static void blkcg_exit(struct task_struct *tsk)
{
	if (tsk->throttle_queue)
		blk_put_queue(tsk->throttle_queue);
	tsk->throttle_queue = NULL;
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.bind = blkcg_bind,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
	.exit = blkcg_exit,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registerations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	if (q->mq_ops)
		blk_mq_freeze_queue(q);
	else
		blk_queue_bypass_start(q);
pd_prealloc:
	if (!pd_prealloc) {
		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
		if (!pd_prealloc) {
			ret = -ENOMEM;
			goto out_bypass_end;
		}
	}

	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
		if (!pd)
			swap(pd, pd_prealloc);
		if (!pd) {
			spin_unlock_irq(q->queue_lock);
			goto pd_prealloc;
		}

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		if (pol->pd_init_fn)
			pol->pd_init_fn(pd);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(q->queue_lock);
out_bypass_end:
	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
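
/*
 * Example (illustrative sketch, not part of this file): a policy usually
 * activates itself from its per-queue init path and deactivates from the
 * matching exit path, similar to what blk-throttle does from
 * blk_throtl_init()/blk_throtl_exit().  foo_* names and blkcg_policy_foo
 * are hypothetical:
 *
 *	int foo_init_queue(struct request_queue *q)
 *	{
 *		... allocate and set up per-queue state ...
 *		return blkcg_activate_policy(q, &blkcg_policy_foo);
 *	}
 *
 *	void foo_exit_queue(struct request_queue *q)
 *	{
 *		blkcg_deactivate_policy(q, &blkcg_policy_foo);
 *		... tear down per-queue state ...
 *	}
 */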

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	if (q->mq_ops)
		blk_mq_freeze_queue(q);
	else
		blk_queue_bypass_start(q);

	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (blkg->pd[pol->plid]) {
			if (!blkg->pd[pol->plid]->offline &&
			    pol->pd_offline_fn) {
				pol->pd_offline_fn(blkg->pd[pol->plid]);
				blkg->pd[pol->plid]->offline = true;
			}
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
	}

	spin_unlock_irq(q->queue_lock);

	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto err_unlock;

	/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */
	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
		(!pol->pd_alloc_fn ^ !pol->pd_free_fn))
		goto err_unlock;

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd)
				goto err_free_cpds;

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
			pol->cpd_init_fn(cpd);
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->dfl_cftypes)
		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
					       pol->dfl_cftypes));
	if (pol->legacy_cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
						  pol->legacy_cftypes));
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
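
/*
 * Example (illustrative sketch, not part of this file): a policy describes
 * itself with a struct blkcg_policy and registers it once at module init.
 * All foo_* symbols are hypothetical:
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.dfl_cftypes	= foo_dfl_files,
 *		.legacy_cftypes	= foo_legacy_files,
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_offline_fn	= foo_pd_offline,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_foo);
 *	}
 */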

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);

/*
 * Scale the accumulated delay based on how long it has been since we updated
 * the delay.  We only call this when we are adding delay, in case it's been a
 * while since we added delay, and when we are checking to see if we need to
 * delay a task, to account for any delays that may have occurred.
 */
static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
{
	u64 old = atomic64_read(&blkg->delay_start);

	/*
	 * We only want to scale down every second.  The idea here is that we
	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
	 * time window.  We only want to throttle tasks for recent delay that
	 * has occurred, in 1 second time windows since that's the maximum
	 * things can be throttled.  We save the current delay window in
	 * blkg->last_delay so we know what amount is still left to be charged
	 * to the blkg from this point onward.  blkg->last_use keeps track of
	 * the use_delay counter.  The idea is if we're unthrottling the blkg we
	 * are ok with whatever is happening now, and we can take away more of
	 * the accumulated delay as we've already throttled enough that
	 * everybody is happy with their IO latencies.
	 */
	if (time_before64(old + NSEC_PER_SEC, now) &&
	    atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
		u64 cur = atomic64_read(&blkg->delay_nsec);
		u64 sub = min_t(u64, blkg->last_delay, now - old);
		int cur_use = atomic_read(&blkg->use_delay);

		/*
		 * We've been unthrottled, subtract a larger chunk of our
		 * accumulated delay.
		 */
		if (cur_use < blkg->last_use)
			sub = max_t(u64, sub, blkg->last_delay >> 1);

		/*
		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
		 * should only ever be growing except here where we subtract out
		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
		 * rather not end up with negative numbers.
		 */
		if (unlikely(cur < sub)) {
			atomic64_set(&blkg->delay_nsec, 0);
			blkg->last_delay = 0;
		} else {
			atomic64_sub(sub, &blkg->delay_nsec);
			blkg->last_delay = cur - sub;
		}
		blkg->last_use = cur_use;
	}
}

/*
 * This is called when we want to actually walk up the hierarchy and check to
 * see if we need to throttle, and then actually throttle if there is some
 * accumulated delay.  This should only be called upon return to user space so
 * we're not holding some lock that would induce a priority inversion.
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
	u64 now = ktime_to_ns(ktime_get());
	u64 exp;
	u64 delay_nsec = 0;
	int tok;

	while (blkg->parent) {
		if (atomic_read(&blkg->use_delay)) {
			blkcg_scale_delay(blkg, now);
			delay_nsec = max_t(u64, delay_nsec,
					   atomic64_read(&blkg->delay_nsec));
		}
		blkg = blkg->parent;
	}

	if (!delay_nsec)
		return;

	/*
	 * Let's not sleep for all eternity if we've amassed a huge delay.
	 * Swapping or metadata IO can accumulate 10's of seconds worth of
 * delay, and we want userspace to be able to do _something_ so cap the
 * delays at 0.25s to match the clamp below.  If there's 10's of seconds
 * worth of delay then the tasks will be delayed for 0.25s for every
 * syscall.
	 */
	delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);

	/*
	 * TODO: the use_memdelay flag is going to be for the upcoming psi stuff
	 * that hasn't landed upstream yet.  Once that stuff is in place we need
	 * to do a psi_memstall_enter/leave if memdelay is set.
	 */

	exp = ktime_add_ns(now, delay_nsec);
	tok = io_schedule_prepare();
	do {
		__set_current_state(TASK_KILLABLE);
		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
			break;
	} while (!fatal_signal_pending(current));
	io_schedule_finish(tok);
}

/**
 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
 *
 * This is only called if we've been marked with set_notify_resume().  Obviously
 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
 * check to see if current->throttle_queue is set and if not this doesn't do
 * anything.  This should only ever be called by the resume code, it's not meant
 * to be called by people willy-nilly as it will actually do the work to
 * throttle the task if it is setup for throttling.
 */
void blkcg_maybe_throttle_current(void)
{
	struct request_queue *q = current->throttle_queue;
	struct cgroup_subsys_state *css;
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool use_memdelay = current->use_memdelay;

	if (!q)
		return;

	current->throttle_queue = NULL;
	current->use_memdelay = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (css)
		blkcg = css_to_blkcg(css);
	else
		blkcg = css_to_blkcg(task_css(current, io_cgrp_id));

	if (!blkcg)
		goto out;
	blkg = blkg_lookup(blkcg, q);
	if (!blkg)
		goto out;
	blkg = blkg_try_get(blkg);
	if (!blkg)
		goto out;
	rcu_read_unlock();
	blk_put_queue(q);

	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
	blkg_put(blkg);
	return;
out:
	rcu_read_unlock();
	blk_put_queue(q);
}
EXPORT_SYMBOL_GPL(blkcg_maybe_throttle_current);

/**
 * blkcg_schedule_throttle - this task needs to check for throttling
 * @q - the request queue IO was submitted on
 * @use_memdelay - do we charge this to memory delay for PSI
 *
 * This is called by the IO controller when we know there's delay accumulated
 * for the blkg for this task.  We do not pass the blkg because there are places
 * we call this that may not have that information, the swapping code for
 * instance will only have a request_queue at that point.  This sets the
 * notify_resume for the task to check and see if it requires throttling before
 * returning to user space.
 *
 * We will only schedule once per syscall.  You can call this over and over
 * again and it will only do the check once upon return to user space, and only
 * throttle once.  If the task needs to be throttled again it'll need to be
 * re-set at the next time we see the task.
 */
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
{
	if (unlikely(current->flags & PF_KTHREAD))
		return;

	if (!blk_get_queue(q))
		return;

	if (current->throttle_queue)
		blk_put_queue(current->throttle_queue);
	current->throttle_queue = q;
	if (use_memdelay)
		current->use_memdelay = use_memdelay;
	set_notify_resume(current);
}
EXPORT_SYMBOL_GPL(blkcg_schedule_throttle);

/**
 * blkcg_add_delay - add delay to this blkg
 * @now - the current time in nanoseconds
 * @delta - how many nanoseconds of delay to add
 *
 * Charge @delta to the blkg's current delay accumulation.  This is used to
 * throttle tasks if an IO controller thinks we need more throttling.
 */
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
{
	blkcg_scale_delay(blkg, now);
	atomic64_add(delta, &blkg->delay_nsec);
}
EXPORT_SYMBOL_GPL(blkcg_add_delay);
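
/*
 * Example (illustrative sketch, not part of this file): an IO controller
 * that decides a group is over its target charges delay against the blkg
 * and asks the issuing task to throttle itself on return to user space.
 * foo_over_target() and foo_penalty_ns() are hypothetical, and
 * blkcg_use_delay() is assumed to be the blk-cgroup.h helper that bumps
 * blkg->use_delay:
 *
 *	u64 now = ktime_to_ns(ktime_get());
 *
 *	if (foo_over_target(blkg)) {
 *		blkcg_use_delay(blkg);
 *		blkcg_add_delay(blkg, now, foo_penalty_ns(blkg));
 *		blkcg_schedule_throttle(blkg->q, false);
 *	}
 */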

module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");