/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(pd);
	}

	blk_exit_rl(&blkg->rl);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;

		/* invoke per-policy init */
		if (blkcg_policy_enabled(blkg->q, pol))
			pol->pd_init_fn(blkg);
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
				      struct request_queue *q)
{
	struct blkcg_gq *blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that we
	 * may not be holding queue_lock and thus are not sure whether
	 * @blkg from blkg_tree has already been removed or not, so we
	 * can't update hint to the lookup result.  Leave it to the caller.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q)
		return blkg;

	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
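
/*
 * Illustrative sketch (not part of this file): a typical blkg_lookup()
 * caller holds the RCU read lock across the lookup and only dereferences
 * the result inside that section.  The helper name below is hypothetical.
 *
 *	static bool my_blkg_exists(struct blkcg *blkcg, struct request_queue *q)
 *	{
 *		struct blkcg_gq *blkg;
 *		bool found;
 *
 *		rcu_read_lock();
 *		blkg = blkg_lookup(blkcg, q);
 *		found = blkg != NULL;
 *		rcu_read_unlock();
 *		return found;
 *	}
 */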

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
					     struct request_queue *q,
					     struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* lookup and update hint on success, see __blkg_lookup() for details */
	blkg = __blkg_lookup(blkcg, q);
	if (blkg) {
		rcu_assign_pointer(blkcg->blkg_hint, blkg);
		goto out_free;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css)) {
		blkg = ERR_PTR(-EINVAL);
		goto out_free;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
		if (unlikely(!new_blkg)) {
			blkg = ERR_PTR(-ENOMEM);
			goto out_put;
		}
	}
	blkg = new_blkg;

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);
	}
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	blkg = ERR_PTR(ret);
out_put:
	css_put(&blkcg->css);
out_free:
	blkg_free(new_blkg);
	return blkg;
}

struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
	return __blkg_lookup_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
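
/*
 * Illustrative sketch (assumed caller, not from this file): creation must
 * happen with both the RCU read lock and @q->queue_lock held, and callers
 * are expected to handle the ERR_PTR() return seen while @q is bypassing,
 * e.g. by falling back to the root group.
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q);
 *	if (IS_ERR(blkg))
 *		blkg = q->root_blkg;
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 */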

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	/*
	 * root blkg is destroyed.  Just clear the pointer since
	 * root_rl does not take reference on root blkg.
	 */
	q->root_blkg = NULL;
	q->root_rl.blkg = NULL;
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}
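
/*
 * For reference, the companion iterator in blk.h is expected to look
 * roughly like the sketch below (shown here only to make the "first
 * entry is root_rl" convention above concrete):
 *
 *	#define blk_queue_for_each_rl(rl, q)	\
 *		for ((rl) = &(q)->root_rl; (rl);	\
 *		     (rl) = __blk_queue_next_rl((rl), (q)))
 */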

static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
			     u64 val)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
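
/*
 * Illustrative sketch (not from this file): a policy typically wires the
 * helpers above into a cftype->read_seq_string callback.  The names
 * blkcg_policy_foo, foo_print_rwstat and foo_group are hypothetical.
 *
 *	static int foo_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
 *				    struct seq_file *sf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *
 *		blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat,
 *				  &blkcg_policy_foo, cft->private, true);
 *		return 0;
 *	}
 *
 * where cft->private would hold the offset of a blkg_rwstat inside the
 * policy's foo_group (i.e. its blkg_policy_data).
 */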

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
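
/*
 * Illustrative sketch (not from this file): a policy's per-device write
 * handler pairs the two helpers above.  blkcg_policy_foo and
 * foo_conf_set() are hypothetical.
 *
 *	static int foo_set_conf(struct cgroup *cgrp, struct cftype *cft,
 *				const char *buf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_conf_set(ctx.blkg, ctx.v);
 *		blkg_conf_finish(&ctx);
 *		return ret;
 *	}
 */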

struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and responsible
 * for shooting down all blkgs associated with @cgroup.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkcg_pre_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkcg_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkcg *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();

	return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkcg_create,
	.can_attach = blkcg_can_attach,
	.pre_destroy = blkcg_pre_destroy,
	.destroy = blkcg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkcg_files,
	.module = THIS_MODULE,

	/*
	 * blkio subsystem is utterly broken in terms of hierarchy support.
	 * It treats all cgroups equally regardless of where they're
	 * located in the hierarchy - all cgroups are treated as if they're
	 * right below the root.  Fix it and remove the following.
	 */
	.broken_hierarchy = true,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registerations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;
	bool preloaded;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* preallocations for root blkg */
	blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	blk_queue_bypass_start(q);

	/* make sure the root blkg exists and count the existing blkgs */
	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}
	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;

	spin_unlock_irq(q->queue_lock);

	/* allocate policy_data for all existing blkgs */
	while (cnt--) {
		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds.  With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
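
/*
 * Illustrative sketch (not from this file): a policy enables itself on a
 * queue from its elevator/queue init path and undoes it on exit.  The
 * names foo_init_queue, foo_exit_queue and blkcg_policy_foo are
 * hypothetical.
 *
 *	static int foo_init_queue(struct request_queue *q)
 *	{
 *		return blkcg_activate_policy(q, &blkcg_policy_foo);
 *	}
 *
 *	static void foo_exit_queue(struct request_queue *q)
 *	{
 *		blkcg_deactivate_policy(q, &blkcg_policy_foo);
 *	}
 */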

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

885 886 887 888
	/* if no policy is left, no need for blkgs - shoot them down */
	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
		blkg_destroy_all(q);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
		return -EINVAL;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
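
/*
 * Illustrative sketch (not from this file): a policy module describes
 * itself with a blkcg_policy and registers it at init time.  All foo_*
 * names are hypothetical; the field names match struct blkcg_policy as
 * used in this file.
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size		= sizeof(struct foo_group),
 *		.cftypes		= foo_files,
 *		.pd_init_fn		= foo_pd_init,
 *		.pd_exit_fn		= foo_pd_exit,
 *		.pd_reset_stats_fn	= foo_pd_reset_stats,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_foo);
 *	}
 */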

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);