/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkio_cgroup blkio_root_cgroup = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKCG_MAX_POLS];

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}

struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkio_policy_type *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

static size_t blkg_pd_size(const struct blkio_policy_type *pol)
{
	return sizeof(struct blkg_policy_data) + pol->pdata_size;
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->ops.blkio_exit_group_fn)
			pol->ops.blkio_exit_group_fn(blkg);

		kfree(pd);
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(blkg_pd_size(pol), GFP_ATOMIC, q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (blkcg_policy_enabled(blkg->q, pol))
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}

static struct blkio_group *__blkg_lookup(struct blkio_cgroup *blkcg,
					 struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
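
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * does the lookup under the RCU read lock, as required above.  "blkcg"
 * and "q" stand for whatever blkio_cgroup / request_queue the caller
 * already holds.
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		... use group-local data (stats, limits) ...
 *	rcu_read_unlock();
 */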

static struct blkio_group *__blkg_lookup_create(struct blkio_cgroup *blkcg,
						struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	blkg = __blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);
out:
	return blkg;
}

struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q)
{
	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
	return __blkg_lookup_create(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something is wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkio_group *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in an RCU manner.  But holding an RCU read lock
	 * does not mean that one can access all the fields of blkg and
	 * assume these are valid.  For example, don't try to follow
	 * throtl_data and request queue links.
	 *
	 * Having a reference to blkg under an RCU read lock allows access
	 * only to values local to groups, like group stats and group rate
	 * limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct blkio_group *blkg;
	struct hlist_node *n;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkio_policy_type *pol = blkio_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->ops.blkio_reset_group_stats_fn)
				pol->ops.blkio_reset_group_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: whether to print out the sum of the @prfill return values
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for the
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
		       u64 (*prfill)(struct seq_file *, void *, int),
		       const struct blkio_policy_type *pol, int data,
		       bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid]->pdata, data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
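
/*
 * Illustrative sketch (not part of the original file): a policy would
 * typically wrap blkcg_print_blkgs() in a cftype->read_seq_string
 * callback, using one of the prfill helpers below.  The "foo" names and
 * the offsetof() target are hypothetical.
 *
 *	static int foo_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *				  struct seq_file *sf)
 *	{
 *		struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
 *
 *		blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
 *				  &blkio_policy_foo,
 *				  offsetof(struct foo_group, stat), false);
 *		return 0;
 *	}
 */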

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pdata.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, void *pdata, u64 v)
{
	const char *dname = blkg_dev_name(pdata_to_blkg(pdata));

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pdata.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, void *pdata,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pdata_to_blkg(pdata));
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @off: offset to the blkg_stat in @pdata
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, void *pdata, int off)
{
	return __blkg_prfill_u64(sf, pdata, blkg_stat_read(pdata + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @off: offset to the blkg_rwstat in @pdata
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, void *pdata, int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read(pdata + off);

	return __blkg_prfill_rwstat(sf, pdata, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkio_cgroup *blkcg,
		   const struct blkio_policy_type *pol, const char *input,
		   struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkio_group *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
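
/*
 * Illustrative sketch (not part of the original file): per-blkg config
 * writes are expected to pair blkg_conf_prep() with blkg_conf_finish()
 * around the update, roughly as below.  The "foo" names are hypothetical
 * and stand for a policy's own cftype handler and per-policy data.
 *
 *	static int foo_set_limit(struct cgroup *cgrp, struct cftype *cft,
 *				 const char *buf)
 *	{
 *		struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkio_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		... apply ctx.v to ctx.blkg's per-policy data ...
 *
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 */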

struct cftype blkio_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is responsible
 * for shooting down all blkgs associated with @cgroup.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();

	return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkio_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkio_policy_type *pol)
{
	LIST_HEAD(pds);
	struct blkio_group *blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	blk_queue_bypass_start(q);

	/* make sure the root blkg exists and count the existing blkgs */
	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkg = __blkg_lookup_create(&blkio_root_cgroup, q);
	rcu_read_unlock();

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}
	q->root_blkg = blkg;

	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;

	spin_unlock_irq(q->queue_lock);

	/* allocate policy_data for all existing blkgs */
	while (cnt--) {
		pd = kzalloc_node(blkg_pd_size(pol), GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds.  With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pol->ops.blkio_init_group_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkio_policy_type *pol)
{
	struct blkio_group *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	/* if no policy is left, no need for blkgs - shoot them down */
	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
		blkg_destroy_all(q);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->ops.blkio_exit_group_fn)
			pol->ops.blkio_exit_group_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkio_policy_register - register a blkcg policy
 * @blkiop: blkcg policy to register
 *
 * Register @blkiop with blkcg core.  Might sleep and @blkiop may be
 * modified on successful registration.  Returns 0 on success and -errno on
 * failure.
 */
int blkio_policy_register(struct blkio_policy_type *blkiop)
{
	int i, ret;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkio_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	blkiop->plid = i;
	blkio_policy[i] = blkiop;

	/* everything is in place, add intf files for the new policy */
	if (blkiop->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, blkiop->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
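
/*
 * Illustrative sketch (not part of the original file): a policy such as a
 * scheduler or throttler would describe itself with a blkio_policy_type,
 * register it at module init and then activate it per-queue via
 * blkcg_activate_policy().  All "foo" names are hypothetical.
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_init_group_fn		= foo_init_blkio_group,
 *			.blkio_exit_group_fn		= foo_exit_blkio_group,
 *			.blkio_reset_group_stats_fn	= foo_reset_group_stats,
 *		},
 *		.pdata_size	= sizeof(struct foo_group),
 *		.cftypes	= foo_blkcg_files,
 *	};
 *
 *	ret = blkio_policy_register(&blkio_policy_foo);     (module init)
 *	...
 *	ret = blkcg_activate_policy(q, &blkio_policy_foo);  (per-queue setup)
 */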

/**
 * blkio_policy_unregister - unregister a blkcg policy
 * @blkiop: blkcg policy to unregister
 *
 * Undo blkio_policy_register(@blkiop).  Might sleep.
 */
void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkio_policy[blkiop->plid] != blkiop))
		goto out_unlock;

	/* kill the intf files first */
	if (blkiop->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, blkiop->cftypes);

	/* unregister and update blkgs */
	blkio_policy[blkiop->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);