blk-cgroup.c 40.9 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
14 15
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
16
#include <linux/module.h>
17
#include <linux/err.h>
18
#include <linux/blkdev.h>
19
#include <linux/slab.h>
20
#include <linux/genhd.h>
21
#include <linux/delay.h>
T
Tejun Heo 已提交
22
#include <linux/atomic.h>
23
#include "blk-cgroup.h"
24
#include "blk.h"
25

26 27
#define MAX_KEY_LEN 100

28 29
static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);
30

31 32 33
static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

34 35 36 37 38 39 40
/* List of groups pending per cpu stats allocation */
static DEFINE_SPINLOCK(alloc_list_lock);
static LIST_HEAD(alloc_list);

static void blkio_stat_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);

41
struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
42 43
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

44 45
static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

46 47 48 49 50
/**
 * cgroup_to_blkio_cgroup - map a cgroup to its embedded blkio_cgroup
 * @cgroup: cgroup of interest
 *
 * Returns the blkio_cgroup that contains @cgroup's blkio subsystem state.
 */
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	struct cgroup_subsys_state *css;

	css = cgroup_subsys_state(cgroup, blkio_subsys_id);
	return container_of(css, struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
52

53
static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
54 55 56 57
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
58 59 60 61 62 63 64 65

/*
 * Return the blkcg a bio is associated with.  Falls back to the current
 * task's blkcg when the bio carries no explicit css association.
 */
struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (!bio || !bio->bi_css)
		return task_blkio_cgroup(current);
	return container_of(bio->bi_css, struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
66

67 68
static inline void blkio_update_group_weight(struct blkio_group *blkg,
					     int plid, unsigned int weight)
69 70 71 72 73
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
74
		if (blkiop->plid != plid)
75 76
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
77
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
78
							blkg, weight);
79 80 81
	}
}

82
static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
83
					  u64 bps, int rw)
84 85 86 87 88 89
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
90
		if (blkiop->plid != plid)
91 92
			continue;

93
		if (rw == READ && blkiop->ops.blkio_update_group_read_bps_fn)
94
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
95
								blkg, bps);
96

97
		if (rw == WRITE && blkiop->ops.blkio_update_group_write_bps_fn)
98
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
99
								blkg, bps);
100 101 102
	}
}

103 104
/*
 * Notify the policy owning @blkg of a new IOs-per-second limit for the
 * direction indicated by @rw (READ or WRITE).
 */
static inline void blkio_update_group_iops(struct blkio_group *blkg, int plid,
					   u64 iops, int rw)
{
	struct blkio_policy_type *pol;

	list_for_each_entry(pol, &blkio_list, list) {
		/* only the owning policy receives the update */
		if (pol->plid != plid)
			continue;

		if (rw == READ && pol->ops.blkio_update_group_read_iops_fn)
			pol->ops.blkio_update_group_read_iops_fn(blkg->q, blkg,
								 iops);
		if (rw == WRITE && pol->ops.blkio_update_group_write_iops_fn)
			pol->ops.blkio_update_group_write_iops_fn(blkg->q,
								  blkg, iops);
	}
}

124
#ifdef CONFIG_DEBUG_BLK_CGROUP
125
/* This should be called with the queue_lock held. */
126
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
127 128
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
129
{
130
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
131 132

	if (blkio_blkg_waiting(&pd->stats))
133 134 135
		return;
	if (blkg == curr_blkg)
		return;
136 137
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
138 139
}

140
/* This should be called with the queue_lock held. */
141 142 143 144 145 146 147 148 149
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
150 151
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
152 153 154
	blkio_clear_blkg_waiting(stats);
}

155
/* This should be called with the queue_lock held. */
156 157 158 159 160 161 162 163 164
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
165 166
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
167 168 169
	blkio_clear_blkg_empty(stats);
}

170 171
/*
 * Mark the start of an idle period for @blkg under @pol and record the
 * start timestamp.  Caller must hold the queue_lock; the group must not
 * already be idling (that would indicate unbalanced set/update calls).
 */
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);
	/* double-start means a missing blkiocg_update_idle_time_stats() */
	BUG_ON(blkio_blkg_idling(stats));

	stats->start_idle_time = sched_clock();
	blkio_mark_blkg_idling(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

183 184
void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
185
{
186 187 188
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);
189 190

	if (blkio_blkg_idling(stats)) {
191 192
		unsigned long long now = sched_clock();

193 194 195
		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
196 197 198 199 200
		blkio_clear_blkg_idling(stats);
	}
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

201 202
/*
 * Take one sample for the average-queue-size statistic: add the current
 * number of queued requests to the running sum and bump the sample count.
 * Also closes any open group-wait period.  Caller must hold the
 * queue_lock.
 */
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_sum(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	blkio_update_group_wait_time(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

215 216
/*
 * Record the moment @blkg became empty (no queued requests) so the time
 * spent empty can be accounted later by blkio_end_empty_time().  Caller
 * must hold the queue_lock.
 */
void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	/* still has queued requests - not empty */
	if (blkg_rwstat_sum(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

238
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
239 240
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
241
{
242
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
243

244 245
	lockdep_assert_held(blkg->q->queue_lock);

246
	blkg_stat_add(&pd->stats.dequeue, dequeue);
247 248
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
249 250
#else
/* CONFIG_DEBUG_BLK_CGROUP=n: wait/empty time tracking compiles to no-ops. */
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif

256
/*
 * Account one request being queued on @blkg: bump the queued rwstat,
 * close any open empty period and, when debug stats are enabled, start
 * the group-wait clock unless @blkg is the currently active group
 * (@curr_blkg).  Caller must hold the queue_lock.
 */
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	/* fold direction/sync into the REQ_* mask used by the rwstat helpers */
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->queued, rw, 1);
	blkio_end_empty_time(stats);
	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
271

272
/*
 * Undo blkiocg_update_io_add_stats() for one request leaving @blkg's
 * queue.  Caller must hold the queue_lock.
 */
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = 0;

	if (direction)
		rw |= REQ_WRITE;
	if (sync)
		rw |= REQ_SYNC;

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->queued, rw, -1);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
284

285 286 287 288
/*
 * Charge @time of used timeslice to @blkg.  @unaccounted_time is tracked
 * separately and only when debug stats are compiled in.  Caller must
 * hold the queue_lock.
 */
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&stats->time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&stats->unaccounted_time, unaccounted_time);
#endif
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
300

301 302 303 304
/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (pd->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	/* bytes >> 9: convert byte count to 512-byte sectors */
	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
334

335
/*
 * Account completion of a request on @blkg: service_time is the span
 * from dispatch (@io_start_time) to now, wait_time the span from queueing
 * (@start_time) to dispatch.  Each delta is only added when positive,
 * since sched_clock() timestamps taken on different CPUs may be skewed.
 * Caller must hold the queue_lock.
 */
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	unsigned long long now = sched_clock();
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
354

355
/*  Merged stats are per cpu.  */
356 357 358
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
D
Divyesh Shah 已提交
359
{
360
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
361
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
362 363

	lockdep_assert_held(blkg->q->queue_lock);
D
Divyesh Shah 已提交
364

365
	blkg_rwstat_add(&stats->merged, rw, 1);
D
Divyesh Shah 已提交
366 367 368
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422
/*
 * Worker for allocating per cpu stat for blk groups. This is scheduled on
 * the system_nrt_wq once there are some groups on the alloc_list waiting
 * for allocation.
 */
static void blkio_stat_alloc_fn(struct work_struct *work)
{
	/*
	 * Spare percpu areas are kept in this static scratch array between
	 * iterations so a failed pass does not lose what was already
	 * allocated.  Safe without locking: only one instance of this work
	 * item runs at a time.
	 */
	static void *pcpu_stats[BLKIO_NR_POLICIES];
	struct delayed_work *dwork = to_delayed_work(work);
	struct blkio_group *blkg;
	int i;
	bool empty = false;

alloc_stats:
	/* top up the scratch array; allocations sleep, so no locks held */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		if (pcpu_stats[i] != NULL)
			continue;

		pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);

		/* Allocation failed. Try again after some time. */
		if (pcpu_stats[i] == NULL) {
			queue_delayed_work(system_nrt_wq, dwork,
						msecs_to_jiffies(10));
			return;
		}
	}

	/* lock order: blkio_list_lock (irq-safe) outside alloc_list_lock */
	spin_lock_irq(&blkio_list_lock);
	spin_lock(&alloc_list_lock);

	/* cgroup got deleted or queue exited. */
	if (!list_empty(&alloc_list)) {
		blkg = list_first_entry(&alloc_list, struct blkio_group,
						alloc_node);
		for (i = 0; i < BLKIO_NR_POLICIES; i++) {
			struct blkg_policy_data *pd = blkg->pd[i];

			/* swap() leaves the unused spare in pcpu_stats[i] */
			if (blkio_policy[i] && pd && !pd->stats_cpu)
				swap(pd->stats_cpu, pcpu_stats[i]);
		}

		list_del_init(&blkg->alloc_node);
	}

	empty = list_empty(&alloc_list);

	spin_unlock(&alloc_list_lock);
	spin_unlock_irq(&blkio_list_lock);

	/* keep going until every pending blkg has its percpu stats */
	if (!empty)
		goto alloc_stats;
}

423 424 425 426 427 428 429 430
/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
431
	int i;
432 433 434 435

	if (!blkg)
		return;

436 437 438 439 440 441 442
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
443
	}
444

445
	kfree(blkg);
446 447 448 449 450 451 452
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.  Returns NULL on any
 * allocation failure (partial allocations are cleaned up via blkg_free()).
 * Note that percpu stats are NOT allocated here; they are filled in
 * asynchronously by blkio_stat_alloc_fn().
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	INIT_LIST_HEAD(&blkg->alloc_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/*
	 * invoke per-policy init - done in a second pass so every policy
	 * sees a fully-populated blkg->pd[] array
	 */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}

503 504 505 506
/*
 * Look up the blkg for the @blkcg - @q pair, creating and inserting it if
 * it doesn't exist yet.  Must be called with the RCU read lock and the
 * queue_lock held; @for_root bypasses the queue-bypass check for internal
 * root-group setup.  Returns the blkg or an ERR_PTR (-EBUSY means the
 * queue is bypassing and the caller should retry).
 */
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert - blkcg->lock nests inside the already-held queue_lock */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);

	spin_lock(&alloc_list_lock);
	list_add(&blkg->alloc_node, &alloc_list);
	/* Queue per cpu stat allocation from worker thread. */
	queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
	spin_unlock(&alloc_list_lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
556 557

/* called under rcu_read_lock(). */
558
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
559
				struct request_queue *q)
560 561 562 563
{
	struct blkio_group *blkg;
	struct hlist_node *n;

564
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
565
		if (blkg->q == q)
566 567 568
			return blkg;
	return NULL;
}
569
EXPORT_SYMBOL_GPL(blkg_lookup);
570

571
/*
 * Unlink @blkg from its queue, its blkcg and the pending-allocation list,
 * then drop the creation reference.  Caller must hold both the queue_lock
 * and the owning blkcg's lock; the final free happens via RCU once the
 * last reference is put.
 */
static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/* may still be waiting for its percpu stats - cancel that */
	spin_lock(&alloc_list_lock);
	list_del_init(&blkg->alloc_node);
	spin_unlock(&alloc_list_lock);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628
/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down.  This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	/*
	 * Free the old policy data including its percpu stats - the
	 * previous code kfree()d pd without free_percpu(), leaking the
	 * percpu area on every elevator switch.
	 */
	pd = blkg->pd[plid];
	if (pd) {
		free_percpu(pd->stats_cpu);
		kfree(pd);
	}
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	/*
	 * On allocation failure leave the policy data unset instead of
	 * dereferencing a NULL pd below; stat paths tolerate a missing pd.
	 */
	if (WARN_ON_ONCE(!pd))
		return;

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	/* tolerated: stat update paths check stats_cpu for NULL */
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

629 630 631 632 633 634 635 636
/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q.  If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	/* _safe variant: blkg_destroy() unlinks entries as we walk */
	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		/* blkcg->lock nests inside the queue_lock */
		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);
658

T
Tejun Heo 已提交
659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681
/* RCU callback: actually free the blkg after the grace period. */
static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

/* Called when the last reference to @blkg is dropped (see blkg_put()). */
void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

682
static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
683
{
684
	struct blkg_policy_data *pd = blkg->pd[plid];
T
Tejun Heo 已提交
685
	int cpu;
686 687 688

	if (pd->stats_cpu == NULL)
		return;
T
Tejun Heo 已提交
689 690 691 692 693

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

694 695 696
		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
		blkg_stat_reset(&sc->sectors);
697 698 699
	}
}

700
/*
 * cftype write handler for "reset_stats": clear the accumulated stats of
 * every blkg in @cgroup for every registered policy.  The written value
 * (@val) is ignored.
 */
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct blkio_group *blkg;
	struct hlist_node *n;

	/* lock order: blkio_list_lock outside the irq-safe blkcg->lock */
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];
			struct blkio_group_stats *stats = &pd->stats;

			/* queued stats shouldn't be cleared */
			blkg_rwstat_reset(&stats->merged);
			blkg_rwstat_reset(&stats->service_time);
			blkg_rwstat_reset(&stats->wait_time);
			blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
			blkg_stat_reset(&stats->unaccounted_time);
			blkg_stat_reset(&stats->avg_queue_size_sum);
			blkg_stat_reset(&stats->avg_queue_size_samples);
			blkg_stat_reset(&stats->dequeue);
			blkg_stat_reset(&stats->group_wait_time);
			blkg_stat_reset(&stats->idle_time);
			blkg_stat_reset(&stats->empty_time);
#endif
			blkio_reset_stats_cpu(blkg, pol->plid);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

745
static const char *blkg_dev_name(struct blkio_group *blkg)
746
{
747 748 749 750
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
751 752
}

753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773
/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * Invokes @prfill on each blkg of @blkcg that has policy data for @pol,
 * passing @sf, the policy data and @data.  When @show_total is %true the
 * summed return values of @prfill are printed with a "Total" label at
 * the end.  Intended for building cftype->read_seq_string methods.
 */
static void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
			      u64 (*prfill)(struct seq_file *,
					    struct blkg_policy_data *, int),
			      int pol, int data, bool show_total)
{
	u64 total = 0;
	struct blkio_group *blkg;
	struct hlist_node *pos;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, pos, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[pol];

		if (pd)
			total += prfill(sf, pd, data);
	}
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.  Returns @v when
 * something was printed, 0 when the device has no name.
 */
static u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			     u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @rwstat: rwstat to print
 *
 * Print the four directional counters of @rwstat plus a "Total" line for
 * the device associated with @pd.  Returns the read+write total.
 */
static u64 __blkg_prfill_rwstat(struct seq_file *sf,
				struct blkg_policy_data *pd,
				const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 total;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	total = rwstat->cnt[BLKG_RWSTAT_READ] +
		rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)total);
	return total;
}

/* prfill callback: print the blkg_stat at byte offset @off into pd->stats */
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	u64 v = blkg_stat_read((void *)&pd->stats + off);

	return __blkg_prfill_u64(sf, pd, v);
}

static u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)&pd->stats + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/* print blkg_stat specified by BLKCG_STAT_PRIV() */
static int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
			    struct seq_file *sf)
{
	int plid = BLKCG_STAT_POL(cft->private);
	int off = BLKCG_STAT_OFF(cft->private);

	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp), blkg_prfill_stat,
			  plid, off, false);
	return 0;
}

/* print blkg_rwstat specified by BLKCG_STAT_PRIV() */
static int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
			      struct seq_file *sf)
{
	int plid = BLKCG_STAT_POL(cft->private);
	int off = BLKCG_STAT_OFF(cft->private);

	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp), blkg_prfill_rwstat,
			  plid, off, true);
	return 0;
}

static u64 blkg_prfill_cpu_stat(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	u64 v = 0;
	int cpu;
887

888
	for_each_possible_cpu(cpu) {
889
		struct blkio_group_stats_cpu *sc =
890 891
			per_cpu_ptr(pd->stats_cpu, cpu);

892
		v += blkg_stat_read((void *)sc + off);
893 894
	}

895
	return __blkg_prfill_u64(sf, pd, v);
896 897
}

898 899
static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf,
				  struct blkg_policy_data *pd, int off)
900
{
901 902 903 904 905 906
	struct blkg_rwstat rwstat = { }, tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);
907

908 909 910
		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];
911 912
	}

913 914
	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
915

916 917 918 919 920 921 922 923 924 925
/* print per-cpu blkg_stat specified by BLKCG_STAT_PRIV() */
static int blkcg_print_cpu_stat(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *sf)
{
	int plid = BLKCG_STAT_POL(cft->private);
	int off = BLKCG_STAT_OFF(cft->private);

	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
			  blkg_prfill_cpu_stat, plid, off, false);
	return 0;
}

928 929 930
/* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */
static int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
				  struct seq_file *sf)
931
{
932
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
933

934 935 936 937 938
	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_rwstat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), true);
	return 0;
}
939

940 941 942 943 944 945
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* prfill callback: print avg_queue_size_sum / avg_queue_size_samples */
static u64 blkg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	u64 nr_samples = blkg_stat_read(&pd->stats.avg_queue_size_samples);
	u64 avg = 0;

	/* avoid division by zero before any sample has been taken */
	if (nr_samples) {
		avg = blkg_stat_read(&pd->stats.avg_queue_size_sum);
		do_div(avg, nr_samples);
	}
	__blkg_prfill_u64(sf, pd, avg);

	/* averages aren't summable across devices - contribute 0 to Total */
	return 0;
}
T
Tejun Heo 已提交
954

955 956 957 958 959
/* print avg_queue_size */
static int blkcg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
				      struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	/* avg_queue_size is only tracked by the proportional-weight policy */
	blkcg_print_blkgs(sf, blkcg, blkg_prfill_avg_queue_size,
			  BLKIO_POLICY_PROP, 0, false);
	return 0;
}
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
966

967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986
/*
 * State carried from blkg_conf_prep() to blkg_conf_finish() while a
 * per-device configuration update is applied.
 */
struct blkg_conf_ctx {
	struct gendisk		*disk;	/* disk resolved from MAJ:MIN input */
	struct blkio_group	*blkg;	/* blkg being configured */
	u64			v;	/* new config value parsed from input */
};

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @input: input string, expected form "MAJ:MIN VAL"
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read locked and must be paired
 * with blkg_conf_finish().
 */
static int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
			  struct blkg_conf_ctx *ctx)
	__acquires(rcu)
{
	struct gendisk *disk;
	struct blkio_group *blkg;
	char *buf, *cur, *s[4], *p, *major_s, *minor_s;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	buf = kstrdup(input, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * strsep() advances its argument; walk with @cur so that @buf keeps
	 * pointing at the original allocation for the final kfree().
	 * Previously kfree() was called on the mangled cursor, leaking the
	 * buffer (cursor == NULL) or freeing an interior pointer (early
	 * break below).
	 */
	cur = buf;
	memset(s, 0, sizeof(s));

	while ((p = strsep(&cur, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputing too many things */
		if (i == 3)
			break;
	}

	/* expect exactly two tokens: "MAJ:MIN" and "VAL" */
	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	disk = get_gendisk(dev, &part);
	if (!disk || part) {
		/*
		 * get_gendisk() took a module/disk reference even when @dev
		 * names a partition; drop it instead of leaking it.
		 */
		if (disk)
			put_disk(disk);
		goto out;
	}

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		goto out;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = temp;
	ret = 0;
out:
	kfree(buf);
	return ret;
}

1074 1075 1076 1077 1078 1079 1080 1081 1082
/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
static void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(rcu)
{
	rcu_read_unlock();	/* taken by blkg_conf_prep() */
	put_disk(ctx->disk);	/* ref from get_gendisk() in blkg_conf_prep() */
}

1088 1089 1090
/* for propio conf */
static u64 blkg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
1091
{
1092 1093 1094
	if (!pd->conf.weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, pd->conf.weight);
1095
}
1096

1097 1098
static int blkcg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
				     struct seq_file *sf)
1099
{
1100 1101 1102 1103
	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
			  blkg_prfill_weight_device, BLKIO_POLICY_PROP, 0,
			  false);
	return 0;
1104 1105
}

1106 1107
static int blkcg_print_weight(struct cgroup *cgrp, struct cftype *cft,
			      struct seq_file *sf)
1108
{
1109
	seq_printf(sf, "%u\n", cgroup_to_blkio_cgroup(cgrp)->weight);
1110 1111 1112
	return 0;
}

1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138
static int blkcg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
				   const char *buf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkg_policy_data *pd;
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	pd = ctx.blkg->pd[BLKIO_POLICY_PROP];
	if (pd && (!ctx.v || (ctx.v >= BLKIO_WEIGHT_MIN &&
			      ctx.v <= BLKIO_WEIGHT_MAX))) {
		pd->conf.weight = ctx.v;
		blkio_update_group_weight(ctx.blkg, BLKIO_POLICY_PROP,
					  ctx.v ?: blkcg->weight);
		ret = 0;
	}

	blkg_conf_finish(&ctx);
	return ret;
}

T
Tejun Heo 已提交
1139
/* set the cgroup-wide default weight and propagate it */
static int blkcg_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkio_group *blkg;
	struct hlist_node *pos;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	/* push the new default to every blkg without a per-device override */
	hlist_for_each_entry(blkg, pos, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[BLKIO_POLICY_PROP];

		if (pd && !pd->conf.weight)
			blkio_update_group_weight(blkg, BLKIO_POLICY_PROP,
						  blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175
/* for blk-throttle conf */
#ifdef CONFIG_BLK_DEV_THROTTLING
/* prfill callback: print the u64 conf field at byte offset @off, skip zeros */
static u64 blkg_prfill_conf_u64(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	u64 val = *(u64 *)((void *)&pd->conf + off);

	return val ? __blkg_prfill_u64(sf, pd, val) : 0;
}
1176

1177 1178 1179 1180 1181
static int blkcg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
			  blkg_prfill_conf_u64, BLKIO_POLICY_THROTL,
1182
			  cft->private, false);
1183 1184
	return 0;
}
1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233

/*
 * Common helper for the throttle conf writers: parse "MAJ:MIN VAL", store
 * VAL into the conf field at cft->private and notify blk-throttle through
 * @update.  A value of 0 is forwarded as -1 ("no limit").
 */
static int blkcg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			      const char *buf, int rw,
			      void (*update)(struct blkio_group *, int, u64, int))
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkg_policy_data *pd;
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, buf, &ctx);
	if (ret)
		return ret;

	pd = ctx.blkg->pd[BLKIO_POLICY_THROTL];
	if (!pd) {
		ret = -EINVAL;
	} else {
		*(u64 *)((void *)&pd->conf + cft->private) = ctx.v;
		update(ctx.blkg, BLKIO_POLICY_THROTL, ctx.v ?: -1, rw);
		ret = 0;
	}

	blkg_conf_finish(&ctx);
	return ret;
}

/* throttle.read_bps_device writer: set the read bps limit */
static int blkcg_set_conf_bps_r(struct cgroup *cgrp, struct cftype *cft,
				const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf, READ, blkio_update_group_bps);
}

/* throttle.write_bps_device writer: set the write bps limit */
static int blkcg_set_conf_bps_w(struct cgroup *cgrp, struct cftype *cft,
				const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf, WRITE, blkio_update_group_bps);
}

/* throttle.read_iops_device writer: set the read iops limit */
static int blkcg_set_conf_iops_r(struct cgroup *cgrp, struct cftype *cft,
				 const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf, READ, blkio_update_group_iops);
}

/* throttle.write_iops_device writer: set the write iops limit */
static int blkcg_set_conf_iops_w(struct cgroup *cgrp, struct cftype *cft,
				 const char *buf)
{
	return blkcg_set_conf_u64(cgrp, cft, buf, WRITE, blkio_update_group_iops);
}
1234
#endif
1235

1236
/* control and statistics files exposed by the blkio cgroup controller */
struct cftype blkio_files[] = {
	/* proportional weight policy configuration */
	{
		.name = "weight_device",
		.read_seq_string = blkcg_print_weight_device,
		.write_string = blkcg_set_weight_device,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_seq_string = blkcg_print_weight,
		.write_u64 = blkcg_set_weight,
	},
	/* proportional weight policy statistics */
	{
		.name = "time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, time)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "sectors",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats_cpu, sectors)),
		.read_seq_string = blkcg_print_cpu_stat,
	},
	{
		.name = "io_service_bytes",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats_cpu, service_bytes)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
	{
		.name = "io_serviced",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats_cpu, serviced)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
	{
		.name = "io_service_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, service_time)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "io_wait_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, wait_time)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "io_merged",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, merged)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "io_queued",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, queued)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	/* blk-throttle configuration; .private is an offset into conf */
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct blkio_group_conf, bps[READ]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_bps_r,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct blkio_group_conf, bps[WRITE]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_bps_w,
		.max_write_len = 256,
	},

	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct blkio_group_conf, iops[READ]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_iops_r,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct blkio_group_conf, iops[WRITE]),
		.read_seq_string = blkcg_print_conf_u64,
		.write_string = blkcg_set_conf_iops_w,
		.max_write_len = 256,
	},
	/* blk-throttle statistics */
	{
		.name = "throttle.io_service_bytes",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
				offsetof(struct blkio_group_stats_cpu, service_bytes)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
				offsetof(struct blkio_group_stats_cpu, serviced)),
		.read_seq_string = blkcg_print_cpu_rwstat,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* debugging statistics */
	{
		.name = "avg_queue_size",
		.read_seq_string = blkcg_print_avg_queue_size,
	},
	{
		.name = "group_wait_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, group_wait_time)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "idle_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, idle_time)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "empty_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, empty_time)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "dequeue",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, dequeue)),
		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "unaccounted_time",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, unaccounted_time)),
		.read_seq_string = blkcg_print_stat,
	},
#endif
	{ }	/* terminate */
};

1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395
/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and responsible
 * for shooting down all blkgs associated with @cgroup.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			/*
			 * Couldn't take q lock in the correct nesting
			 * order; back off completely and retry so that a
			 * holder of q lock can make progress.
			 */
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

1421
/* cgroup destroy callback: free @cgroup's blkcg (the root blkcg is static) */
static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

1429
static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
1430
{
T
Tejun Heo 已提交
1431
	static atomic64_t id_seq = ATOMIC64_INIT(0);
1432 1433
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;
1434

1435
	if (!parent) {
1436 1437 1438 1439 1440 1441 1442 1443 1444
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
T
Tejun Heo 已提交
1445
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
1446 1447 1448 1449 1450 1451 1452
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	/* make @q visible to all_q_list walkers (policy [un]registration) */
	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/* blk-throttle is the only blkcg part drained here */
	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	/* take @q off the list so policy [un]registration no longer sees it */
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	/* NOTE(review): passes true unlike the bypass path's false —
	 * presumably this also destroys the root blkg; confirm against
	 * blkg_destroy_all()'s definition. */
	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}

1511 1512 1513 1514 1515 1516
/*
 * We cannot support shared io contexts, as we have no mean to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
1517
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1518
{
1519
	struct task_struct *task;
1520 1521 1522 1523
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
1524 1525 1526 1527 1528 1529 1530 1531 1532
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
1533 1534 1535
	return ret;
}

1536 1537 1538 1539 1540 1541 1542 1543 1544
/*
 * Put every registered queue into bypass mode and destroy its blkgs.
 * Returns with all_q_mutex held; must be paired with blkcg_bypass_end().
 */
static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		/* NOTE(review): false here vs true in blkcg_exit_queue() —
		 * presumably the root blkg survives; confirm. */
		blkg_destroy_all(q, false);
	}
}

/*
 * Take every queue out of bypass mode and release all_q_mutex acquired by
 * blkcg_bypass_start().
 */
static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}

1560 1561 1562 1563
/* blkio cgroup subsystem descriptor registered with the cgroup core */
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkio_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

1572 1573
/*
 * Register @blkiop as the implementation for its ->plid policy slot.  All
 * queues are bypassed while the policy table changes so that no request
 * is handled against a half-installed policy.
 */
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	/* bypass all queues; holds all_q_mutex until blkcg_bypass_end() */
	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	/* each policy id may be claimed only once */
	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	/* refresh root blkg policy data on every known queue */
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

/*
 * Unregister @blkiop; mirror image of blkio_policy_register() with the
 * same bypass/locking protocol.
 */
void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	/* bypass all queues; holds all_q_mutex until blkcg_bypass_end() */
	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	/* must currently be registered in its slot */
	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	/* refresh root blkg policy data on every known queue */
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);