/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a slice and after that slice is renewed */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)
#define DFL_IDLE_THRESHOLD_SSD (1000L) /* 1 ms */
#define DFL_IDLE_THRESHOLD_HD (100L * 1000) /* 100 ms */
#define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
/* default latency target is 0, eg, guarantee IO latency by default */
#define DFL_LATENCY_TARGET (0)

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * of a local or child group which can queue many bios at once filling up
 * the list starving others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root		pending_tree;	/* RB tree of active tgs */
	struct rb_node		*first_pending;	/* first node in the tree */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

enum {
	LIMIT_LOW,
	LIMIT_MAX,
	LIMIT_CNT,
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children.  qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies. This is the estimated time when group
	 * will unthrottle and is ready to dispatch more bio. It is used as
	 * key to sort active groups in service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules[2];

	/* internally used bytes per second rate limits */
	uint64_t bps[2][LIMIT_CNT];
	/* user configured bps limits */
	uint64_t bps_conf[2][LIMIT_CNT];

	/* internally used IOPS limits */
	unsigned int iops[2][LIMIT_CNT];
	/* user configured IOPS limits */
	unsigned int iops_conf[2][LIMIT_CNT];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in current slice */
	unsigned int io_disp[2];

	unsigned long last_low_overflow_time[2];

	uint64_t last_bytes_disp[2];
	unsigned int last_io_disp[2];

	unsigned long last_check_time;

	unsigned long latency_target; /* us */
	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	unsigned long last_finish_time; /* ns / 1024 */
	unsigned long checked_last_finish_time; /* ns / 1024 */
	unsigned long avg_idletime; /* ns / 1024 */
	unsigned long idletime_threshold; /* us */
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	unsigned int throtl_slice;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
	unsigned int limit_index;
	bool limit_valid[LIMIT_CNT];

	unsigned long dft_idletime_threshold; /* us */

	unsigned long low_upgrade_time;
	unsigned long low_downgrade_time;

	unsigned int scale;
};

static void throtl_pending_timer_fn(unsigned long arg);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}

/*
 * cgroup's limit in LIMIT_MAX is scaled if low limit is set. This scale is to
 * make the IO dispatch more smooth.
 * Scale up: linearly scale up according to lapsed time since upgrade. For
 *           every throtl_slice, the limit scales up 1/2 .low limit till the
 *           limit hits .max limit
 * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
 */
static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
{
	/* arbitrary value to avoid too big scale */
	if (td->scale < 4096 && time_after_eq(jiffies,
	    td->low_upgrade_time + td->scale * td->throtl_slice))
		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;

	return low + (low >> 1) * td->scale;
}
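
/*
 * Worked example (illustrative numbers, assuming throtl_slice = HZ / 10):
 * with low = 100MB/s, one slice after an upgrade td->scale is 1 and the
 * adjusted limit is 100 + 50 * 1 = 150MB/s; after 6 slices it is
 * 100 + 50 * 6 = 400MB/s, and the callers below clamp the result at the
 * group's .max limit.
 */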

static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	uint64_t ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return U64_MAX;

	td = tg->td;
	ret = tg->bps[rw][td->limit_index];
	if (ret == 0 && td->limit_index == LIMIT_LOW)
		return tg->bps[rw][LIMIT_MAX];

	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	unsigned int ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return UINT_MAX;
	td = tg->td;
	ret = tg->iops[rw][td->limit_index];
	if (ret == 0 && tg->td->limit_index == LIMIT_LOW)
		return tg->iops[rw][LIMIT_MAX];

	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
		if (adjusted > UINT_MAX)
			adjusted = UINT_MAX;
		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
		break;							\
	if ((__tg)) {							\
		char __pbuf[128];					\
									\
		blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));	\
		blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}

/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}
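
/*
 * Round-robin illustration (not from the source; qnode names are made up):
 * if the list is [A(a1, a2), B(b1)], where qnode A holds bios a1 and a2
 * and qnode B holds b1, successive throtl_pop_queued() calls yield a1,
 * b1, a2, because a still non-empty qnode is rotated to the tail after
 * each pop:
 *
 *	[A(a1, a2), B(b1)] -> pop a1 -> [B(b1), A(a2)]
 *	                   -> pop b1 -> [A(a2)]
 *	                   -> pop a2 -> []
 */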

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	INIT_LIST_HEAD(&sq->queued[0]);
	INIT_LIST_HEAD(&sq->queued[1]);
	sq->pending_tree = RB_ROOT;
	setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
		    (unsigned long)sq);
}

static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
{
	struct throtl_grp *tg;
	int rw;

	tg = kzalloc_node(sizeof(*tg), gfp, node);
	if (!tg)
		return NULL;

	throtl_service_queue_init(&tg->service_queue);

	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->bps[READ][LIMIT_MAX] = U64_MAX;
	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
	/* LIMIT_LOW will have default value 0 */

	tg->latency_target = DFL_LATENCY_TARGET;

	return &tg->pd;
}

static void throtl_pd_init(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td = blkg->q->td;
	struct throtl_service_queue *sq = &tg->service_queue;

	/*
	 * If on the default hierarchy, we switch to properly hierarchical
	 * behavior where limits on a given throtl_grp are applied to the
	 * whole subtree rather than just the group itself.  e.g. If 16M
	 * read_bps limit is set on the root group, the whole system can't
	 * exceed 16M for the device.
	 *
	 * If not on the default hierarchy, the broken flat hierarchy
	 * behavior is retained where all throtl_grps are treated as if
	 * they're all separate root groups right below throtl_data.
	 * Limits of a group don't interact with limits of other groups
	 * regardless of the position of the group in the hierarchy.
	 */
	sq->parent_sq = &td->service_queue;
	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
	tg->td = td;

	tg->idletime_threshold = td->dft_idletime_threshold;
}

/*
 * Set has_rules[] if @tg or any of its parents has limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	struct throtl_data *td = tg->td;
	int rw;

	for (rw = READ; rw <= WRITE; rw++)
		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
			(td->limit_valid[td->limit_index] &&
			 (tg_bps_limit(tg, rw) != U64_MAX ||
			  tg_iops_limit(tg, rw) != UINT_MAX));
}

static void throtl_pd_online(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	/*
	 * We don't want new groups to escape the limits of their ancestors.
	 * Update has_rules[] after a new group is brought online.
	 */
	tg_update_has_rules(tg);
}

static void blk_throtl_update_limit_valid(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;
	bool low_valid = false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
			low_valid = true;
	}
	rcu_read_unlock();

	td->limit_valid[LIMIT_LOW] = low_valid;
}

static void throtl_upgrade_state(struct throtl_data *td);
static void throtl_pd_offline(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	tg->bps[READ][LIMIT_LOW] = 0;
	tg->bps[WRITE][LIMIT_LOW] = 0;
	tg->iops[READ][LIMIT_LOW] = 0;
	tg->iops[WRITE][LIMIT_LOW] = 0;

	blk_throtl_update_limit_valid(tg->td);

	if (!tg->td->limit_valid[tg->td->limit_index])
		throtl_upgrade_state(tg->td);
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	del_timer_sync(&tg->service_queue.pending_timer);
	kfree(tg);
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	/* Service tree is empty */
	if (!parent_sq->nr_pending)
		return NULL;

	if (!parent_sq->first_pending)
		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

	if (parent_sq->first_pending)
		return rb_entry_tg(parent_sq->first_pending);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	if (parent_sq->first_pending == n)
		parent_sq->first_pending = NULL;
	rb_erase_init(n, &parent_sq->pending_tree);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		parent_sq->first_pending = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}

static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
	tg_service_queue_add(tg);
	tg->flags |= THROTL_TG_PENDING;
	tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(tg);
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	unsigned long max_expire = jiffies + 8 * sq_to_tg(sq)->td->throtl_slice;

	/*
	 * Since we are adjusting the throttle limit dynamically, the sleep
	 * time calculated according to the previous limit might be invalid.
	 * It's possible the cgroup's sleep time is very long and no other
	 * cgroup has IO running, so the limit change notification would be
	 * missed. Make sure the cgroup doesn't sleep too long.
	 */
	if (time_after(expires, max_expire))
		expires = max_expire;
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either timer
 * is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}

static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
		bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;

	/*
	 * Previous slice has expired. We must have trimmed it after last
	 * bio dispatch. That means since start of last slice, we never used
	 * that bandwidth. Do try to make use of that bandwidth while giving
	 * credit.
	 */
	if (time_after_eq(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps is unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if the slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially cgroup limit was very low resulting in high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner, then we need to reduce slice_end. A bogus high
	 * slice_end is bad because it does not allow a new slice to start.
	 */

	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / tg->td->throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
		HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
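
/*
 * Worked example (illustrative values): with throtl_slice = HZ / 10, a
 * bps limit of 1048576 (1MB/s) and 3 slices elapsed, bytes_trim is
 * 1048576 * (HZ / 10) * 3 / HZ, roughly 314572 bytes: the budget the
 * three elapsed slices would have allowed, so only unused credit from
 * the current slice carries over.
 */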

static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value: as the minimum iops
	 * can be 1, at most jiffy_elapsed_rnd should be equivalent to 1
	 * second, as we will allow a dispatch after 1 second and after that
	 * the slice should have been trimmed.
	 */

	tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return false;
}
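
/*
 * Worked example (illustrative values): with an iops limit of 100,
 * io_disp = 10 and jiffy_elapsed = HZ / 20 (50ms), the elapsed time is
 * rounded up to one throtl_slice (HZ / 10 here), so io_allowed =
 * 100 * (HZ / 10) / HZ = 10.  The 11th bio therefore waits roughly
 * 11 * HZ / 100 + 1 - HZ / 20 jiffies, about 60ms.
 */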

static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

	tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}
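
/*
 * Worked example (illustrative values): with a bps limit of 1048576
 * (1MB/s) and a freshly started slice (jiffy_elapsed_rnd = HZ / 10),
 * bytes_allowed is about 104857.  A queued 256KB bio then gives
 * extra_bytes of about 157287 and jiffy_wait = 157287 * HZ / 1048576,
 * roughly 150ms, before the bio may dispatch.
 */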

/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within the IO rate and can be dispatched
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the first bio
	 * queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg_bps_limit(tg, rw) == U64_MAX &&
	    tg_iops_limit(tg, rw) == UINT_MAX) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If previous slice expired, start a new one otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now. New slice is started only for empty throttle group.
	 * If there is queued bio, that means there should be an active
	 * slice and it should be extended instead.
	 */
	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw],
		    jiffies + tg->td->throtl_slice))
			throtl_extend_slice(tg, rw,
				jiffies + tg->td->throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}
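
/*
 * Example (illustrative): if the bps check above asks for a 100ms wait
 * and the iops check for 40ms, the bio must satisfy both limits, so
 * *wait is the maximum, 100ms, and the slice is extended to cover that
 * wait.
 */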

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
	tg->io_disp[rw]++;
	tg->last_bytes_disp[rw] += bio->bi_iter.bi_size;
	tg->last_io_disp[rw]++;

	/*
	 * BIO_THROTTLED is used to prevent the same bio from being throttled
	 * more than once as a throttled bio will go through blk-throtl the
	 * second time when it eventually gets issued.  Set it when a bio
	 * is being charged to a tg.
	 */
	if (!bio_flagged(bio, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
}

/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	bio = throtl_peek_queued(&sq->queued[READ]);
	if (bio)
		tg_may_dispatch(tg, bio, &read_wait);

	bio = throtl_peek_queued(&sq->queued[WRITE]);
	if (bio)
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
				child_tg->slice_start[rw]);
	}

}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/*
	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
	 * from @tg may put its reference and @parent_sq might end up
	 * getting released prematurely.  Remember the tg to put and put it
	 * after @bio is transferred to @parent_sq.
	 */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg().  If our parent is
	 * @td->service_queue, @bio is ready to be issued.  Put it on its
	 * bio_lists[] and decrease total number queued.  The caller is
	 * responsible for issuing these bios.
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
		start_parent_slice_with_credit(tg, parent_tg, rw);
	} else {
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum * 3 / 4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
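
/*
 * With the default throtl_grp_quantum of 8, max_nr_reads above is 6 and
 * max_nr_writes is 2, which yields the 75%/25% read/write split.
 */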

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg = throtl_rb_first(parent_sq);
		struct throtl_service_queue *sq;

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		sq = &tg->service_queue;

		throtl_dequeue_tg(tg);

		nr_disp += throtl_dispatch_tg(tg);

		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg);

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

static bool throtl_can_upgrade(struct throtl_data *td,
	struct throtl_grp *this_tg);
/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @arg: the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bios becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bio's from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bio's are issued.
 */
static void throtl_pending_timer_fn(unsigned long arg)
{
	struct throtl_service_queue *sq = (void *)arg;
	struct throtl_grp *tg = sq_to_tg(sq);
	struct throtl_data *td = sq_to_td(sq);
	struct request_queue *q = td->queue;
	struct throtl_service_queue *parent_sq;
	bool dispatched;
	int ret;

	spin_lock_irq(q->queue_lock);
	if (throtl_can_upgrade(td, NULL))
		throtl_upgrade_state(td);

again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(q->queue_lock);
		cpu_relax();
		spin_lock_irq(q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
		if (tg->flags & THROTL_TG_WAS_EMPTY) {
			tg_update_disptime(tg);
			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
				/* window is already open, repeat dispatching */
				sq = parent_sq;
				tg = sq_to_tg(sq);
				goto again;
			}
		}
	} else {
		/* reached the top-level, queue issuing */
		queue_work(kthrotld_workqueue, &td->dispatch_work);
	}
out_unlock:
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bio's reach the bio_lists[]
 * of throtl_data->service_queue.  Those bio's are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      dispatch_work);
	struct throtl_service_queue *td_sq = &td->service_queue;
	struct request_queue *q = td->queue;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(q->queue_lock);

	if (!bio_list_empty(&bio_list_on_stack)) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == U64_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == UINT_MAX)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static void tg_conf_updated(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&tg->service_queue,
		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

	/*
	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
	 * considered to have rules if either the tg itself or any of its
	 * ancestors has rules.  This identifies groups without any
	 * restrictions in the whole hierarchy and allows them to bypass
	 * blk-throttle.
	 */
	blkg_for_each_descendant_pre(blkg, pos_css, tg_to_blkg(tg))
		tg_update_has_rules(blkg_to_tg(blkg));

	/*
	 * We're already holding queue_lock and know @tg is valid.  Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITE. It might happen
	 * that a group's limit is dropped suddenly and we don't want to
	 * account recently dispatched IO with the new low rate.
	 */
	throtl_start_new_slice(tg, 0);
	throtl_start_new_slice(tg, 1);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(sq->parent_sq, true);
	}
}

static ssize_t tg_set_conf(struct kernfs_open_file *of,
			   char *buf, size_t nbytes, loff_t off, bool is_u64)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	int ret;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) != 1)
		goto out_finish;
	if (!v)
		v = U64_MAX;

	tg = blkg_to_tg(ctx.blkg);

	if (is_u64)
		*(u64 *)((void *)tg + of_cft(of)->private) = v;
	else
		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;

	tg_conf_updated(tg);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, false);
}

static struct cftype throtl_legacy_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "throttle.io_serviced",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_ios,
	},
	{ }	/* terminate */
};

static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
			 int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	const char *dname = blkg_dev_name(pd->blkg);
	char bufs[4][21] = { "max", "max", "max", "max" };
	u64 bps_dft;
	unsigned int iops_dft;
	char idle_time[26] = "";
	char latency_time[26] = "";

	if (!dname)
		return 0;

	if (off == LIMIT_LOW) {
		bps_dft = 0;
		iops_dft = 0;
	} else {
		bps_dft = U64_MAX;
		iops_dft = UINT_MAX;
	}

	if (tg->bps_conf[READ][off] == bps_dft &&
	    tg->bps_conf[WRITE][off] == bps_dft &&
	    tg->iops_conf[READ][off] == iops_dft &&
	    tg->iops_conf[WRITE][off] == iops_dft &&
	    (off != LIMIT_LOW ||
	     (tg->idletime_threshold == tg->td->dft_idletime_threshold &&
	      tg->latency_target == DFL_LATENCY_TARGET)))
		return 0;

	if (tg->bps_conf[READ][off] != bps_dft)
		snprintf(bufs[0], sizeof(bufs[0]), "%llu",
			tg->bps_conf[READ][off]);
	if (tg->bps_conf[WRITE][off] != bps_dft)
		snprintf(bufs[1], sizeof(bufs[1]), "%llu",
			tg->bps_conf[WRITE][off]);
	if (tg->iops_conf[READ][off] != iops_dft)
		snprintf(bufs[2], sizeof(bufs[2]), "%u",
			tg->iops_conf[READ][off]);
	if (tg->iops_conf[WRITE][off] != iops_dft)
		snprintf(bufs[3], sizeof(bufs[3]), "%u",
			tg->iops_conf[WRITE][off]);
	if (off == LIMIT_LOW) {
		if (tg->idletime_threshold == ULONG_MAX)
			strcpy(idle_time, " idle=max");
		else
			snprintf(idle_time, sizeof(idle_time), " idle=%lu",
				tg->idletime_threshold);

		if (tg->latency_target == ULONG_MAX)
			strcpy(latency_time, " latency=max");
		else
			snprintf(latency_time, sizeof(latency_time),
				" latency=%lu", tg->latency_target);
	}

	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
		   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
		   latency_time);
	return 0;
}

static int tg_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static ssize_t tg_set_limit(struct kernfs_open_file *of,
			  char *buf, size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	u64 v[4];
	unsigned long idle_time;
	unsigned long latency_time;
	int ret;
	int index = of_cft(of)->private;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);

	v[0] = tg->bps_conf[READ][index];
	v[1] = tg->bps_conf[WRITE][index];
	v[2] = tg->iops_conf[READ][index];
	v[3] = tg->iops_conf[WRITE][index];

	idle_time = tg->idletime_threshold;
	latency_time = tg->latency_target;
	while (true) {
		char tok[27];	/* wiops=18446744073709551616 */
		char *p;
		u64 val = U64_MAX;
		int len;

		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
			break;
		if (tok[0] == '\0')
			break;
		ctx.body += len;

		ret = -EINVAL;
		p = tok;
		strsep(&p, "=");
		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
			goto out_finish;

		ret = -ERANGE;
		if (!val)
			goto out_finish;

		ret = -EINVAL;
		if (!strcmp(tok, "rbps"))
			v[0] = val;
		else if (!strcmp(tok, "wbps"))
			v[1] = val;
		else if (!strcmp(tok, "riops"))
			v[2] = min_t(u64, val, UINT_MAX);
		else if (!strcmp(tok, "wiops"))
			v[3] = min_t(u64, val, UINT_MAX);
		else if (index == LIMIT_LOW && !strcmp(tok, "idle"))
			idle_time = val;
		else if (index == LIMIT_LOW && !strcmp(tok, "latency"))
			latency_time = val;
		else
			goto out_finish;
	}

	tg->bps_conf[READ][index] = v[0];
	tg->bps_conf[WRITE][index] = v[1];
	tg->iops_conf[READ][index] = v[2];
	tg->iops_conf[WRITE][index] = v[3];

	if (index == LIMIT_MAX) {
		tg->bps[READ][index] = v[0];
		tg->bps[WRITE][index] = v[1];
		tg->iops[READ][index] = v[2];
		tg->iops[WRITE][index] = v[3];
	}
	tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
		tg->bps_conf[READ][LIMIT_MAX]);
	tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
		tg->bps_conf[WRITE][LIMIT_MAX]);
	tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
		tg->iops_conf[READ][LIMIT_MAX]);
	tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
		tg->iops_conf[WRITE][LIMIT_MAX]);

	if (index == LIMIT_LOW) {
		blk_throtl_update_limit_valid(tg->td);
		if (tg->td->limit_valid[LIMIT_LOW])
			tg->td->limit_index = LIMIT_LOW;
		tg->idletime_threshold = (idle_time == ULONG_MAX) ?
			ULONG_MAX : idle_time;
		tg->latency_target = (latency_time == ULONG_MAX) ?
			ULONG_MAX : latency_time;
	}
	tg_conf_updated(tg);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}
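
/*
 * Example of the format parsed above, written from userspace (device
 * numbers and values are illustrative):
 *
 *	# echo "8:16 rbps=2097152 wbps=max idle=1000" > io.low
 *
 * Keys that are not mentioned keep their current values, and "max"
 * clears a limit; "idle" and "latency" are accepted only on the low
 * limit file.
 */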

static struct cftype throtl_files[] = {
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_LOW,
	},
#endif
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_limit,
		.write = tg_set_limit,
		.private = LIMIT_MAX,
	},
	{ }	/* terminate */
};

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
	.dfl_cftypes		= throtl_files,
	.legacy_cftypes		= throtl_legacy_files,

	.pd_alloc_fn		= throtl_pd_alloc,
	.pd_init_fn		= throtl_pd_init,
	.pd_online_fn		= throtl_pd_online,
	.pd_offline_fn		= throtl_pd_offline,
	.pd_free_fn		= throtl_pd_free,
};

static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
{
	unsigned long rtime = jiffies, wtime = jiffies;

	if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
		rtime = tg->last_low_overflow_time[READ];
	if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
		wtime = tg->last_low_overflow_time[WRITE];
	return min(rtime, wtime);
}

/* tg should not be an intermediate node */
static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq;
	struct throtl_grp *parent = tg;
	unsigned long ret = __tg_last_low_overflow_time(tg);

	while (true) {
		parent_sq = parent->service_queue.parent_sq;
		parent = sq_to_tg(parent_sq);
		if (!parent)
			break;

		/*
		 * If the parent doesn't have a low limit, it always reaches its
		 * low limit. Its overflow time is useless for its children.
		 */
		if (!parent->bps[READ][LIMIT_LOW] &&
		    !parent->iops[READ][LIMIT_LOW] &&
		    !parent->bps[WRITE][LIMIT_LOW] &&
		    !parent->iops[WRITE][LIMIT_LOW])
			continue;
		if (time_after(__tg_last_low_overflow_time(parent), ret))
			ret = __tg_last_low_overflow_time(parent);
	}
	return ret;
}

static bool throtl_tg_is_idle(struct throtl_grp *tg)
{
	/*
	 * cgroup is idle if:
	 * - a single idle period is too long, longer than a fixed value (in
	 *   case the user configures too big a threshold) or 4 times the slice
	 * - average think time is more than threshold
	 */
	unsigned long time = jiffies_to_usecs(4 * tg->td->throtl_slice);

	time = min_t(unsigned long, MAX_IDLE_TIME, time);
	return (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
	       tg->avg_idletime > tg->idletime_threshold;
}
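
/*
 * Example (illustrative, assuming throtl_slice = HZ / 10): "time" above
 * is min(MAX_IDLE_TIME, 400ms), so a cgroup counts as idle if its last
 * IO finished more than 400ms ago or if its average think time exceeds
 * its idletime_threshold.
 */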

static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool read_limit, write_limit;

	/*
	 * if the cgroup reaches its low limit (if the low limit is 0, the
	 * cgroup always reaches it), it's ok to upgrade to the next limit
	 */
	read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
	write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
	if (!read_limit && !write_limit)
		return true;
	if (read_limit && sq->nr_queued[READ] &&
	    (!write_limit || sq->nr_queued[WRITE]))
		return true;
	if (write_limit && sq->nr_queued[WRITE] &&
	    (!read_limit || sq->nr_queued[READ]))
		return true;

	if (time_after_eq(jiffies,
		tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
	    throtl_tg_is_idle(tg))
		return true;
	return false;
}

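/*
 * Walk from @tg towards the root; the hierarchy may upgrade as soon as
 * @tg or any of its ancestors satisfies the upgrade conditions.
 */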
static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
{
	while (true) {
		if (throtl_tg_can_upgrade(tg))
			return true;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			return false;
	}
	return false;
}

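/*
 * Upgrade the whole device from LIMIT_LOW to LIMIT_MAX, but only if every
 * leaf cgroup can upgrade and we are at least one slice past the last
 * downgrade.
 */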
static bool throtl_can_upgrade(struct throtl_data *td,
	struct throtl_grp *this_tg)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	if (td->limit_index != LIMIT_LOW)
		return false;

	if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
		return false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg == this_tg)
			continue;
		if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
			continue;
		if (!throtl_hierarchy_can_upgrade(tg)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

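/*
 * Per-tg check, rate-limited to once per throtl_slice: try a device-wide
 * upgrade once a full slice has passed since @tg last crossed its low
 * limit.
 */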
static void throtl_upgrade_check(struct throtl_grp *tg)
{
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_LOW)
		return;

	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	tg->last_check_time = now;

	if (!time_after_eq(now,
	     __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
		return;

	if (throtl_can_upgrade(tg->td, NULL))
		throtl_upgrade_state(tg->td);
}

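/*
 * Switch the device to LIMIT_MAX and kick every group's dispatch so
 * already-queued bios are re-evaluated against the higher limits.
 */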
static void throtl_upgrade_state(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->scale = 0;
	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);
		struct throtl_service_queue *sq = &tg->service_queue;

		tg->disptime = jiffies - 1;
		throtl_select_dispatch(sq);
		throtl_schedule_next_dispatch(sq, false);
	}
	rcu_read_unlock();
	throtl_select_dispatch(&td->service_queue);
	throtl_schedule_next_dispatch(&td->service_queue, false);
	queue_work(kthrotld_workqueue, &td->dispatch_work);
}

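/*
 * td->scale records how far the upper limit was scaled up while in
 * LIMIT_MAX.  Halve it on each call and, while some scale remains, only
 * pull low_upgrade_time back instead of downgrading right away.
 */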
static void throtl_downgrade_state(struct throtl_data *td, int new)
{
	td->scale /= 2;

	if (td->scale) {
		td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
		return;
	}

	td->limit_index = new;
	td->low_downgrade_time = jiffies;
}

static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
{
	struct throtl_data *td = tg->td;
	unsigned long now = jiffies;

	/*
	 * If the cgroup is below its low limit, consider downgrading, which
	 * throttles the other cgroups
	 */
	if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
	    time_after_eq(now, tg_last_low_overflow_time(tg) +
					td->throtl_slice) &&
	    (!throtl_tg_is_idle(tg) ||
	     !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
		return true;
	return false;
}

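/* every tg on the path to the root must agree before we downgrade */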
static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
{
	while (true) {
		if (!throtl_tg_can_downgrade(tg))
			return false;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			break;
	}
	return true;
}

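/*
 * Recompute the bps/iops this tg actually dispatched since the last
 * check, refresh last_low_overflow_time from that, and downgrade the
 * device to LIMIT_LOW if the whole hierarchy has stayed under its low
 * limit.
 */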
static void throtl_downgrade_check(struct throtl_grp *tg)
{
	uint64_t bps;
	unsigned int iops;
	unsigned long elapsed_time;
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_MAX ||
	    !tg->td->limit_valid[LIMIT_LOW])
		return;
	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
		return;
	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	elapsed_time = now - tg->last_check_time;
	tg->last_check_time = now;

	if (time_before(now, tg_last_low_overflow_time(tg) +
			tg->td->throtl_slice))
		return;

	if (tg->bps[READ][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[READ] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->bps[WRITE][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[WRITE] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	if (tg->iops[READ][LIMIT_LOW]) {
		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
		if (iops >= tg->iops[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->iops[WRITE][LIMIT_LOW]) {
		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
		if (iops >= tg->iops[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	/*
	 * If the cgroup is below its low limit, consider downgrading, which
	 * throttles the other cgroups
	 */
	if (throtl_hierarchy_can_downgrade(tg))
		throtl_downgrade_state(tg->td, LIMIT_LOW);

	tg->last_bytes_disp[READ] = 0;
	tg->last_bytes_disp[WRITE] = 0;
	tg->last_io_disp[READ] = 0;
	tg->last_io_disp[WRITE] = 0;
}

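/*
 * Track the "think time" between the last IO completion and the next
 * submission.  avg_idletime is an exponentially weighted moving average
 * with weight 7/8: avg = (avg * 7 + idletime) / 8.
 */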
static void blk_throtl_update_idletime(struct throtl_grp *tg)
{
	unsigned long now = ktime_get_ns() >> 10;
	unsigned long last_finish_time = tg->last_finish_time;

	if (now <= last_finish_time || last_finish_time == 0 ||
	    last_finish_time == tg->checked_last_finish_time)
		return;

	tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
	tg->checked_last_finish_time = last_finish_time;
}

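/*
 * Decide whether @bio needs to be throttled.  Returns true if the bio was
 * queued on a throtl_grp and the caller must not issue it; false if the
 * bio may be dispatched directly.
 */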
bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
		    struct bio *bio)
{
	struct throtl_qnode *qn = NULL;
	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	bool throttled = false;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* see throtl_charge_bio() */
	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
		goto out;

	spin_lock_irq(q->queue_lock);

	if (unlikely(blk_queue_bypass(q)))
		goto out_unlock;

	ret = bio_associate_current(bio);
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	if (ret == 0 || ret == -EBUSY)
		bio->bi_cg_private = tg;
#endif
	blk_throtl_update_idletime(tg);

	sq = &tg->service_queue;

again:
	while (true) {
		if (tg->last_low_overflow_time[rw] == 0)
			tg->last_low_overflow_time[rw] = jiffies;
		throtl_downgrade_check(tg);
		throtl_upgrade_check(tg);
		/* throtl is FIFO - if bios are already queued, this one should queue too */
		if (sq->nr_queued[rw])
			break;

		/* if above limits, break to queue */
		if (!tg_may_dispatch(tg, bio, NULL)) {
			tg->last_low_overflow_time[rw] = jiffies;
			if (throtl_can_upgrade(tg->td, tg)) {
				throtl_upgrade_state(tg->td);
				goto again;
			}
			break;
		}

		/* within limits, let's charge and dispatch directly */
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time and the slice keeps on extending and trim
		 * is not called for a long time.  Now if limits are reduced
		 * suddenly, we take into account all the IO dispatched so
		 * far at the new low rate and newly queued IO gets a really
		 * long dispatch time.
		 *
		 * So keep on trimming the slice even if the bio is not queued.
		 */
		throtl_trim_slice(tg, rw);

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder.  If we're already at the top, it
		 * can be executed directly.
		 */
		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg)
			goto out_unlock;
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
		   tg_bps_limit(tg, rw),
		   tg->io_disp[rw], tg_iops_limit(tg, rw),
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	tg->last_low_overflow_time[rw] = jiffies;

	tg->td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio.  The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly if
	 * its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out:
	/*
	 * As multiple blk-throtls may stack in the same issue path, we
	 * don't want bios to leave with the flag set.  Clear the flag if
	 * being issued.
	 */
	if (!throttled)
		bio_clear_flag(bio, BIO_THROTTLED);
	return throttled;
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
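/* record the completion time that blk_throtl_update_idletime() reads */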
void blk_throtl_bio_endio(struct bio *bio)
{
	struct throtl_grp *tg;

	tg = bio->bi_cg_private;
	if (!tg)
		return;
	bio->bi_cg_private = NULL;

	tg->last_finish_time = ktime_get_ns() >> 10;
}
#endif

/*
 * Dispatch all bios from all children tg's queued on @parent_sq.  On
 * return, @parent_sq is guaranteed to not have any active children tg's
 * and all bios from previously active tg's are on @parent_sq->queued[].
 */
static void tg_drain_bios(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	while ((tg = throtl_rb_first(parent_sq))) {
		struct throtl_service_queue *sq = &tg->service_queue;
		struct bio *bio;

		throtl_dequeue_tg(tg);

		while ((bio = throtl_peek_queued(&sq->queued[READ])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
	}
}

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	struct bio *bio;
	int rw;

	queue_lockdep_assert_held(q);
	rcu_read_lock();

	/*
	 * Drain each tg while doing post-order walk on the blkg tree, so
	 * that all bios are propagated to td->service_queue.  It'd be
	 * better to walk service_queue tree directly but blkg walk is
	 * easier.
	 */
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);

	/* finally, transfer bios from top-level tg's into the td */
	tg_drain_bios(&td->service_queue);

	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	/* all bios now should be in td->service_queue, issue them */
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
						NULL)))
			generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}

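/* allocate the per-queue throttle data and activate the blkcg policy */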
int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue);

	q->td = td;
	td->queue = q;

	td->limit_valid[LIMIT_MAX] = true;
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->low_downgrade_time = jiffies;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret)
		kfree(td);
	return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	kfree(q->td);
}

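/*
 * Called once the queue is fully initialized: choose defaults that depend
 * on the device type (SSD vs. rotational) and propagate them to groups
 * created before registration.
 */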
void blk_throtl_register_queue(struct request_queue *q)
{
	struct throtl_data *td;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	td = q->td;
	BUG_ON(!td);

	if (blk_queue_nonrot(q)) {
		td->throtl_slice = DFL_THROTL_SLICE_SSD;
		td->dft_idletime_threshold = DFL_IDLE_THRESHOLD_SSD;
	} else {
		td->throtl_slice = DFL_THROTL_SLICE_HD;
		td->dft_idletime_threshold = DFL_IDLE_THRESHOLD_HD;
	}
#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
	/* if no low limit, use the previous default */
	td->throtl_slice = DFL_THROTL_SLICE_HD;
#endif

	/*
	 * Some tgs are created before the queue is fully initialized, e.g.
	 * before the nonrot flag is set, so propagate the default idle
	 * threshold to them here.
	 */
	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		tg->idletime_threshold = td->dft_idletime_threshold;
	}
	rcu_read_unlock();
}

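/*
 * sysfs knob for the sampling (slice) time in msecs.  With the standard
 * block sysfs layout this is typically exposed as
 * /sys/block/<dev>/queue/throttle_sample_time, e.g.:
 *
 *	echo 50 > /sys/block/sda/queue/throttle_sample_time
 */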
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
{
	if (!q->td)
		return -EINVAL;
	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
}

ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count)
{
	unsigned long v;
	unsigned long t;

	if (!q->td)
		return -EINVAL;
	if (kstrtoul(page, 10, &v))
		return -EINVAL;
	t = msecs_to_jiffies(v);
	if (t == 0 || t > MAX_THROTL_SLICE)
		return -EINVAL;
	q->td->throtl_slice = t;
	return count;
}
#endif

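/* module init: create the dispatch workqueue and register the blkcg policy */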
static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);