/*
 * buffered writeback throttling. loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time.
 * - If the minimum latency in the above window exceeds some target, increment
 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
 *   window is then shrunk to 100 / sqrt(scaling step + 1).
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain status quo.
 * - If latencies look good, decrement scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   will temporarily boost write performance, snapping back to a stable
 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 *   positive scaling steps where we shrink the monitoring window, a negative
 *   scaling step retains the default step==0 window size.
 *
 * Copyright (C) 2016 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-wbt.h"
#include "blk-rq-qos.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

static inline void wbt_clear_state(struct request *rq)
{
	rq->wbt_flags = 0;
}

static inline enum wbt_flags wbt_flags(struct request *rq)
{
	return rq->wbt_flags;
}

static inline bool wbt_is_tracked(struct request *rq)
{
	return rq->wbt_flags & WBT_TRACKED;
}

static inline bool wbt_is_read(struct request *rq)
{
	return rq->wbt_flags & WBT_READ;
}

enum {
	/*
	 * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
	 * from here depending on device stats
	 */
	RWB_DEF_DEPTH	= 16,

	/*
	 * 100msec window
	 */
	RWB_WINDOW_NSEC		= 100 * 1000 * 1000ULL,

	/*
	 * Disregard stats, if we don't meet this minimum
	 */
	RWB_MIN_WRITE_SAMPLES	= 3,

	/*
	 * If we have this number of consecutive windows with not enough
	 * information to scale up or down, scale up.
	 */
	RWB_UNKNOWN_BUMP	= 5,
};

static inline bool rwb_enabled(struct rq_wb *rwb)
{
	return rwb && rwb->wb_normal != 0;
}

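/*
 * Record the current time in jiffies in *var, if wbt is enabled. Used to
 * remember when we last issued or completed a read.
 */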
static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
	if (rwb_enabled(rwb)) {
		const unsigned long cur = jiffies;

		if (cur != *var)
			*var = cur;
	}
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
	struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;

	return time_before(jiffies, wb->dirty_sleep + HZ);
}

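/*
 * Pick the wait queue for this request type: kswapd writeback and discards
 * each have their own queue, everything else shares the background queue.
 */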
static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
					  enum wbt_flags wb_acct)
{
	if (wb_acct & WBT_KSWAPD)
		return &rwb->rq_wait[WBT_RWQ_KSWAPD];
	else if (wb_acct & WBT_DISCARD)
		return &rwb->rq_wait[WBT_RWQ_DISCARD];

	return &rwb->rq_wait[WBT_RWQ_BG];
}

static void rwb_wake_all(struct rq_wb *rwb)
{
	int i;

	for (i = 0; i < WBT_NUM_RWQ; i++) {
		struct rq_wait *rqw = &rwb->rq_wait[i];

		if (wq_has_sleeper(&rqw->wait))
			wake_up_all(&rqw->wait);
	}
}

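/*
 * Drop one inflight count for @rqw and, if we are now below the wake-up
 * limit for this type of IO, wake up anyone waiting for a slot.
 */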
static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
			 enum wbt_flags wb_acct)
{
	int inflight, limit;

	inflight = atomic_dec_return(&rqw->inflight);

	/*
	 * wbt got disabled with IO in flight. Wake up any potential
	 * waiters, we don't have to do more than that.
	 */
	if (unlikely(!rwb_enabled(rwb))) {
		rwb_wake_all(rwb);
		return;
	}

	/*
	 * For discards, our limit is always the background. For writes, if
	 * the device does write back caching, drop further down before we
	 * wake people up.
	 */
	if (wb_acct & WBT_DISCARD)
		limit = rwb->wb_background;
	else if (rwb->wc && !wb_recent_wait(rwb))
		limit = 0;
	else
		limit = rwb->wb_normal;

	/*
	 * Don't wake anyone up if we are above the normal limit.
	 */
	if (inflight && inflight >= limit)
		return;

	if (wq_has_sleeper(&rqw->wait)) {
		int diff = limit - inflight;

		if (!inflight || diff >= rwb->wb_background / 2)
			wake_up_all(&rqw->wait);
	}
}

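/*
 * Completion path for a tracked IO: release its slot on the matching
 * wait queue.
 */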
static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct rq_wait *rqw;

	if (!(wb_acct & WBT_TRACKED))
		return;

	rqw = get_rq_wait(rwb, wb_acct);
	wbt_rqw_done(rwb, rqw, wb_acct);
}

/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged, when the request gets freed.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!wbt_is_tracked(rq)) {
		if (rwb->sync_cookie == rq) {
			rwb->sync_issue = 0;
			rwb->sync_cookie = NULL;
		}

		if (wbt_is_read(rq))
			wb_timestamp(rwb, &rwb->last_comp);
	} else {
		WARN_ON_ONCE(rq == rwb->sync_cookie);
		__wbt_done(rqos, wbt_flags(rq));
	}
	wbt_clear_state(rq);
}

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
	/*
	 * We need at least one read sample, and a minimum of
	 * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
	 * that it's writes impacting us, and not just some sole read on
	 * a device that is in a lower power state.
	 */
	return (stat[READ].nr_samples >= 1 &&
		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}

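/*
 * How long the currently tracked sync request has been outstanding, in
 * nanoseconds, or 0 if we aren't tracking one.
 */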
static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
	u64 now, issue = READ_ONCE(rwb->sync_issue);

	if (!issue || !rwb->sync_cookie)
		return 0;

	now = ktime_to_ns(ktime_get());
	return now - issue;
}

enum {
	LAT_OK = 1,
	LAT_UNKNOWN,
	LAT_UNKNOWN_WRITES,
	LAT_EXCEEDED,
};

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;
	u64 thislat;

	/*
	 * If our stored sync issue exceeds the window size, or it
	 * exceeds our min target AND we haven't logged any entries,
	 * flag the latency as exceeded. wbt works off completion latencies,
	 * but for a flooded device, a single sync IO can take a long time
	 * to complete after being issued. If this time exceeds our
	 * monitoring window AND we didn't see any other completions in that
	 * window, then count that sync IO as a violation of the latency.
	 */
	thislat = rwb_sync_issue_lat(rwb);
	if (thislat > rwb->cur_win_nsec ||
	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
		trace_wbt_lat(bdi, thislat);
		return LAT_EXCEEDED;
	}

	/*
	 * No read/write mix, if stat isn't valid
	 */
	if (!stat_sample_valid(stat)) {
		/*
		 * If we had writes in this stat window and the window is
		 * current, we're only doing writes. If a task recently
		 * waited or still has writes in flight, consider us doing
		 * just writes as well.
		 */
		if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
		    wbt_inflight(rwb))
			return LAT_UNKNOWN_WRITES;
		return LAT_UNKNOWN;
	}

	/*
	 * If the 'min' latency exceeds our target, step down.
	 */
	if (stat[READ].min > rwb->min_lat_nsec) {
		trace_wbt_lat(bdi, stat[READ].min);
		trace_wbt_stat(bdi, stat);
		return LAT_EXCEEDED;
	}

	if (rqd->scale_step)
		trace_wbt_stat(bdi, stat);

	return LAT_OK;
}

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;

	trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
			rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}

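/*
 * Derive the wake-up limits from the current max depth: "normal" is about
 * half of max, "background" about a quarter, with special cases for very
 * small depths and for wbt being disabled (min_lat_nsec == 0).
 */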
static void calc_wb_limits(struct rq_wb *rwb)
{
	if (rwb->min_lat_nsec == 0) {
		rwb->wb_normal = rwb->wb_background = 0;
	} else if (rwb->rq_depth.max_depth <= 2) {
		rwb->wb_normal = rwb->rq_depth.max_depth;
		rwb->wb_background = 1;
	} else {
		rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
		rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
	}
}

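/* Bump the queue depth up one step and recompute the wake-up limits. */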
static void scale_up(struct rq_wb *rwb)
{
	rq_depth_scale_up(&rwb->rq_depth);
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_trace_step(rwb, "scale up");
}

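/*
 * Scale the queue depth down one step and wake any waiters so they can
 * re-evaluate against the new limits.
 */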
static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
	rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_wake_all(rwb);
	rwb_trace_step(rwb, "scale down");
}

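/*
 * Arm the stats window timer. When we are throttling (scale_step > 0),
 * shrink the monitoring window so we react to changes more quickly.
 */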
static void rwb_arm_timer(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	if (rqd->scale_step > 0) {
		/*
		 * We should speed this up, using some variant of a fast
		 * integer inverse square root calculation. Since we only do
		 * this for every window expiration, it's not a huge deal,
		 * though.
		 */
		rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
					int_sqrt((rqd->scale_step + 1) << 8));
	} else {
		/*
		 * For step < 0, we don't want to increase/decrease the
		 * window size.
		 */
		rwb->cur_win_nsec = rwb->win_nsec;
	}

	blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}

static void wb_timer_fn(struct blk_stat_callback *cb)
{
	struct rq_wb *rwb = cb->data;
	struct rq_depth *rqd = &rwb->rq_depth;
	unsigned int inflight = wbt_inflight(rwb);
	int status;

	status = latency_exceeded(rwb, cb->stat);

	trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
			inflight);

	/*
	 * If we exceeded the latency target, step down. If we did not,
	 * step one level up. If we don't know enough to say either exceeded
	 * or ok, then don't do anything.
	 */
	switch (status) {
	case LAT_EXCEEDED:
		scale_down(rwb, true);
		break;
	case LAT_OK:
		scale_up(rwb);
		break;
	case LAT_UNKNOWN_WRITES:
		/*
		 * We started at the center step, but don't have a valid
		 * read/write sample, but we do have writes going on.
		 * Allow step to go negative, to increase write perf.
		 */
		scale_up(rwb);
		break;
	case LAT_UNKNOWN:
		if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
			break;
		/*
		 * We get here when we previously scaled down the depth, and we
		 * currently don't have a valid read/write sample. For that
		 * case, slowly return to center state (step == 0).
		 */
		if (rqd->scale_step > 0)
			scale_up(rwb);
		else if (rqd->scale_step < 0)
			scale_down(rwb, false);
		break;
	default:
		break;
	}

	/*
	 * Re-arm timer, if we have IO in flight
	 */
	if (rqd->scale_step || inflight)
		rwb_arm_timer(rwb);
}

static void __wbt_update_limits(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	rqd->scale_step = 0;
	rqd->scaled_max = false;

	rq_depth_calc_max_depth(rqd);
	calc_wb_limits(rwb);

	rwb_wake_all(rwb);
}

void wbt_update_limits(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return;
	__wbt_update_limits(RQWB(rqos));
}

u64 wbt_get_min_lat(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return 0;
	return RQWB(rqos)->min_lat_nsec;
}

void wbt_set_min_lat(struct request_queue *q, u64 val)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return;
	RQWB(rqos)->min_lat_nsec = val;
	RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
	__wbt_update_limits(RQWB(rqos));
}


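/*
 * True if we issued or completed unrelated IO within the last 100ms.
 */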
static bool close_io(struct rq_wb *rwb)
{
	const unsigned long now = jiffies;

	return time_before(now, rwb->last_issue + HZ / 10) ||
		time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO	(REQ_SYNC | REQ_META | REQ_PRIO)

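/*
 * Pick the inflight limit for this IO: max depth for high priority or
 * kswapd writes, the background limit for discards and background
 * writeback, the normal limit otherwise. UINT_MAX if wbt is disabled.
 */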
static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
{
	unsigned int limit;

	/*
	 * If we got disabled, just return UINT_MAX. This ensures that
	 * we'll properly inc a new IO, and dec+wakeup at the end.
	 */
	if (!rwb_enabled(rwb))
		return UINT_MAX;

	if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
		return rwb->wb_background;

	/*
	 * At this point we know it's a buffered write. If this is
	 * kswapd trying to free memory, or REQ_SYNC is set, then
	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
	 * that. If the write is marked as a background write, then use
	 * the idle limit, or go to normal if we haven't had competing
	 * IO for a bit.
	 */
	if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
		limit = rwb->rq_depth.max_depth;
	else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
		/*
		 * If less than 100ms since we completed unrelated IO,
		 * limit us to half the depth for background writeback.
		 */
		limit = rwb->wb_background;
	} else
		limit = rwb->wb_normal;

	return limit;
}

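/*
 * Per-waiter state for sleeping on a wbt wait queue. The custom wake
 * function hands the inflight slot directly to the task it wakes.
 */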
struct wbt_wait_data {
	struct wait_queue_entry wq;
	struct task_struct *task;
	struct rq_wb *rwb;
	struct rq_wait *rqw;
	unsigned long rw;
	bool got_token;
};

static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
			     int wake_flags, void *key)
{
	struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data,
							wq);

	/*
	 * If we fail to get a budget, return -1 to interrupt the wake up
	 * loop in __wake_up_common.
	 */
	if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw)))
		return -1;

	data->got_token = true;
	list_del_init(&curr->entry);
	wake_up_process(data->task);
	return 1;
}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
		       unsigned long rw, spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
	struct wbt_wait_data data = {
		.wq = {
			.func	= wbt_wake_function,
			.entry	= LIST_HEAD_INIT(data.wq.entry),
		},
		.task = current,
		.rwb = rwb,
		.rqw = rqw,
		.rw = rw,
	};
	bool has_sleeper;

	has_sleeper = wq_has_sleeper(&rqw->wait);
	if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
		return;

	prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
	do {
		if (data.got_token)
			break;

		if (!has_sleeper &&
		    rq_wait_inc_below(rqw, get_limit(rwb, rw))) {
			finish_wait(&rqw->wait, &data.wq);

			/*
			 * We raced with wbt_wake_function() getting a token,
			 * which means we now have two. Put our local token
			 * and wake anyone else potentially waiting for one.
			 */
			if (data.got_token)
				wbt_rqw_done(rwb, rqw, wb_acct);
			break;
		}

		if (lock) {
			spin_unlock_irq(lock);
			io_schedule();
			spin_lock_irq(lock);
		} else
			io_schedule();

		has_sleeper = false;
	} while (1);

	finish_wait(&rqw->wait, &data.wq);
}

static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_WRITE:
		/*
		 * Don't throttle WRITE_ODIRECT
		 */
		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
		    (REQ_SYNC | REQ_IDLE))
			return false;
		/* fallthrough */
	case REQ_OP_DISCARD:
		return true;
	default:
		return false;
	}
}

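/*
 * Map a bio to wbt flags: reads are marked WBT_READ, throttled writes and
 * discards are marked WBT_TRACKED (plus WBT_KSWAPD/WBT_DISCARD as needed).
 */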
static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{
	enum wbt_flags flags = 0;

	if (!rwb_enabled(rwb))
		return 0;

	if (bio_op(bio) == REQ_OP_READ) {
		flags = WBT_READ;
	} else if (wbt_should_throttle(rwb, bio)) {
		if (current_is_kswapd())
			flags |= WBT_KSWAPD;
		if (bio_op(bio) == REQ_OP_DISCARD)
			flags |= WBT_DISCARD;
		flags |= WBT_TRACKED;
	}
	return flags;
}

static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
	__wbt_done(rqos, flags);
}

/*
 * May sleep, if we have exceeded the writeback limits. Caller can pass
 * in an irq held spinlock, if it holds one when calling this function.
 * If we do sleep, we'll release and re-grab it.
 */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags;

	flags = bio_to_wbt_flags(rwb, bio);
	if (!(flags & WBT_TRACKED)) {
		if (flags & WBT_READ)
			wb_timestamp(rwb, &rwb->last_issue);
		return;
	}

	__wbt_wait(rwb, flags, bio->bi_opf, lock);

	if (!blk_stat_is_active(rwb->cb))
		rwb_arm_timer(rwb);
}

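/* Copy the wbt flags derived from the bio over to the request. */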
static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}

void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!rwb_enabled(rwb))
		return;

	/*
	 * Track sync issue, in case it takes a long time to complete. Allows us
	 * to react quicker, if a sync IO takes a long time to complete. Note
	 * that this is just a hint. The request can go away when it completes,
	 * so it's important we never dereference it. We only use the address to
	 * compare with, which is why we store the sync_issue time locally.
	 */
	if (wbt_is_read(rq) && !rwb->sync_issue) {
		rwb->sync_cookie = rq;
		rwb->sync_issue = rq->io_start_time_ns;
	}
}

void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!rwb_enabled(rwb))
		return;

	if (rq == rwb->sync_cookie) {
		rwb->sync_issue = 0;
		rwb->sync_cookie = NULL;
	}
}

void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (rqos) {
		RQWB(rqos)->rq_depth.queue_depth = depth;
		__wbt_update_limits(RQWB(rqos));
	}
}

void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (rqos)
		RQWB(rqos)->wc = write_cache_on;
}

/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	/* Throttling already enabled? */
	if (rqos)
		return;

	/* Queue not registered? Maybe shutting down... */
	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return;

	if ((q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ)) ||
	    (q->request_fn && IS_ENABLED(CONFIG_BLK_WBT_SQ)))
		wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{
	/*
	 * We default to 2msec for non-rotational storage, and 75msec
	 * for rotational storage.
	 */
	if (blk_queue_nonrot(q))
		return 2000000ULL;
	else
		return 75000000ULL;
}

static int wbt_data_dir(const struct request *rq)
{
	const int op = req_op(rq);

	if (op == REQ_OP_READ)
		return READ;
	else if (op_is_write(op))
		return WRITE;

	/* don't account */
	return -1;
}

static void wbt_exit(struct rq_qos *rqos)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct request_queue *q = rqos->q;

	blk_stat_remove_callback(q, rwb->cb);
	blk_stat_free_callback(rwb->cb);
	kfree(rwb);
}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	struct rq_wb *rwb;
	if (!rqos)
		return;
	rwb = RQWB(rqos);
	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
		rwb->wb_normal = 0;
}
EXPORT_SYMBOL_GPL(wbt_disable_default);


static struct rq_qos_ops wbt_rqos_ops = {
	.throttle = wbt_wait,
	.issue = wbt_issue,
	.track = wbt_track,
	.requeue = wbt_requeue,
	.done = wbt_done,
	.cleanup = wbt_cleanup,
	.exit = wbt_exit,
};

int wbt_init(struct request_queue *q)
{
	struct rq_wb *rwb;
	int i;

	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
	if (!rwb)
		return -ENOMEM;

	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
	if (!rwb->cb) {
		kfree(rwb);
		return -ENOMEM;
	}

	for (i = 0; i < WBT_NUM_RWQ; i++)
		rq_wait_init(&rwb->rq_wait[i]);

	rwb->rqos.id = RQ_QOS_WBT;
	rwb->rqos.ops = &wbt_rqos_ops;
	rwb->rqos.q = q;
	rwb->last_comp = rwb->last_issue = jiffies;
	rwb->win_nsec = RWB_WINDOW_NSEC;
	rwb->enable_state = WBT_STATE_ON_DEFAULT;
	rwb->wc = 1;
	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
	__wbt_update_limits(rwb);

	/*
	 * Assign rwb and add the stats callback.
	 */
	rq_qos_add(q, &rwb->rqos);
	blk_stat_add_callback(q, rwb->cb);

	rwb->min_lat_nsec = wbt_default_latency_nsec(q);

	wbt_set_queue_depth(q, blk_queue_depth(q));
	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));

	return 0;
}