/*
 * buffered writeback throttling. loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time.
 * - If the minimum latency in the above window exceeds some target, increment
 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
 *   window is then shrunk to 100 / sqrt(scaling step + 1).
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain status quo.
 * - If latencies look good, decrement scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   will temporarily boost write performance, snapping back to a stable
 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 *   positive scaling steps where we shrink the monitoring window, a negative
 *   scaling step retains the default step==0 window size.
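 *
 * Example (illustrative numbers): with the default 100 msec window used
 * below (RWB_WINDOW_NSEC), a scaling step of 3 shrinks the monitoring
 * window to 100 / sqrt(3 + 1) = 50 msec, while a step of 0 or below keeps
 * the full 100 msec window.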
 *
 * Copyright (C) 2016 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-wbt.h"
#include "blk-rq-qos.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

static inline void wbt_clear_state(struct request *rq)
{
	rq->wbt_flags = 0;
}

static inline enum wbt_flags wbt_flags(struct request *rq)
{
	return rq->wbt_flags;
}

static inline bool wbt_is_tracked(struct request *rq)
{
	return rq->wbt_flags & WBT_TRACKED;
}

static inline bool wbt_is_read(struct request *rq)
{
	return rq->wbt_flags & WBT_READ;
}

enum {
	/*
	 * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
	 * from here depending on device stats
	 */
	RWB_DEF_DEPTH	= 16,

	/*
	 * 100msec window
	 */
	RWB_WINDOW_NSEC		= 100 * 1000 * 1000ULL,

	/*
	 * Disregard stats, if we don't meet this minimum
	 */
	RWB_MIN_WRITE_SAMPLES	= 3,

	/*
	 * If we have this number of consecutive windows with not enough
	 * information to scale up or down, scale up.
	 */
	RWB_UNKNOWN_BUMP	= 5,
};

static inline bool rwb_enabled(struct rq_wb *rwb)
{
	return rwb && rwb->wb_normal != 0;
}

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
	if (rwb_enabled(rwb)) {
		const unsigned long cur = jiffies;

		if (cur != *var)
			*var = cur;
	}
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
	struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;

	return time_before(jiffies, wb->dirty_sleep + HZ);
}

static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
					  enum wbt_flags wb_acct)
{
	if (wb_acct & WBT_KSWAPD)
		return &rwb->rq_wait[WBT_RWQ_KSWAPD];
	else if (wb_acct & WBT_DISCARD)
		return &rwb->rq_wait[WBT_RWQ_DISCARD];

	return &rwb->rq_wait[WBT_RWQ_BG];
}

static void rwb_wake_all(struct rq_wb *rwb)
{
	int i;

	for (i = 0; i < WBT_NUM_RWQ; i++) {
		struct rq_wait *rqw = &rwb->rq_wait[i];

		if (wq_has_sleeper(&rqw->wait))
			wake_up_all(&rqw->wait);
	}
}

static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct rq_wait *rqw;
	int inflight, limit;

	if (!(wb_acct & WBT_TRACKED))
		return;

	rqw = get_rq_wait(rwb, wb_acct);
	inflight = atomic_dec_return(&rqw->inflight);

	/*
	 * wbt got disabled with IO in flight. Wake up any potential
	 * waiters, we don't have to do more than that.
	 */
	if (unlikely(!rwb_enabled(rwb))) {
		rwb_wake_all(rwb);
		return;
	}

	/*
	 * For discards, our limit is always the background. For writes, if
	 * the device does write back caching, drop further down before we
	 * wake people up.
	 */
	if (wb_acct & WBT_DISCARD)
		limit = rwb->wb_background;
	else if (rwb->wc && !wb_recent_wait(rwb))
		limit = 0;
	else
		limit = rwb->wb_normal;

	/*
	 * Don't wake anyone up if we are above the normal limit.
	 */
	if (inflight && inflight >= limit)
		return;

	if (wq_has_sleeper(&rqw->wait)) {
		int diff = limit - inflight;

		if (!inflight || diff >= rwb->wb_background / 2)
			wake_up(&rqw->wait);
	}
}

/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged, at which point the request gets freed.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!wbt_is_tracked(rq)) {
		if (rwb->sync_cookie == rq) {
			rwb->sync_issue = 0;
			rwb->sync_cookie = NULL;
		}

		if (wbt_is_read(rq))
			wb_timestamp(rwb, &rwb->last_comp);
	} else {
		WARN_ON_ONCE(rq == rwb->sync_cookie);
		__wbt_done(rqos, wbt_flags(rq));
	}
	wbt_clear_state(rq);
}

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
	/*
	 * We need at least one read sample, and a minimum of
	 * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
	 * that it's writes impacting us, and not just some sole read on
	 * a device that is in a lower power state.
	 */
	return (stat[READ].nr_samples >= 1 &&
		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}

static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
	u64 now, issue = READ_ONCE(rwb->sync_issue);

	if (!issue || !rwb->sync_cookie)
		return 0;

	now = ktime_to_ns(ktime_get());
	return now - issue;
}

enum {
	LAT_OK = 1,
	LAT_UNKNOWN,
	LAT_UNKNOWN_WRITES,
	LAT_EXCEEDED,
};

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;
	u64 thislat;

	/*
	 * If our stored sync issue exceeds the window size, or it
	 * exceeds our min target AND we haven't logged any entries,
	 * flag the latency as exceeded. wbt works off completion latencies,
	 * but for a flooded device, a single sync IO can take a long time
	 * to complete after being issued. If this time exceeds our
	 * monitoring window AND we didn't see any other completions in that
	 * window, then count that sync IO as a violation of the latency.
	 */
	thislat = rwb_sync_issue_lat(rwb);
	if (thislat > rwb->cur_win_nsec ||
	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
		trace_wbt_lat(bdi, thislat);
		return LAT_EXCEEDED;
	}

	/*
	 * No read/write mix, if stat isn't valid
	 */
	if (!stat_sample_valid(stat)) {
		/*
		 * If we had writes in this stat window and the window is
		 * current, we're only doing writes. If a task recently
		 * waited or still has writes in flight, consider us doing
		 * just writes as well.
		 */
		if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
		    wbt_inflight(rwb))
			return LAT_UNKNOWN_WRITES;
		return LAT_UNKNOWN;
	}

	/*
	 * If the 'min' latency exceeds our target, step down.
	 */
	if (stat[READ].min > rwb->min_lat_nsec) {
		trace_wbt_lat(bdi, stat[READ].min);
		trace_wbt_stat(bdi, stat);
		return LAT_EXCEEDED;
	}

	if (rqd->scale_step)
		trace_wbt_stat(bdi, stat);

	return LAT_OK;
}

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;

	trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
			rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}

static void calc_wb_limits(struct rq_wb *rwb)
{
	if (rwb->min_lat_nsec == 0) {
		rwb->wb_normal = rwb->wb_background = 0;
	} else if (rwb->rq_depth.max_depth <= 2) {
		rwb->wb_normal = rwb->rq_depth.max_depth;
		rwb->wb_background = 1;
	} else {
		rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
		rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
	}
}
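
/*
 * Example for calc_wb_limits() (illustrative numbers): with
 * rq_depth.max_depth == 16 and a non-zero latency target, the limits work
 * out to wb_normal == (16 + 1) / 2 == 8 and wb_background == (16 + 3) / 4
 * == 4.
 */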

static void scale_up(struct rq_wb *rwb)
{
	rq_depth_scale_up(&rwb->rq_depth);
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_trace_step(rwb, "scale up");
}

static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
	rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_wake_all(rwb);
	rwb_trace_step(rwb, "scale down");
}

static void rwb_arm_timer(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	if (rqd->scale_step > 0) {
		/*
		 * We should speed this up, using some variant of a fast
		 * integer inverse square root calculation. Since we only do
		 * this for every window expiration, it's not a huge deal,
		 * though.
		 */
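		/*
		 * int_sqrt((scale_step + 1) << 8) is roughly
		 * 16 * sqrt(scale_step + 1), which cancels the << 4 (i.e. * 16)
		 * applied to win_nsec below, so cur_win_nsec ends up close to
		 * win_nsec / sqrt(scale_step + 1), matching the description at
		 * the top of this file.
		 */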
		rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
					int_sqrt((rqd->scale_step + 1) << 8));
	} else {
		/*
		 * For step < 0, we don't want to increase/decrease the
		 * window size.
		 */
		rwb->cur_win_nsec = rwb->win_nsec;
	}

	blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}

static void wb_timer_fn(struct blk_stat_callback *cb)
{
	struct rq_wb *rwb = cb->data;
	struct rq_depth *rqd = &rwb->rq_depth;
	unsigned int inflight = wbt_inflight(rwb);
	int status;

	status = latency_exceeded(rwb, cb->stat);

	trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
			inflight);

	/*
	 * If we exceeded the latency target, step down. If we did not,
	 * step one level up. If we don't know enough to say either exceeded
	 * or ok, then don't do anything.
	 */
	switch (status) {
	case LAT_EXCEEDED:
		scale_down(rwb, true);
		break;
	case LAT_OK:
		scale_up(rwb);
		break;
	case LAT_UNKNOWN_WRITES:
		/*
		 * We started at the center step, but don't have a valid
		 * read/write sample, though we do have writes going on.
		 * Allow the step to go negative, to increase write perf.
		 */
		scale_up(rwb);
		break;
	case LAT_UNKNOWN:
		if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
			break;
		/*
		 * We get here when we previously scaled the depth, and we
		 * currently don't have a valid read/write sample. For that
		 * case, slowly return to center state (step == 0).
		 */
		if (rqd->scale_step > 0)
			scale_up(rwb);
		else if (rqd->scale_step < 0)
			scale_down(rwb, false);
		break;
	default:
		break;
	}

	/*
	 * Re-arm timer, if we have IO in flight
	 */
	if (rqd->scale_step || inflight)
		rwb_arm_timer(rwb);
}

static void __wbt_update_limits(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	rqd->scale_step = 0;
	rqd->scaled_max = false;

	rq_depth_calc_max_depth(rqd);
	calc_wb_limits(rwb);

	rwb_wake_all(rwb);
}

void wbt_update_limits(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return;
	__wbt_update_limits(RQWB(rqos));
}

u64 wbt_get_min_lat(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return 0;
	return RQWB(rqos)->min_lat_nsec;
}

void wbt_set_min_lat(struct request_queue *q, u64 val)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return;
	RQWB(rqos)->min_lat_nsec = val;
	RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
	__wbt_update_limits(RQWB(rqos));
}


static bool close_io(struct rq_wb *rwb)
{
	const unsigned long now = jiffies;

	return time_before(now, rwb->last_issue + HZ / 10) ||
		time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO	(REQ_SYNC | REQ_META | REQ_PRIO)
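
/*
 * Example (illustrative numbers, continuing the calc_wb_limits() example
 * above): with max_depth == 16, wb_normal == 8 and wb_background == 4,
 * get_limit() allows 16 requests in flight for kswapd/REQ_SYNC writeback,
 * 8 for ordinary buffered writeback, and 4 for REQ_BACKGROUND writes and
 * discards.
 */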

static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
{
	unsigned int limit;

	/*
	 * If we got disabled, just return UINT_MAX. This ensures that
	 * we'll properly inc a new IO, and dec+wakeup at the end.
	 */
	if (!rwb_enabled(rwb))
		return UINT_MAX;

	if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
		return rwb->wb_background;

	/*
	 * At this point we know it's a buffered write. If this is
	 * kswapd trying to free memory, or REQ_SYNC is set, then
	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
	 * that. If the write is marked as a background write, then use
	 * the idle limit, or go to normal if we haven't had competing
	 * IO for a bit.
	 */
	if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
		limit = rwb->rq_depth.max_depth;
	else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
		/*
		 * If less than 100ms since we completed unrelated IO,
		 * limit us to half the depth for background writeback.
		 */
		limit = rwb->wb_background;
	} else
		limit = rwb->wb_normal;

	return limit;
}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
		       unsigned long rw, spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
	DECLARE_WAITQUEUE(wait, current);

	if (!wq_has_sleeper(&rqw->wait) &&
	    rq_wait_inc_below(rqw, get_limit(rwb, rw)))
		return;

	add_wait_queue_exclusive(&rqw->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (rq_wait_inc_below(rqw, get_limit(rwb, rw)))
			break;

		if (lock) {
			spin_unlock_irq(lock);
			io_schedule();
			spin_lock_irq(lock);
		} else
			io_schedule();
	} while (1);

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&rqw->wait, &wait);
}

static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_WRITE:
		/*
		 * Don't throttle WRITE_ODIRECT
		 */
		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
		    (REQ_SYNC | REQ_IDLE))
			return false;
		/* fallthrough */
	case REQ_OP_DISCARD:
		return true;
	default:
		return false;
	}
}

static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{
	enum wbt_flags flags = 0;

	if (bio_op(bio) == REQ_OP_READ) {
		flags = WBT_READ;
	} else if (wbt_should_throttle(rwb, bio)) {
		if (current_is_kswapd())
			flags |= WBT_KSWAPD;
		if (bio_op(bio) == REQ_OP_DISCARD)
			flags |= WBT_DISCARD;
		flags |= WBT_TRACKED;
	}
	return flags;
}

static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
	__wbt_done(rqos, flags);
}

/*
 * May sleep, if we have exceeded the writeback limits. Caller can pass
 * in an irq held spinlock, if it holds one when calling this function.
 * If we do sleep, we'll release and re-grab it.
 */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags;

	flags = bio_to_wbt_flags(rwb, bio);
	if (!(flags & WBT_TRACKED)) {
		if (flags & WBT_READ)
			wb_timestamp(rwb, &rwb->last_issue);
		return;
	}

	if (current_is_kswapd())
		flags |= WBT_KSWAPD;
	if (bio_op(bio) == REQ_OP_DISCARD)
		flags |= WBT_DISCARD;

	__wbt_wait(rwb, flags, bio->bi_opf, lock);

	if (!blk_stat_is_active(rwb->cb))
		rwb_arm_timer(rwb);
}

static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}

void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!rwb_enabled(rwb))
		return;

	/*
	 * Track when a sync IO was issued, so we can react more quickly if
	 * it takes a long time to complete. Note that this is just a hint.
	 * The request can go away when it completes, so it's important we
	 * never dereference it. We only use the address to compare with,
	 * which is why we store the sync_issue time locally.
	 */
	if (wbt_is_read(rq) && !rwb->sync_issue) {
		rwb->sync_cookie = rq;
		rwb->sync_issue = rq->io_start_time_ns;
	}
}

void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!rwb_enabled(rwb))
		return;

	if (rq == rwb->sync_cookie) {
		rwb->sync_issue = 0;
		rwb->sync_cookie = NULL;
	}
}

void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (rqos) {
		RQWB(rqos)->rq_depth.queue_depth = depth;
		__wbt_update_limits(RQWB(rqos));
	}
}

void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (rqos)
		RQWB(rqos)->wc = write_cache_on;
}

/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);

	/* Throttling already enabled? */
	if (rqos)
		return;

	/* Queue not registered? Maybe shutting down... */
	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return;

	if ((q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ)) ||
	    (q->request_fn && IS_ENABLED(CONFIG_BLK_WBT_SQ)))
		wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{
	/*
	 * We default to 2msec for non-rotational storage, and 75msec
	 * for rotational storage.
	 */
	if (blk_queue_nonrot(q))
		return 2000000ULL;
	else
		return 75000000ULL;
}

static int wbt_data_dir(const struct request *rq)
{
	const int op = req_op(rq);

	if (op == REQ_OP_READ)
		return READ;
	else if (op_is_write(op))
		return WRITE;

	/* don't account */
	return -1;
}

static void wbt_exit(struct rq_qos *rqos)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct request_queue *q = rqos->q;

	blk_stat_remove_callback(q, rwb->cb);
	blk_stat_free_callback(rwb->cb);
	kfree(rwb);
}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	struct rq_wb *rwb;
	if (!rqos)
		return;
	rwb = RQWB(rqos);
	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
		rwb->wb_normal = 0;
}
EXPORT_SYMBOL_GPL(wbt_disable_default);


static struct rq_qos_ops wbt_rqos_ops = {
	.throttle = wbt_wait,
	.issue = wbt_issue,
	.track = wbt_track,
	.requeue = wbt_requeue,
	.done = wbt_done,
	.cleanup = wbt_cleanup,
	.exit = wbt_exit,
};

int wbt_init(struct request_queue *q)
{
	struct rq_wb *rwb;
	int i;

	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
	if (!rwb)
		return -ENOMEM;

	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
	if (!rwb->cb) {
		kfree(rwb);
		return -ENOMEM;
	}

	for (i = 0; i < WBT_NUM_RWQ; i++)
		rq_wait_init(&rwb->rq_wait[i]);

	rwb->rqos.id = RQ_QOS_WBT;
	rwb->rqos.ops = &wbt_rqos_ops;
	rwb->rqos.q = q;
	rwb->last_comp = rwb->last_issue = jiffies;
	rwb->win_nsec = RWB_WINDOW_NSEC;
	rwb->enable_state = WBT_STATE_ON_DEFAULT;
	rwb->wc = 1;
	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
	__wbt_update_limits(rwb);

	/*
	 * Assign rwb and add the stats callback.
	 */
	rq_qos_add(q, &rwb->rqos);
	blk_stat_add_callback(q, rwb->cb);

	rwb->min_lat_nsec = wbt_default_latency_nsec(q);

	wbt_set_queue_depth(q, blk_queue_depth(q));
	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));

	return 0;
}