// SPDX-License-Identifier: GPL-2.0
/*
 * buffered writeback throttling. loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time.
 * - If the minimum latency in the above window exceeds some target, increment
 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
 *   window is then shrunk to 100 / sqrt(scaling step + 1).
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain status quo.
 * - If latencies look good, decrement scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   will temporarily boost write performance, snapping back to a stable
 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 *   positive scaling steps where we shrink the monitoring window, a negative
 *   scaling step retains the default step==0 window size.
 *
 * Copyright (C) 2016 Jens Axboe
 *
 */
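/*
 * For example, with the default 100msec window (RWB_WINDOW_NSEC) the
 * monitoring window becomes roughly 71msec at scaling step 1, 58msec at
 * step 2, and 50msec at step 3 (100 / sqrt(step + 1)). These figures are
 * illustrative only; the actual value is computed in rwb_arm_timer().
 */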
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-wbt.h"
#include "blk-rq-qos.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

static inline void wbt_clear_state(struct request *rq)
{
	rq->wbt_flags = 0;
}

static inline enum wbt_flags wbt_flags(struct request *rq)
{
	return rq->wbt_flags;
}

static inline bool wbt_is_tracked(struct request *rq)
{
	return rq->wbt_flags & WBT_TRACKED;
}

static inline bool wbt_is_read(struct request *rq)
{
	return rq->wbt_flags & WBT_READ;
}

enum {
	/*
	 * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
	 * from here depending on device stats
	 */
	RWB_DEF_DEPTH	= 16,

	/*
	 * 100msec window
	 */
	RWB_WINDOW_NSEC		= 100 * 1000 * 1000ULL,

	/*
	 * Disregard stats, if we don't meet this minimum
	 */
	RWB_MIN_WRITE_SAMPLES	= 3,

	/*
	 * If we have this number of consecutive windows with not enough
	 * information to scale up or down, scale up.
	 */
	RWB_UNKNOWN_BUMP	= 5,
};

static inline bool rwb_enabled(struct rq_wb *rwb)
{
	return rwb && rwb->wb_normal != 0;
}

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
	if (rwb_enabled(rwb)) {
		const unsigned long cur = jiffies;

		if (cur != *var)
			*var = cur;
	}
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
	struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;

	return time_before(jiffies, wb->dirty_sleep + HZ);
}

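/*
 * Each class of tracked IO (background writes, kswapd writes, discards)
 * has its own wait queue and inflight counter.
 */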
static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
					  enum wbt_flags wb_acct)
{
	if (wb_acct & WBT_KSWAPD)
		return &rwb->rq_wait[WBT_RWQ_KSWAPD];
	else if (wb_acct & WBT_DISCARD)
		return &rwb->rq_wait[WBT_RWQ_DISCARD];

	return &rwb->rq_wait[WBT_RWQ_BG];
}

static void rwb_wake_all(struct rq_wb *rwb)
{
	int i;

	for (i = 0; i < WBT_NUM_RWQ; i++) {
		struct rq_wait *rqw = &rwb->rq_wait[i];

		if (wq_has_sleeper(&rqw->wait))
			wake_up_all(&rqw->wait);
	}
}

static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
			 enum wbt_flags wb_acct)
{
	int inflight, limit;

	inflight = atomic_dec_return(&rqw->inflight);

	/*
	 * wbt got disabled with IO in flight. Wake up any potential
	 * waiters, we don't have to do more than that.
	 */
	if (unlikely(!rwb_enabled(rwb))) {
		rwb_wake_all(rwb);
		return;
	}

	/*
	 * For discards, our limit is always the background. For writes, if
	 * the device does write back caching, drop further down before we
	 * wake people up.
	 */
	if (wb_acct & WBT_DISCARD)
		limit = rwb->wb_background;
	else if (rwb->wc && !wb_recent_wait(rwb))
		limit = 0;
	else
		limit = rwb->wb_normal;

	/*
	 * Don't wake anyone up if we are above the normal limit.
	 */
	if (inflight && inflight >= limit)
		return;

	if (wq_has_sleeper(&rqw->wait)) {
		int diff = limit - inflight;

		if (!inflight || diff >= rwb->wb_background / 2)
			wake_up_all(&rqw->wait);
	}
}

static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct rq_wait *rqw;

	if (!(wb_acct & WBT_TRACKED))
		return;

	rqw = get_rq_wait(rwb, wb_acct);
	wbt_rqw_done(rwb, rqw, wb_acct);
}

/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged, when the request gets freed.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!wbt_is_tracked(rq)) {
		if (rwb->sync_cookie == rq) {
			rwb->sync_issue = 0;
			rwb->sync_cookie = NULL;
		}

		if (wbt_is_read(rq))
			wb_timestamp(rwb, &rwb->last_comp);
	} else {
		WARN_ON_ONCE(rq == rwb->sync_cookie);
		__wbt_done(rqos, wbt_flags(rq));
	}
	wbt_clear_state(rq);
}

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
	/*
	 * We need at least one read sample, and a minimum of
	 * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
	 * that it's writes impacting us, and not just some sole read on
	 * a device that is in a lower power state.
	 */
	return (stat[READ].nr_samples >= 1 &&
		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}

static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
	u64 now, issue = READ_ONCE(rwb->sync_issue);

	if (!issue || !rwb->sync_cookie)
		return 0;

	now = ktime_to_ns(ktime_get());
	return now - issue;
}

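/* Verdicts returned by latency_exceeded() for a completed stats window. */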
enum {
	LAT_OK = 1,
	LAT_UNKNOWN,
	LAT_UNKNOWN_WRITES,
	LAT_EXCEEDED,
};

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;
	u64 thislat;

	/*
	 * If our stored sync issue exceeds the window size, or it
	 * exceeds our min target AND we haven't logged any entries,
	 * flag the latency as exceeded. wbt works off completion latencies,
	 * but for a flooded device, a single sync IO can take a long time
	 * to complete after being issued. If this time exceeds our
	 * monitoring window AND we didn't see any other completions in that
	 * window, then count that sync IO as a violation of the latency.
	 */
	thislat = rwb_sync_issue_lat(rwb);
	if (thislat > rwb->cur_win_nsec ||
	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
		trace_wbt_lat(bdi, thislat);
		return LAT_EXCEEDED;
	}

	/*
	 * No read/write mix, if stat isn't valid
	 */
	if (!stat_sample_valid(stat)) {
		/*
		 * If we had writes in this stat window and the window is
		 * current, we're only doing writes. If a task recently
		 * waited or still has writes in flight, consider us doing
		 * just writes as well.
		 */
		if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
		    wbt_inflight(rwb))
			return LAT_UNKNOWN_WRITES;
		return LAT_UNKNOWN;
	}

	/*
	 * If the 'min' latency exceeds our target, step down.
	 */
	if (stat[READ].min > rwb->min_lat_nsec) {
		trace_wbt_lat(bdi, stat[READ].min);
		trace_wbt_stat(bdi, stat);
		return LAT_EXCEEDED;
	}

	if (rqd->scale_step)
		trace_wbt_stat(bdi, stat);

	return LAT_OK;
}

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;

	trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
			rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}

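/*
 * Derive the background and normal inflight limits from the current max
 * depth: wb_normal is roughly half of max_depth and wb_background roughly
 * a quarter. Both limits drop to 0 when the latency target is 0 (wbt is
 * effectively disabled), and are clamped for very shallow devices.
 */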
static void calc_wb_limits(struct rq_wb *rwb)
{
	if (rwb->min_lat_nsec == 0) {
		rwb->wb_normal = rwb->wb_background = 0;
	} else if (rwb->rq_depth.max_depth <= 2) {
		rwb->wb_normal = rwb->rq_depth.max_depth;
		rwb->wb_background = 1;
	} else {
		rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
		rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
	}
}

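/*
 * Step the queue depth back up (the scaling step may go negative for
 * write-only workloads, see the comment at the top of this file) and wake
 * anyone who was throttled against the old limits.
 */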
static void scale_up(struct rq_wb *rwb)
{
	if (!rq_depth_scale_up(&rwb->rq_depth))
		return;
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_wake_all(rwb);
	rwb_trace_step(rwb, tracepoint_string("scale up"));
}

static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
	if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
		return;
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_trace_step(rwb, tracepoint_string("scale down"));
}

static void rwb_arm_timer(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	if (rqd->scale_step > 0) {
		/*
		 * We should speed this up, using some variant of a fast
		 * integer inverse square root calculation. Since we only do
		 * this for every window expiration, it's not a huge deal,
		 * though.
		 */
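		/*
		 * This computes win_nsec / sqrt(scale_step + 1) in fixed
		 * point: int_sqrt((step + 1) << 8) is roughly
		 * sqrt(step + 1) * 16, which the << 4 on win_nsec cancels.
		 */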
		rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
					int_sqrt((rqd->scale_step + 1) << 8));
	} else {
		/*
		 * For step < 0, we don't want to increase/decrease the
		 * window size.
		 */
		rwb->cur_win_nsec = rwb->win_nsec;
	}

	blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}

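/*
 * Stat window expired: look at the completion latencies we gathered and
 * adjust the scaling step (and thus the queue depth) accordingly.
 */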
static void wb_timer_fn(struct blk_stat_callback *cb)
{
	struct rq_wb *rwb = cb->data;
	struct rq_depth *rqd = &rwb->rq_depth;
	unsigned int inflight = wbt_inflight(rwb);
	int status;

	status = latency_exceeded(rwb, cb->stat);

	trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
			inflight);

	/*
	 * If we exceeded the latency target, step down. If we did not,
	 * step one level up. If we don't know enough to say either exceeded
	 * or ok, then don't do anything.
	 */
	switch (status) {
	case LAT_EXCEEDED:
		scale_down(rwb, true);
		break;
	case LAT_OK:
		scale_up(rwb);
		break;
	case LAT_UNKNOWN_WRITES:
		/*
		 * We started at the center step, and while we don't have a
		 * valid read/write sample we do have writes going on.
		 * Allow the step to go negative, to increase write performance.
		 */
		scale_up(rwb);
		break;
	case LAT_UNKNOWN:
		if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
			break;
		/*
		 * We get here when we previously scaled the depth away from
		 * the default, and we currently don't have a valid read/write
		 * sample. For that case, slowly return to center state
		 * (step == 0).
		 */
		if (rqd->scale_step > 0)
			scale_up(rwb);
		else if (rqd->scale_step < 0)
			scale_down(rwb, false);
		break;
	default:
		break;
	}

	/*
	 * Re-arm timer, if we have IO in flight
	 */
	if (rqd->scale_step || inflight)
		rwb_arm_timer(rwb);
}

static void __wbt_update_limits(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	rqd->scale_step = 0;
	rqd->scaled_max = false;

	rq_depth_calc_max_depth(rqd);
	calc_wb_limits(rwb);

	rwb_wake_all(rwb);
}

void wbt_update_limits(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return;
	__wbt_update_limits(RQWB(rqos));
}

u64 wbt_get_min_lat(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return 0;
	return RQWB(rqos)->min_lat_nsec;
}

void wbt_set_min_lat(struct request_queue *q, u64 val)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return;
	RQWB(rqos)->min_lat_nsec = val;
	RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
	__wbt_update_limits(RQWB(rqos));
}


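/*
 * Returns true if any unrelated IO was issued or completed within the
 * last 100msec (HZ / 10); used to decide whether background writeback
 * has competition.
 */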
static bool close_io(struct rq_wb *rwb)
{
	const unsigned long now = jiffies;

	return time_before(now, rwb->last_issue + HZ / 10) ||
		time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO	(REQ_SYNC | REQ_META | REQ_PRIO)

static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
{
	unsigned int limit;

	/*
	 * If we got disabled, just return UINT_MAX. This ensures that
	 * we'll properly inc a new IO, and dec+wakeup at the end.
	 */
	if (!rwb_enabled(rwb))
		return UINT_MAX;

	if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
		return rwb->wb_background;

	/*
	 * At this point we know it's a buffered write. If this is
	 * kswapd trying to free memory, or REQ_SYNC is set, then
	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
	 * that. If the write is marked as a background write, then use
	 * the idle limit, or go to normal if we haven't had competing
	 * IO for a bit.
	 */
	if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
		limit = rwb->rq_depth.max_depth;
	else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
		/*
		 * If less than 100ms since we completed unrelated IO,
		 * limit us to half the depth for background writeback.
		 */
		limit = rwb->wb_background;
	} else
		limit = rwb->wb_normal;

	return limit;
}

struct wbt_wait_data {
	struct rq_wb *rwb;
	enum wbt_flags wb_acct;
	unsigned long rw;
};

static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{
	struct wbt_wait_data *data = private_data;
	return rq_wait_inc_below(rqw, get_limit(data->rwb, data->rw));
}

static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
	struct wbt_wait_data *data = private_data;
	wbt_rqw_done(data->rwb, rqw, data->wb_acct);
}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
		       unsigned long rw)
{
	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
	struct wbt_wait_data data = {
		.rwb = rwb,
		.wb_acct = wb_acct,
		.rw = rw,
	};

	rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
}

static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_WRITE:
		/*
		 * Don't throttle WRITE_ODIRECT
		 */
		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
		    (REQ_SYNC | REQ_IDLE))
			return false;
		/* fallthrough */
	case REQ_OP_DISCARD:
		return true;
	default:
		return false;
	}
}

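/*
 * Map a bio to the wbt flags that will be stored on the request: reads are
 * only marked so issue/completion timestamps get updated, while throttled
 * writes and discards are marked WBT_TRACKED (plus WBT_KSWAPD/WBT_DISCARD
 * so they are accounted against the right wait queue).
 */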
static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{
	enum wbt_flags flags = 0;

	if (!rwb_enabled(rwb))
		return 0;

	if (bio_op(bio) == REQ_OP_READ) {
		flags = WBT_READ;
	} else if (wbt_should_throttle(rwb, bio)) {
		if (current_is_kswapd())
			flags |= WBT_KSWAPD;
		if (bio_op(bio) == REQ_OP_DISCARD)
			flags |= WBT_DISCARD;
		flags |= WBT_TRACKED;
	}
	return flags;
}

static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
	__wbt_done(rqos, flags);
}

/*
 * May sleep, if we have exceeded the writeback limits. A bio is only
 * throttled if bio_to_wbt_flags() marked it as tracked; for reads we just
 * note the issue time and return.
 */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags;

	flags = bio_to_wbt_flags(rwb, bio);
	if (!(flags & WBT_TRACKED)) {
		if (flags & WBT_READ)
			wb_timestamp(rwb, &rwb->last_issue);
		return;
	}

	__wbt_wait(rwb, flags, bio->bi_opf);

	if (!blk_stat_is_active(rwb->cb))
		rwb_arm_timer(rwb);
}

static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}

static void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!rwb_enabled(rwb))
		return;

	/*
	 * Track sync issue, in case it takes a long time to complete. That
	 * lets us react more quickly if a flooded device makes a single sync
	 * IO very slow. Note that this is just a hint. The request can go
	 * away when it completes, so it's important we never dereference it.
	 * We only use the address to compare with, which is why we store the
	 * sync_issue time locally.
	 */
	if (wbt_is_read(rq) && !rwb->sync_issue) {
		rwb->sync_cookie = rq;
		rwb->sync_issue = rq->io_start_time_ns;
	}
}

static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);
	if (!rwb_enabled(rwb))
		return;
	if (rq == rwb->sync_cookie) {
		rwb->sync_issue = 0;
		rwb->sync_cookie = NULL;
	}
}

void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (rqos)
		RQWB(rqos)->wc = write_cache_on;
}

/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	/* Throttling already enabled? */
	if (rqos)
		return;

	/* Queue not registered? Maybe shutting down... */
	if (!blk_queue_registered(q))
		return;

	if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
		wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{
	/*
	 * We default to 2msec for non-rotational storage, and 75msec
	 * for rotational storage.
	 */
	if (blk_queue_nonrot(q))
		return 2000000ULL;
	else
		return 75000000ULL;
}

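/*
 * Tell the stats code which bucket a request belongs to; anything that is
 * neither a read nor a write (e.g. a flush) is not accounted.
 */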
static int wbt_data_dir(const struct request *rq)
{
	const int op = req_op(rq);

	if (op == REQ_OP_READ)
		return READ;
	else if (op_is_write(op))
		return WRITE;

	/* don't account */
	return -1;
}

static void wbt_queue_depth_changed(struct rq_qos *rqos)
{
	RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
	__wbt_update_limits(RQWB(rqos));
}

static void wbt_exit(struct rq_qos *rqos)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct request_queue *q = rqos->q;

	blk_stat_remove_callback(q, rwb->cb);
	blk_stat_free_callback(rwb->cb);
	kfree(rwb);
}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	struct rq_wb *rwb;
	if (!rqos)
		return;
	rwb = RQWB(rqos);
	if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
		blk_stat_deactivate(rwb->cb);
		rwb->wb_normal = 0;
	}
}
EXPORT_SYMBOL_GPL(wbt_disable_default);

#ifdef CONFIG_BLK_DEBUG_FS
static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%llu\n", rwb->cur_win_nsec);
	return 0;
}

static int wbt_enabled_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%d\n", rwb->enable_state);
	return 0;
}

static int wbt_id_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;

	seq_printf(m, "%u\n", rqos->id);
	return 0;
}

static int wbt_inflight_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);
	int i;

	for (i = 0; i < WBT_NUM_RWQ; i++)
		seq_printf(m, "%d: inflight %d\n", i,
			   atomic_read(&rwb->rq_wait[i].inflight));
	return 0;
}

static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%lu\n", rwb->min_lat_nsec);
	return 0;
}

static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%u\n", rwb->unknown_cnt);
	return 0;
}

static int wbt_normal_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%u\n", rwb->wb_normal);
	return 0;
}

static int wbt_background_show(void *data, struct seq_file *m)
{
	struct rq_qos *rqos = data;
	struct rq_wb *rwb = RQWB(rqos);

	seq_printf(m, "%u\n", rwb->wb_background);
	return 0;
}

static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
	{"curr_win_nsec", 0400, wbt_curr_win_nsec_show},
	{"enabled", 0400, wbt_enabled_show},
	{"id", 0400, wbt_id_show},
	{"inflight", 0400, wbt_inflight_show},
	{"min_lat_nsec", 0400, wbt_min_lat_nsec_show},
	{"unknown_cnt", 0400, wbt_unknown_cnt_show},
	{"wb_normal", 0400, wbt_normal_show},
	{"wb_background", 0400, wbt_background_show},
	{},
};
#endif

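/*
 * Callbacks wired into the rq-qos framework. The block layer invokes these
 * as bios are throttled or cleaned up, and as requests are tracked, issued,
 * requeued and completed.
 */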
static struct rq_qos_ops wbt_rqos_ops = {
	.throttle = wbt_wait,
	.issue = wbt_issue,
	.track = wbt_track,
	.requeue = wbt_requeue,
	.done = wbt_done,
	.cleanup = wbt_cleanup,
	.queue_depth_changed = wbt_queue_depth_changed,
	.exit = wbt_exit,
#ifdef CONFIG_BLK_DEBUG_FS
	.debugfs_attrs = wbt_debugfs_attrs,
#endif
};

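/*
 * Set up writeback throttling for a queue: allocate the rq_wb state and the
 * stats callback, register with the rq-qos framework, and compute the
 * initial depth limits and latency target.
 */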
int wbt_init(struct request_queue *q)
{
	struct rq_wb *rwb;
	int i;

	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
	if (!rwb)
		return -ENOMEM;

	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
	if (!rwb->cb) {
		kfree(rwb);
		return -ENOMEM;
	}

	for (i = 0; i < WBT_NUM_RWQ; i++)
		rq_wait_init(&rwb->rq_wait[i]);

	rwb->rqos.id = RQ_QOS_WBT;
	rwb->rqos.ops = &wbt_rqos_ops;
	rwb->rqos.q = q;
	rwb->last_comp = rwb->last_issue = jiffies;
	rwb->win_nsec = RWB_WINDOW_NSEC;
	rwb->enable_state = WBT_STATE_ON_DEFAULT;
	rwb->wc = 1;
	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
	__wbt_update_limits(rwb);

	/*
	 * Assign rwb and add the stats callback.
	 */
	rq_qos_add(q, &rwb->rqos);
	blk_stat_add_callback(q, rwb->cb);

	rwb->min_lat_nsec = wbt_default_latency_nsec(q);

	wbt_queue_depth_changed(&rwb->rqos);
	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));

	return 0;
}