/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff. A request is hashed on the sector it ends at, so
 * that a new bio can be checked quickly for a back merge candidate.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.allow_merge)
		return e->type->ops.mq.allow_merge(q, rq, bio);
	else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
		return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

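/* Caller must hold elv_list_lock. */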
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

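/*
 * Look up an elevator type and take a reference on its module; drop it
 * with elevator_put(). With @try_loading set, try to load the module
 * "<name>-iosched" if the type isn't registered yet.
 */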
static struct elevator_type *elevator_get(const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e && try_loading) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static char chosen_elevator[ELV_NAME_MAX];

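/*
 * Parse the "elevator=" boot parameter, e.g. booting with
 * "elevator=deadline" makes deadline the default scheduler.
 */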
static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
	struct elevator_type *e;

	if (!chosen_elevator[0])
		return;

	spin_lock(&elv_list_lock);
	e = elevator_find(chosen_elevator);
	spin_unlock(&elv_list_lock);

	if (!e)
		request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);
	eq->uses_mq = e->uses_mq;

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	int err;

	/*
	 * q->sysfs_lock must be held to provide mutual exclusion between
	 * elevator_switch() and here.
	 */
	lockdep_assert_held(&q->sysfs_lock);

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name, true);
		if (!e)
			return -EINVAL;
	}

	/*
	 * Use the default elevator specified by config boot param or
	 * config option.  Don't try to load modules as we could be running
	 * off async and request_module() isn't allowed from async.
	 */
	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator, false);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		if (q->mq_ops && q->nr_hw_queues == 1)
			e = elevator_get(CONFIG_DEFAULT_SQ_IOSCHED, false);
		else if (q->mq_ops)
			e = elevator_get(CONFIG_DEFAULT_MQ_IOSCHED, false);
		else
			e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);

		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop/none.\n");
			/* e is NULL here, so there is no reference to put */
			if (q->mq_ops)
				return 0;
			e = elevator_get("noop", false);
		}
	}

	if (e->uses_mq) {
		err = blk_mq_sched_setup(q);
		if (!err)
			err = e->ops.mq.init_sched(q, e);
	} else
		err = e->ops.sq.elevator_init_fn(q, e);
	if (err) {
		if (e->uses_mq)
			blk_mq_sched_teardown(q);
		elevator_put(e);
	}
	return err;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->uses_mq && e->type->ops.mq.exit_sched)
		e->type->ops.mq.exit_sched(e);
	else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
		e->type->ops.sq.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

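/*
 * Find a request whose hash key (end sector) matches @offset, pruning
 * entries that have become unmergeable along the way.
 */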
struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
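
/*
 * Usage sketch (hypothetical scheduler-private data, not from this
 * file): keep requests sorted by start sector in an rb tree.
 *
 *	struct rb_root sort_list = RB_ROOT;
 *
 *	elv_rb_add(&sort_list, rq);
 *	rq = elv_rb_find(&sort_list, sector);
 *	elv_rb_del(&sort_list, rq);
 */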

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (req_op(rq) != req_op(pos))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

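/*
 * Look for a request that @bio can be merged into. On success, *req is
 * set to that request and the merge type (front/back) is returned;
 * otherwise ELEVATOR_NO_MERGE is returned.
 */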
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 * 	nomerges:  No merges at all attempted
	 * 	noxmerges: Only simple one-hit cache try
	 * 	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		ret = blk_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->uses_mq && e->type->ops.mq.request_merge)
		return e->type->ops.mq.request_merge(q, req, bio);
	else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
		return e->type->ops.sq.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.request_merged)
		e->type->ops.mq.request_merged(q, rq, type);
	else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
		e->type->ops.sq.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	bool next_sorted = false;

	if (e->uses_mq && e->type->ops.mq.requests_merged)
		e->type->ops.mq.requests_merged(q, rq, next);
	else if (e->type->ops.sq.elevator_merge_req_fn) {
		next_sorted = next->rq_flags & RQF_SORTED;
		if (next_sorted)
			e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
	}

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	if (e->type->ops.sq.elevator_bio_merged_fn)
		e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
}

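/*
 * Runtime PM: count pending non-PM requests so the queue can be
 * runtime-suspended when idle and resumed when new I/O arrives.
 */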
#ifdef CONFIG_PM
static void blk_pm_requeue_request(struct request *rq)
{
	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
		rq->q->nr_pending--;
}

static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
	if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
		pm_request_resume(q->dev);
}
#else
static inline void blk_pm_requeue_request(struct request *rq) {}
static inline void blk_pm_add_request(struct request_queue *q,
				      struct request *rq)
{
}
#endif

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->rq_flags & RQF_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->rq_flags &= ~RQF_STARTED;

	blk_pm_requeue_request(rq);

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

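/*
 * Force the legacy elevator to dispatch every request it is still
 * holding back onto the request queue.
 */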
void elv_drain_elevator(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	static int printed;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	lockdep_assert_held(q->queue_lock);

	while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	blk_pm_add_request(q, rq);

	rq->q = q;

	if (rq->rq_flags & RQF_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->rq_flags & RQF_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->rq_flags |= RQF_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
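		/* fall through: no merge happened, do a normal sort insert */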
	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS);
		rq->rq_flags |= RQF_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->rq_flags |= RQF_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.next_request)
		return e->type->ops.mq.next_request(q, rq);
	else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
		return e->type->ops.sq.elevator_latter_req_fn(q, rq);

	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.former_request)
		return e->type->ops.mq.former_request(q, rq);
	if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
		return e->type->ops.sq.elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
		    struct bio *bio, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return 0;

	if (e->type->ops.sq.elevator_set_req_fn)
		return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	if (e->type->ops.sq.elevator_put_req_fn)
		e->type->ops.sq.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, unsigned int op)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return 0;

	if (e->type->ops.sq.elevator_may_queue_fn)
		return e->type->ops.sq.elevator_may_queue_fn(q, op);

	return ELV_MQUEUE_MAY;
}

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->rq_flags & RQF_SORTED) &&
		    e->type->ops.sq.elevator_completed_req_fn)
			e->type->ops.sq.elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
		if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
			e->type->ops.sq.elevator_registered_fn(q);
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);
void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
	}
}
EXPORT_SYMBOL(elv_unregister_queue);

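/*
 * Register an I/O scheduler type. A minimal usage sketch from a
 * scheduler module (the "foo" names are hypothetical):
 *
 *	static struct elevator_type iosched_foo = {
 *		.elevator_name	= "foo",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return elv_register(&iosched_foo);
 *	}
 */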
int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		if (e->icq_cache)
			kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old = q->elevator;
	bool old_registered = false;
	int err;

	if (q->mq_ops) {
		blk_mq_freeze_queue(q);
		blk_mq_quiesce_queue(q);
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data.
	 * Block layer doesn't call into a quiesced elevator - all requests
	 * are directly put on the dispatch list without elevator data
	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
	 * merge happens either.
	 */
	if (old) {
		old_registered = old->registered;

		if (old->uses_mq)
			blk_mq_sched_teardown(q);

		if (!q->mq_ops)
			blk_queue_bypass_start(q);

		/* unregister and clear all auxiliary data of the old elevator */
		if (old_registered)
			elv_unregister_queue(q);

		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
	}

	/* allocate, init and register new elevator */
	if (new_e) {
		if (new_e->uses_mq) {
			err = blk_mq_sched_setup(q);
			if (!err)
				err = new_e->ops.mq.init_sched(q, new_e);
		} else
			err = new_e->ops.sq.elevator_init_fn(q, new_e);
		if (err)
			goto fail_init;

		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	} else
		q->elevator = NULL;

	/* done, kill the old one and finish */
	if (old) {
		elevator_exit(old);
		if (!q->mq_ops)
			blk_queue_bypass_end(q);
	}

	if (q->mq_ops) {
		blk_mq_unfreeze_queue(q);
		blk_mq_start_stopped_hw_queues(q, true);
	}

	if (new_e)
		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
	else
		blk_add_trace_msg(q, "elv switch: none");

	return 0;

fail_register:
	if (q->mq_ops)
		blk_mq_sched_teardown(q);
	elevator_exit(q->elevator);
fail_init:
	/* switch failed, restore and re-register old elevator */
	if (old) {
		q->elevator = old;
		elv_register_queue(q);
		if (!q->mq_ops)
			blk_queue_bypass_end(q);
	}
	if (q->mq_ops) {
		blk_mq_unfreeze_queue(q);
		blk_mq_start_stopped_hw_queues(q, true);
	}

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	/*
	 * Special case for mq, turn off scheduling
	 */
	if (q->mq_ops && !strncmp(name, "none", 4))
		return elevator_switch(q, NULL);

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name), true);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (q->elevator &&
	    !strcmp(elevator_name, q->elevator->type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	if (!e->uses_mq && q->mq_ops) {
		elevator_put(e);
		return -EINVAL;
	}
	if (e->uses_mq && !q->mq_ops) {
		elevator_put(e);
		return -EINVAL;
	}

	return elevator_switch(q, e);
}

int elevator_change(struct request_queue *q, const char *name)
{
	int ret;

	/* Protect q->elevator from elevator_init() */
	mutex_lock(&q->sysfs_lock);
	ret = __elevator_change(q, name);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
EXPORT_SYMBOL(elevator_change);

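/*
 * sysfs interface: writing a scheduler name to
 * /sys/block/<dev>/queue/scheduler switches the queue to it, e.g.
 * "echo deadline > /sys/block/sda/queue/scheduler".
 */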
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!(q->mq_ops || q->request_fn))
		return count;

	ret = __elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv = NULL;
	struct elevator_type *__e;
	int len = 0;

	if (!blk_queue_stackable(q))
		return sprintf(name, "none\n");

	if (!q->elevator)
		len += sprintf(name+len, "[none] ");
	else
		elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (elv && !strcmp(elv->elevator_name, __e->elevator_name)) {
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
			continue;
		}
		if (__e->uses_mq && q->mq_ops)
			len += sprintf(name+len, "%s ", __e->elevator_name);
		else if (!__e->uses_mq && !q->mq_ops)
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	if (q->mq_ops && q->elevator)
		len += sprintf(name+len, "none");

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);