/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
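
/*
 * Illustrative example (not part of the original source): a request
 * starting at sector 992 with nr_sectors == 32 ends at sector 1024, so
 * rq_hash_key() is 1024 and the request lands in bucket
 * hash_long(1024 >> 3, 6).  A later bio with bi_sector == 1024 looks up
 * that same bucket and finds the request as a back merge candidate.
 */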

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	request_queue_t *q = rq->q;
	elevator_t *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}
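
/*
 * Worked example (illustrative): for __rq with sector == 100 and
 * nr_sectors == 8 (covering sectors 100..107), a bio with
 * bi_sector == 108 yields ELEVATOR_BACK_MERGE, while an 8-sector bio
 * with bi_sector == 92 yields ELEVATOR_FRONT_MERGE (100 - 8 == 92).
 */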

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;
	struct list_head *entry;

	list_for_each(entry, &elv_list) {

		e = list_entry(entry, struct elevator_type, list);

		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock_irq(&elv_list_lock);

	e = elevator_find(name);
	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock_irq(&elv_list_lock);

	return e;
}

static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
			   void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	if (!strcmp(str, "as"))
		strcpy(chosen_elevator, "anticipatory");
	else
		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
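
/*
 * E.g. booting with "elevator=deadline" on the kernel command line makes
 * deadline the default scheduler for all queues; the legacy "elevator=as"
 * spelling is still accepted and mapped to "anticipatory" above.
 */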

static struct kobj_type elv_ktype;

static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
{
	elevator_t *eq;
	int i;

	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		goto err;

	memset(eq, 0, sizeof(*eq));
	eq->ops = &e->ops;
	eq->elevator_type = e;
	kobject_init(&eq->kobj);
	snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
	eq->kobj.ktype = &elv_ktype;
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);

	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}

int elevator_init(request_queue_t *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;
	void *data;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name && !(e = elevator_get(name)))
		return -EINVAL;

	if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
		printk("I/O scheduler %s not found\n", chosen_elevator);

	if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
		printk("Default I/O scheduler not found, using no-op\n");
		e = elevator_get("noop");
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	data = elevator_init_queue(q, eq);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	elevator_attach(q, eq, data);
	return ret;
}

EXPORT_SYMBOL(elevator_init);

void elevator_exit(elevator_t *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}

EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(request_queue_t *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(request_queue_t *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
{
	elevator_t *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (rq->sector < __rq->sector)
			p = &(*p)->rb_left;
		else if (rq->sector > __rq->sector)
			p = &(*p)->rb_right;
		else
			return __rq;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}

EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}

EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < rq->sector)
			n = n->rb_left;
		else if (sector > rq->sector)
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}

EXPORT_SYMBOL(elv_rb_find);
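
/*
 * Sketch of how an io scheduler is expected to use these helpers
 * (illustrative only -- "sort_list" and dispatch() stand in for the
 * scheduler's own private data and dispatch path):
 *
 *	struct rb_root *root = &sched_data->sort_list;
 *
 *	while ((__alias = elv_rb_add(root, rq)) != NULL)
 *		dispatch(__alias);	// same-sector alias must go first
 *
 * A non-NULL return from elv_rb_add() means a request at the same
 * sector is already in the tree and must be dealt with before rq
 * can be inserted.
 */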

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sort inserted into the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;

	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
			break;
		if (rq->sector >= boundary) {
			if (pos->sector < boundary)
				continue;
		} else {
			if (pos->sector >= boundary)
				break;
		}
		if (rq->sector >= pos->sector)
			break;
	}

	list_add(&rq->queuelist, entry);
}

EXPORT_SYMBOL(elv_dispatch_sort);
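
/*
 * Example (illustrative): with q->end_sector == 500 and no barriers in
 * the queue, pending requests at sectors 520 and 600 sort ahead of ones
 * at sectors 100 and 300 -- everything at or beyond the boundary is
 * served on the current sweep before the head seeks back below it.
 */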

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}

EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	elevator_t *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
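
/*
 * Note: the lookup above runs cheapest-first -- the one-hit last_merge
 * cache, then the back merge hash, and only then the io scheduler's own
 * merge function (typically an rb-tree front merge lookup).
 */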

void elv_merged_request(request_queue_t *q, struct request *rq, int type)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(request_queue_t *q, struct request *rq,
			     struct request *next)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);
	elv_rqhash_del(q, next);

	q->nr_sorted--;
	q->last_merge = rq;
}

void elv_requeue_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
			e->ops->elevator_deactivate_req_fn(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

static void elv_drain_elevator(request_queue_t *q)
{
	static int printed;
	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}

void elv_insert(request_queue_t *q, struct request *rq, int where)
{
	struct list_head *pos;
	unsigned ordseq;
	int unplug_it = 1;

	blk_add_trace_rq(q, rq, BLK_TA_INSERT);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		blk_remove_plug(q);
		q->request_fn(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * If ordered flush isn't in progress, we do front
		 * insertion; otherwise, requests should be requeued
		 * in ordseq order.
		 */
		rq->cmd_flags |= REQ_SOFTBARRIER;

		if (q->ordseq == 0) {
			list_add(&rq->queuelist, &q->queue_head);
			break;
		}

		ordseq = blk_ordered_req_seq(rq);

		list_for_each(pos, &q->queue_head) {
			struct request *pos_rq = list_entry_rq(pos);
			if (ordseq <= blk_ordered_req_seq(pos_rq))
				break;
		}

		list_add_tail(&rq->queuelist, pos);
		/*
		 * most requeues happen because of a busy condition, don't
		 * force unplug of the queue for that case.
		 */
		unplug_it = 0;
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __FUNCTION__, where);
		BUG();
	}

	if (unplug_it && blk_queue_plugged(q)) {
		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
			- q->in_flight;

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}

void __elv_add_request(request_queue_t *q, struct request *rq, int where,
		       int plug)
{
	if (q->ordcolor)
		rq->cmd_flags |= REQ_ORDERED_COLOR;

	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * toggle ordered color
		 */
		if (blk_barrier_rq(rq))
			q->ordcolor ^= 1;

		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is scheduling boundary, update
		 * end_sector
		 */
		if (blk_fs_request(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	elv_insert(q, rq, where);
}

EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(request_queue_t *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

EXPORT_SYMBOL(elv_add_request);

static inline struct request *__elv_next_request(request_queue_t *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

struct request *elv_next_request(request_queue_t *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		if (!(rq->cmd_flags & REQ_STARTED)) {
			elevator_t *e = q->elevator;

			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (blk_sorted_rq(rq) &&
			    e->ops->elevator_activate_req_fn)
				e->ops->elevator_activate_req_fn(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->cmd_flags |= REQ_STARTED;
			blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			int nr_bytes = rq->hard_nr_sectors << 9;

			if (!nr_bytes)
				nr_bytes = rq->data_len;

			blkdev_dequeue_request(rq);
			rq->cmd_flags |= REQ_QUIET;
			end_that_request_chunk(rq, 0, nr_bytes);
			end_that_request_last(rq, 0);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
								ret);
			break;
		}
	}

	return rq;
}

EXPORT_SYMBOL(elv_next_request);

void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
	BUG_ON(list_empty(&rq->queuelist));
	BUG_ON(ELV_ON_HASH(rq));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and it being freed is accounted as io that is in progress at
	 * the driver side.
	 */
	if (blk_account_rq(rq))
		q->in_flight++;
}

EXPORT_SYMBOL(elv_dequeue_request);

int elv_queue_empty(request_queue_t *q)
{
	elevator_t *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}

EXPORT_SYMBOL(elv_queue_empty);

struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}

void elv_put_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(request_queue_t *q, int rw)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_completed_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *first_rq = list_entry_rq(q->queue_head.next);
		if (q->in_flight == 0 &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			q->request_fn(q);
		}
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct elv_fs_entry *entry = to_elv(attr);
	ssize_t error;

	if (!entry->show)
		return -EIO;

	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct elv_fs_entry *entry = to_elv(attr);
	ssize_t error;

	if (!entry->store)
		return -EIO;

	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	elevator_t *e = q->elevator;
	int error;

	e->kobj.parent = &q->kobj;

	error = kobject_add(&e->kobj);
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
	}
	return error;
}

static void __elv_unregister_queue(elevator_t *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}

int elv_register(struct elevator_type *e)
{
	spin_lock_irq(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock_irq(&elv_list_lock);

	printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				printk(" (default)");
	printk("\n");
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate every thread in the process to remove the io contexts.
	 */
	if (e->ops.trim) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (p->io_context)
				e->ops.trim(p->io_context);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	spin_lock_irq(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock_irq(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
	elevator_t *old_elevator, *e;
	void *data;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return 0;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return 0;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);

	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

	elv_drain_elevator(q);

	while (q->rq.elvpriv) {
		blk_remove_plug(q);
		q->request_fn(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	__elv_unregister_queue(old_elevator);

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	return 1;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	return 0;
}

ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	size_t len;
	struct elevator_type *e;

	elevator_name[sizeof(elevator_name) - 1] = '\0';
	strncpy(elevator_name, name, sizeof(elevator_name) - 1);
	len = strlen(elevator_name);

	if (len && elevator_name[len - 1] == '\n')
		elevator_name[len - 1] = '\0';

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	if (!elevator_switch(q, e))
		printk(KERN_ERR "elevator: switch to %s failed\n", elevator_name);
	return count;
}
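
/*
 * This backs the queue's "scheduler" sysfs attribute, e.g.
 * "echo deadline > /sys/block/<dev>/queue/scheduler"; elv_iosched_show
 * below lists the registered schedulers with the active one in square
 * brackets, e.g. "noop [deadline] cfq".
 */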

ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
	elevator_t *e = q->elevator;
	struct elevator_type *elv = e->elevator_type;
	struct list_head *entry;
	int len = 0;

	spin_lock_irq(&elv_list_lock);
	list_for_each(entry, &elv_list) {
		struct elevator_type *__e;

		__e = list_entry(entry, struct elevator_type, list);
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock_irq(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}

EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}

EXPORT_SYMBOL(elv_rb_latter_request);