/*
 *  Anticipatory & deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
 *                     Nick Piggin <nickpiggin@yahoo.com.au>
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/interrupt.h>

#define REQ_SYNC	1
#define REQ_ASYNC	0

/*
 * See Documentation/block/as-iosched.txt
 */

/*
 * max time before a read is submitted.
 */
#define default_read_expire (HZ / 8)

/*
 * ditto for writes, these limits are not hard, even
 * if the disk is capable of satisfying them.
 */
#define default_write_expire (HZ / 4)

/*
 * read_batch_expire describes how long we will allow a stream of reads to
 * persist before looking to see whether it is time to switch over to writes.
 */
#define default_read_batch_expire (HZ / 2)

/*
 * write_batch_expire describes how long we want a stream of writes to run for.
 * This is not a hard limit, but a target we set for the auto-tuning thingy.
 * See, the problem is: we can send a lot of writes to disk cache / TCQ in
 * a short amount of time...
 */
#define default_write_batch_expire (HZ / 8)

/*
 * max time we may wait to anticipate a read (default around 6ms)
 */
#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)
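/* (HZ/150 truncates to 0 when HZ < 150, hence the clamp to one jiffy) */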

/*
 * Keep track of up to 20ms thinktimes. We can go as big as we like here,
 * however huge values tend to interfere and not decay fast enough. A program
 * might be in a non-io phase of operation. Waiting on user input for example,
 * or doing a lengthy computation. A small penalty can be justified there, and
 * will still catch out those processes that constantly have large thinktimes.
 */
#define MAX_THINKTIME (HZ/50UL)

/* Bits in as_io_context.state */
enum as_io_states {
	AS_TASK_RUNNING=0,	/* Process has not exited */
	AS_TASK_IOSTARTED,	/* Process has started some IO */
	AS_TASK_IORUNNING,	/* Process has completed some IO */
};

enum anticipation_status {
	ANTIC_OFF=0,		/* Not anticipating (normal operation)	*/
	ANTIC_WAIT_REQ,		/* The last read has not yet completed  */
	ANTIC_WAIT_NEXT,	/* Currently anticipating a request vs
				   last read (which has completed) */
	ANTIC_FINISHED,		/* Anticipating but have found a candidate
				 * or timed out */
};

struct as_data {
	/*
	 * run time data
	 */

	struct request_queue *q;	/* the "owner" queue */

	/*
	 * requests (as_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	struct request *next_rq[2];	/* next in sort order */
	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */

	unsigned long exit_prob;	/* probability a task will exit while
					   being waited on */
	unsigned long exit_no_coop;	/* probability an exited task will
					   not be part of a later cooperating
					   request */
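	/* exit_prob and exit_no_coop are fixed point, with 256 == 100% */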
	unsigned long new_ttime_total; 	/* mean thinktime on new proc */
	unsigned long new_ttime_mean;
	u64 new_seek_total;		/* mean seek on new proc */
	sector_t new_seek_mean;

	unsigned long current_batch_expires;
	unsigned long last_check_fifo[2];
	int changed_batch;		/* 1: waiting for old batch to end */
	int new_batch;			/* 1: waiting on first read complete */
	int batch_data_dir;		/* current batch REQ_SYNC / REQ_ASYNC */
	int write_batch_count;		/* max # of reqs in a write batch */
	int current_write_count;	/* how many requests left this batch */
	int write_batch_idled;		/* has the write batch gone idle? */

	enum anticipation_status antic_status;
	unsigned long antic_start;	/* jiffies: when it started */
	struct timer_list antic_timer;	/* anticipatory scheduling timer */
	struct work_struct antic_work;	/* Deferred unplugging */
	struct io_context *io_context;	/* Identify the expected process */
	int ioc_finished; /* IO associated with io_context is finished */
	int nr_dispatched;

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	unsigned long fifo_expire[2];
	unsigned long batch_expire[2];
	unsigned long antic_expire;
};

/*
 * per-request data.
 */
enum arq_state {
	AS_RQ_NEW=0,		/* New - not referenced and not on any lists */
	AS_RQ_QUEUED,		/* In the request queue. It belongs to the
				   scheduler */
	AS_RQ_DISPATCHED,	/* On the dispatch list. It belongs to the
				   driver now */
	AS_RQ_PRESCHED,		/* Debug poisoning for requests being used */
	AS_RQ_REMOVED,
	AS_RQ_MERGED,
	AS_RQ_POSTSCHED,	/* when they shouldn't be */
};
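
/*
 * Per-request scheduler state is stashed in the two elevator private
 * fields of struct request: the owning io_context in ->elevator_private
 * and the arq_state in ->elevator_private2, accessed via the macros below.
 */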

#define RQ_IOC(rq)	((struct io_context *) (rq)->elevator_private)
#define RQ_STATE(rq)	((enum arq_state)(rq)->elevator_private2)
#define RQ_SET_STATE(rq, state)	((rq)->elevator_private2 = (void *) state)

static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;

static void as_move_to_dispatch(struct as_data *ad, struct request *rq);
static void as_antic_stop(struct as_data *ad);

/*
 * IO Context helper functions
 */

/* Called to deallocate the as_io_context */
static void free_as_io_context(struct as_io_context *aic)
{
	kfree(aic);
	elv_ioc_count_dec(ioc_count);
	if (ioc_gone && !elv_ioc_count_read(ioc_count))
		complete(ioc_gone);
}

static void as_trim(struct io_context *ioc)
{
	spin_lock_irq(&ioc->lock);
	if (ioc->aic)
		free_as_io_context(ioc->aic);
	ioc->aic = NULL;
	spin_unlock_irq(&ioc->lock);
}

/* Called when the task exits */
static void exit_as_io_context(struct as_io_context *aic)
{
	WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state));
	clear_bit(AS_TASK_RUNNING, &aic->state);
}

static struct as_io_context *alloc_as_io_context(void)
{
	struct as_io_context *ret;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (ret) {
		ret->dtor = free_as_io_context;
		ret->exit = exit_as_io_context;
		ret->state = 1 << AS_TASK_RUNNING;
		atomic_set(&ret->nr_queued, 0);
		atomic_set(&ret->nr_dispatched, 0);
		spin_lock_init(&ret->lock);
		ret->ttime_total = 0;
		ret->ttime_samples = 0;
		ret->ttime_mean = 0;
		ret->seek_total = 0;
		ret->seek_samples = 0;
		ret->seek_mean = 0;
		elv_ioc_count_inc(ioc_count);
	}

	return ret;
}

/*
 * If the current task has no AS IO context then create one and initialise it.
 * Then take a ref on the task's io context and return it.
 */
static struct io_context *as_get_io_context(int node)
{
	struct io_context *ioc = get_io_context(GFP_ATOMIC, node);
	if (ioc && !ioc->aic) {
		ioc->aic = alloc_as_io_context();
		if (!ioc->aic) {
			put_io_context(ioc);
			ioc = NULL;
		}
	}
	return ioc;
}

static void as_put_io_context(struct request *rq)
{
	struct as_io_context *aic;

	if (unlikely(!RQ_IOC(rq)))
		return;

	aic = RQ_IOC(rq)->aic;

	if (rq_is_sync(rq) && aic) {
		unsigned long flags;

		spin_lock_irqsave(&aic->lock, flags);
		set_bit(AS_TASK_IORUNNING, &aic->state);
		aic->last_end_request = jiffies;
		spin_unlock_irqrestore(&aic->lock, flags);
	}

	put_io_context(RQ_IOC(rq));
}

/*
 * rb tree support functions
 */
#define RQ_RB_ROOT(ad, rq)	(&(ad)->sort_list[rq_is_sync((rq))])

static void as_add_rq_rb(struct as_data *ad, struct request *rq)
{
	struct request *alias;

	while ((unlikely(alias = elv_rb_add(RQ_RB_ROOT(ad, rq), rq)))) {
		as_move_to_dispatch(ad, alias);
		as_antic_stop(ad);
	}
}

static inline void as_del_rq_rb(struct as_data *ad, struct request *rq)
{
	elv_rb_del(RQ_RB_ROOT(ad, rq), rq);
}

/*
 * IO Scheduler proper
 */

#define MAXBACK (1024 * 1024)	/*
				 * Maximum distance the disk will go backward
				 * for a request.
				 */

#define BACK_PENALTY	2
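
/*
 * For example, with the head at sector 1000, a request at sector 1100 is
 * d = 100 sectors away, while one at sector 960 is a short backward seek
 * charged (1000 - 960) * BACK_PENALTY = 80, so the backward request would
 * still be preferred here. Anything more than MAXBACK sectors behind the
 * head "wraps" and is only chosen when nothing lies in front.
 */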

/*
 * as_choose_req selects the preferred one of two requests of the same data_dir
 * ignoring time - eg. timeouts, which is the job of as_dispatch_request
 */
static struct request *
as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2)
{
	int data_dir;
	sector_t last, s1, s2, d1, d2;
	int r1_wrap=0, r2_wrap=0;	/* requests are behind the disk head */
	const sector_t maxback = MAXBACK;

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	data_dir = rq_is_sync(rq1);

	last = ad->last_sector[data_dir];
	s1 = rq1->sector;
	s2 = rq2->sector;

	BUG_ON(data_dir != rq_is_sync(rq2));

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1+maxback >= last)
		d1 = (last - s1)*BACK_PENALTY;
	else {
		r1_wrap = 1;
		d1 = 0; /* shut up, gcc */
	}

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2+maxback >= last)
		d2 = (last - s2)*BACK_PENALTY;
	else {
		r2_wrap = 1;
		d2 = 0;
	}

	/* Found required data */
	if (!r1_wrap && r2_wrap)
		return rq1;
	else if (!r2_wrap && r1_wrap)
		return rq2;
	else if (r1_wrap && r2_wrap) {
		/* both behind the head */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}

	/* Both requests in front of the head */
	if (d1 < d2)
		return rq1;
	else if (d2 < d1)
		return rq2;
	else {
		if (s1 >= s2)
			return rq1;
		else
			return rq2;
	}
}

/*
 * as_find_next_rq finds the next request after @last in elevator order.
 * This, together with as_choose_req, forms the basis for how the scheduler
 * chooses what request to process next. Anticipation works on top of this.
 */
static struct request *
as_find_next_rq(struct as_data *ad, struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		const int data_dir = rq_is_sync(last);

		rbnext = rb_first(&ad->sort_list[data_dir]);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return as_choose_req(ad, next, prev);
}

/*
 * anticipatory scheduling functions follow
 */

/*
 * as_antic_expired tells us when we have anticipated too long.
 * The funny "absolute difference" math on the elapsed time is to handle
 * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
 */
static int as_antic_expired(struct as_data *ad)
{
	long delta_jif;

	delta_jif = jiffies - ad->antic_start;
	if (unlikely(delta_jif < 0))
		delta_jif = -delta_jif;
	if (delta_jif < ad->antic_expire)
		return 0;

	return 1;
}

/*
 * as_antic_waitnext starts anticipating that a nice request will soon be
 * submitted. See also as_antic_waitreq
 */
static void as_antic_waitnext(struct as_data *ad)
{
	unsigned long timeout;

	BUG_ON(ad->antic_status != ANTIC_OFF
			&& ad->antic_status != ANTIC_WAIT_REQ);

	timeout = ad->antic_start + ad->antic_expire;

	mod_timer(&ad->antic_timer, timeout);

	ad->antic_status = ANTIC_WAIT_NEXT;
}

/*
 * as_antic_waitreq starts anticipating. We don't start timing the anticipation
 * until the request that we're anticipating on has finished. This means we
 * are timing from when the candidate process wakes up hopefully.
 */
static void as_antic_waitreq(struct as_data *ad)
{
	BUG_ON(ad->antic_status == ANTIC_FINISHED);
	if (ad->antic_status == ANTIC_OFF) {
		if (!ad->io_context || ad->ioc_finished)
			as_antic_waitnext(ad);
		else
			ad->antic_status = ANTIC_WAIT_REQ;
	}
}

/*
 * This is called directly by the functions in this file to stop anticipation.
 * We kill the timer and schedule a call to the request_fn asap.
 */
static void as_antic_stop(struct as_data *ad)
{
	int status = ad->antic_status;

	if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
		if (status == ANTIC_WAIT_NEXT)
			del_timer(&ad->antic_timer);
		ad->antic_status = ANTIC_FINISHED;
		/* see as_work_handler */
		kblockd_schedule_work(&ad->antic_work);
	}
}

/*
 * as_antic_timeout is the timer function set by as_antic_waitnext.
 */
static void as_antic_timeout(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	struct as_data *ad = q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		struct as_io_context *aic;
		spin_lock(&ad->io_context->lock);
		aic = ad->io_context->aic;

		ad->antic_status = ANTIC_FINISHED;
		kblockd_schedule_work(&ad->antic_work);

		if (aic->ttime_samples == 0) {
			/* process anticipated on has exited or timed out */
			ad->exit_prob = (7*ad->exit_prob + 256)/8;
		}
		if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
			/* process not "saved" by a cooperating request */
			ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8;
		}
		spin_unlock(&ad->io_context->lock);
	}
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic,
				unsigned long ttime)
{
	/* fixed point: 1.0 == 1<<8 */
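	/*
	 * ttime_samples and ttime_total each decay by 7/8 per sample while
	 * 1/8 of the new (256-scaled) sample is mixed in. E.g. a steady
	 * stream of 4-jiffy thinktimes converges on ttime_total == 1024
	 * and ttime_samples == 256, giving ttime_mean == 4.
	 */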
	if (aic->ttime_samples == 0) {
		ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8;
		ad->new_ttime_mean = ad->new_ttime_total / 256;

		ad->exit_prob = (7*ad->exit_prob)/8;
	}
	aic->ttime_samples = (7*aic->ttime_samples + 256) / 8;
	aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8;
	aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
}

static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic,
				sector_t sdist)
{
	u64 total;

	if (aic->seek_samples == 0) {
		ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8;
		ad->new_seek_mean = ad->new_seek_total / 256;
	}

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (aic->seek_samples <= 60) /* second&third seek */
		sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (aic->seek_mean * 4)	+ 2*1024*64);

	aic->seek_samples = (7*aic->seek_samples + 256) / 8;
	aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8;
	total = aic->seek_total + (aic->seek_samples/2);
	do_div(total, aic->seek_samples);
	aic->seek_mean = (sector_t)total;
}

/*
 * as_update_iohist keeps a decaying histogram of IO thinktimes, and
 * updates @aic->ttime_mean based on that. It is called when a new
 * request is queued.
 */
static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
				struct request *rq)
{
	int data_dir = rq_is_sync(rq);
	unsigned long thinktime = 0;
	sector_t seek_dist;

	if (aic == NULL)
		return;

	if (data_dir == REQ_SYNC) {
		unsigned long in_flight = atomic_read(&aic->nr_queued)
					+ atomic_read(&aic->nr_dispatched);
		spin_lock(&aic->lock);
		if (test_bit(AS_TASK_IORUNNING, &aic->state) ||
			test_bit(AS_TASK_IOSTARTED, &aic->state)) {
			/* Calculate read -> read thinktime */
			if (test_bit(AS_TASK_IORUNNING, &aic->state)
							&& in_flight == 0) {
				thinktime = jiffies - aic->last_end_request;
				thinktime = min(thinktime, MAX_THINKTIME-1);
			}
			as_update_thinktime(ad, aic, thinktime);

			/* Calculate read -> read seek distance */
			if (aic->last_request_pos < rq->sector)
				seek_dist = rq->sector - aic->last_request_pos;
			else
				seek_dist = aic->last_request_pos - rq->sector;
			as_update_seekdist(ad, aic, seek_dist);
		}
		aic->last_request_pos = rq->sector + rq->nr_sectors;
		set_bit(AS_TASK_IOSTARTED, &aic->state);
		spin_unlock(&aic->lock);
	}
}

/*
 * as_close_req decides if one request is considered "close" to the
 * previous one issued.
 */
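
/*
 * The "close" window below starts at 8192 sectors and doubles for every
 * jiffy already spent anticipating (delta = 8192 << delay), up to roughly
 * 20ms and never past antic_expire; beyond that, any request is treated
 * as close. A request outside the window is still considered close if
 * its seek distance beats the process's mean seek distance.
 */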
static int as_close_req(struct as_data *ad, struct as_io_context *aic,
			struct request *rq)
{
	unsigned long delay;	/* jiffies */
	sector_t last = ad->last_sector[ad->batch_data_dir];
	sector_t next = rq->sector;
	sector_t delta; /* acceptable close offset (in sectors) */
	sector_t s;

	if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished)
		delay = 0;
	else
		delay = jiffies - ad->antic_start;

	if (delay == 0)
		delta = 8192;
	else if (delay <= (20 * HZ / 1000) && delay <= ad->antic_expire)
		delta = 8192 << delay;
	else
		return 1;

	if ((last <= next + (delta>>1)) && (next <= last + delta))
		return 1;

	if (last < next)
		s = next - last;
	else
		s = last - next;

	if (aic->seek_samples == 0) {
		/*
		 * Process has just started IO. Use past statistics to
		 * gauge success possibility
		 */
		if (ad->new_seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}

	} else {
		if (aic->seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}
	}

	return 0;
}

/*
 * as_can_break_anticipation returns true if we have been anticipating this
 * request.
 *
 * It also returns true if the process against which we are anticipating
 * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
 * dispatch it ASAP, because we know that application will not be submitting
 * any new reads.
 *
 * If the task which has submitted the request has exited, break anticipation.
 *
 * If this task has queued some other IO, do not enter anticipation.
 */
static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
{
	struct io_context *ioc;
	struct as_io_context *aic;

	ioc = ad->io_context;
	BUG_ON(!ioc);
	spin_lock(&ioc->lock);

	if (rq && ioc == RQ_IOC(rq)) {
		/* request from same process */
		spin_unlock(&ioc->lock);
		return 1;
	}

	if (ad->ioc_finished && as_antic_expired(ad)) {
		/*
		 * In this situation status should really be FINISHED,
		 * however the timer hasn't had the chance to run yet.
		 */
		spin_unlock(&ioc->lock);
		return 1;
	}

	aic = ioc->aic;
	if (!aic) {
		spin_unlock(&ioc->lock);
		return 0;
	}

	if (atomic_read(&aic->nr_queued) > 0) {
		/* process has more requests queued */
		spin_unlock(&ioc->lock);
		return 1;
	}

	if (atomic_read(&aic->nr_dispatched) > 0) {
		/* process has more requests dispatched */
		spin_unlock(&ioc->lock);
		return 1;
	}

	if (rq && rq_is_sync(rq) && as_close_req(ad, aic, rq)) {
		/*
		 * Found a close request that is not one of ours.
		 *
		 * This makes close requests from another process update
		 * our IO history. This is generally useful when there are
		 * two or more cooperating processes working in the same
		 * area.
		 */
		if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
			if (aic->ttime_samples == 0)
				ad->exit_prob = (7*ad->exit_prob + 256)/8;

			ad->exit_no_coop = (7*ad->exit_no_coop)/8;
		}

		as_update_iohist(ad, aic, rq);
		spin_unlock(&ioc->lock);
		return 1;
	}

	if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
		/* process anticipated on has exited */
		if (aic->ttime_samples == 0)
			ad->exit_prob = (7*ad->exit_prob + 256)/8;

		if (ad->exit_no_coop > 128) {
			spin_unlock(&ioc->lock);
			return 1;
		}
	}

	if (aic->ttime_samples == 0) {
		if (ad->new_ttime_mean > ad->antic_expire) {
			spin_unlock(&ioc->lock);
			return 1;
		}
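		/*
		 * exit_prob and exit_no_coop are fixed point (256 == 100%),
		 * so this fires once the combined probability that the task
		 * has exited and will not be followed by a cooperating
		 * request exceeds 1/2.
		 */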
		if (ad->exit_prob * ad->exit_no_coop > 128*256) {
			spin_unlock(&ioc->lock);
			return 1;
		}
	} else if (aic->ttime_mean > ad->antic_expire) {
		/* the process thinks too much between requests */
		spin_unlock(&ioc->lock);
		return 1;
	}
	spin_unlock(&ioc->lock);
	return 0;
}

/*
 * as_can_anticipate indicates whether we should either run rq
 * or keep anticipating a better request.
 */
static int as_can_anticipate(struct as_data *ad, struct request *rq)
{
	if (!ad->io_context)
		/*
		 * Last request submitted was a write
		 */
		return 0;

	if (ad->antic_status == ANTIC_FINISHED)
		/*
		 * Don't restart if we have just finished. Run the next request
		 */
		return 0;

	if (as_can_break_anticipation(ad, rq))
		/*
		 * This request is a good candidate. Don't keep anticipating,
		 * run it.
		 */
		return 0;

	/*
	 * OK from here, we haven't finished, and don't have a decent request!
	 * Status is either ANTIC_OFF so start waiting,
	 * ANTIC_WAIT_REQ so continue waiting for request to finish
	 * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request.
	 */

	return 1;
}

/*
 * as_update_rq must be called whenever a request (rq) is added to
 * the sort_list. This function keeps caches up to date, and checks if the
 * request might be one we are "anticipating"
 */
static void as_update_rq(struct as_data *ad, struct request *rq)
{
	const int data_dir = rq_is_sync(rq);

	/* keep the next_rq cache up to date */
	ad->next_rq[data_dir] = as_choose_req(ad, rq, ad->next_rq[data_dir]);

	/*
	 * have we been anticipating this request?
	 * or does it come from the same process as the one we are anticipating
	 * for?
	 */
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		if (as_can_break_anticipation(ad, rq))
			as_antic_stop(ad);
	}
}

/*
 * Gathers timings and resizes the write batch automatically
 */
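/*
 * Writes complete into the drive cache almost immediately, so the write
 * batch is sized in requests rather than time: the measured duration of
 * the last write batch (write_time) is compared against the target
 * batch_expire[REQ_ASYNC], shrinking write_batch_count when we overshot
 * (halving it on a 3x overshoot) and growing it when the batch ran out
 * of requests before the target time.
 */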
static void update_write_batch(struct as_data *ad)
{
	unsigned long batch = ad->batch_expire[REQ_ASYNC];
	long write_time;

	write_time = (jiffies - ad->current_batch_expires) + batch;
	if (write_time < 0)
		write_time = 0;

	if (write_time > batch && !ad->write_batch_idled) {
		if (write_time > batch * 3)
			ad->write_batch_count /= 2;
		else
			ad->write_batch_count--;
	} else if (write_time < batch && ad->current_write_count == 0) {
		if (batch > write_time * 3)
			ad->write_batch_count *= 2;
		else
			ad->write_batch_count++;
	}

	if (ad->write_batch_count < 1)
		ad->write_batch_count = 1;
}

/*
 * as_completed_request is to be called when a request has completed and
 * returned something to the requesting process, be it an error or data.
 */
static void as_completed_request(struct request_queue *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;

	WARN_ON(!list_empty(&rq->queuelist));

	if (RQ_STATE(rq) != AS_RQ_REMOVED) {
		printk("rq->state %d\n", RQ_STATE(rq));
		WARN_ON(1);
		goto out;
	}

	if (ad->changed_batch && ad->nr_dispatched == 1) {
		kblockd_schedule_work(&ad->antic_work);
		ad->changed_batch = 0;

		if (ad->batch_data_dir == REQ_SYNC)
			ad->new_batch = 1;
	}
	WARN_ON(ad->nr_dispatched == 0);
	ad->nr_dispatched--;

	/*
	 * Start counting the batch from when a request of that direction is
	 * actually serviced. This should help devices with big TCQ windows
	 * and writeback caches
	 */
	if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) {
		update_write_batch(ad);
		ad->current_batch_expires = jiffies +
				ad->batch_expire[REQ_SYNC];
		ad->new_batch = 0;
	}

	if (ad->io_context == RQ_IOC(rq) && ad->io_context) {
		ad->antic_start = jiffies;
		ad->ioc_finished = 1;
		if (ad->antic_status == ANTIC_WAIT_REQ) {
			/*
			 * We were waiting on this request, now anticipate
			 * the next one
			 */
			as_antic_waitnext(ad);
		}
	}

	as_put_io_context(rq);
out:
	RQ_SET_STATE(rq, AS_RQ_POSTSCHED);
}

/*
 * as_remove_queued_request removes a request from the pre dispatch queue
 * without updating refcounts. It is expected the caller will drop the
 * reference unless it replaces the request at some part of the elevator
 * (ie. the dispatch queue)
 */
static void as_remove_queued_request(struct request_queue *q,
				     struct request *rq)
{
	const int data_dir = rq_is_sync(rq);
	struct as_data *ad = q->elevator->elevator_data;
	struct io_context *ioc;

	WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);

	ioc = RQ_IOC(rq);
	if (ioc && ioc->aic) {
		BUG_ON(!atomic_read(&ioc->aic->nr_queued));
		atomic_dec(&ioc->aic->nr_queued);
	}

	/*
	 * Update the "next_rq" cache if we are about to remove its
	 * entry
	 */
	if (ad->next_rq[data_dir] == rq)
		ad->next_rq[data_dir] = as_find_next_rq(ad, rq);

	rq_fifo_clear(rq);
	as_del_rq_rb(ad, rq);
}

/*
 * as_fifo_expired returns 0 if there are no expired requests on the fifo,
 * 1 otherwise.  It is ratelimited so that we only perform the check once per
 * `fifo_expire' interval.  Otherwise a large number of expired requests
 * would create a hopeless seekstorm.
 *
 * See as_antic_expired comment.
 */
static int as_fifo_expired(struct as_data *ad, int adir)
{
	struct request *rq;
	long delta_jif;

	delta_jif = jiffies - ad->last_check_fifo[adir];
	if (unlikely(delta_jif < 0))
		delta_jif = -delta_jif;
	if (delta_jif < ad->fifo_expire[adir])
		return 0;

	ad->last_check_fifo[adir] = jiffies;

	if (list_empty(&ad->fifo_list[adir]))
		return 0;

	rq = rq_entry_fifo(ad->fifo_list[adir].next);

	return time_after(jiffies, rq_fifo_time(rq));
}

/*
 * as_batch_expired returns true if the current batch has expired. A batch
 * is a set of reads or a set of writes.
 */
static inline int as_batch_expired(struct as_data *ad)
{
	if (ad->changed_batch || ad->new_batch)
		return 0;

	if (ad->batch_data_dir == REQ_SYNC)
		/* TODO! add a check so a complete fifo gets written? */
		return time_after(jiffies, ad->current_batch_expires);

	return time_after(jiffies, ad->current_batch_expires)
		|| ad->current_write_count == 0;
}

/*
 * move an entry to dispatch queue
 */
static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
{
	const int data_dir = rq_is_sync(rq);

	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));

	as_antic_stop(ad);
	ad->antic_status = ANTIC_OFF;

	/*
	 * This has to be set in order to be correctly updated by
	 * as_find_next_rq
	 */
	ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;

	if (data_dir == REQ_SYNC) {
		struct io_context *ioc = RQ_IOC(rq);
		/* In case we have to anticipate after this */
		copy_io_context(&ad->io_context, &ioc);
	} else {
		if (ad->io_context) {
			put_io_context(ad->io_context);
			ad->io_context = NULL;
		}

		if (ad->current_write_count != 0)
			ad->current_write_count--;
	}
	ad->ioc_finished = 0;

	ad->next_rq[data_dir] = as_find_next_rq(ad, rq);

	/*
	 * take it off the sort and fifo list, add to dispatch queue
	 */
	as_remove_queued_request(ad->q, rq);
	WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);

	elv_dispatch_sort(ad->q, rq);

	RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
	if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
		atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
	ad->nr_dispatched++;
}

/*
 * as_dispatch_request selects the best request according to
 * read/write expire, batch expire, etc, and moves it to the dispatch
 * queue. Returns 1 if a request was found, 0 otherwise.
 */
static int as_dispatch_request(struct request_queue *q, int force)
{
	struct as_data *ad = q->elevator->elevator_data;
	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
	const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
	struct request *rq;

	if (unlikely(force)) {
		/*
		 * Forced dispatch, accounting is useless.  Reset
		 * accounting states and dump fifo_lists.  Note that
		 * batch_data_dir is reset to REQ_SYNC to avoid
		 * screwing write batch accounting as write batch
		 * accounting occurs on W->R transition.
		 */
		int dispatched = 0;

		ad->batch_data_dir = REQ_SYNC;
		ad->changed_batch = 0;
		ad->new_batch = 0;

		while (ad->next_rq[REQ_SYNC]) {
			as_move_to_dispatch(ad, ad->next_rq[REQ_SYNC]);
			dispatched++;
		}
		ad->last_check_fifo[REQ_SYNC] = jiffies;

		while (ad->next_rq[REQ_ASYNC]) {
			as_move_to_dispatch(ad, ad->next_rq[REQ_ASYNC]);
			dispatched++;
		}
		ad->last_check_fifo[REQ_ASYNC] = jiffies;

		return dispatched;
	}

	/* Signal that the write batch was uncontended, so we can't time it */
	if (ad->batch_data_dir == REQ_ASYNC && !reads) {
		if (ad->current_write_count == 0 || !writes)
			ad->write_batch_idled = 1;
	}

	if (!(reads || writes)
		|| ad->antic_status == ANTIC_WAIT_REQ
		|| ad->antic_status == ANTIC_WAIT_NEXT
		|| ad->changed_batch)
		return 0;

	if (!(reads && writes && as_batch_expired(ad))) {
		/*
		 * batch is still running or no reads or no writes
		 */
		rq = ad->next_rq[ad->batch_data_dir];

		if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) {
			if (as_fifo_expired(ad, REQ_SYNC))
				goto fifo_expired;

			if (as_can_anticipate(ad, rq)) {
				as_antic_waitreq(ad);
				return 0;
			}
		}

		if (rq) {
			/* we have a "next request" */
			if (reads && !writes)
				ad->current_batch_expires =
					jiffies + ad->batch_expire[REQ_SYNC];
			goto dispatch_request;
		}
	}

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC]));

		if (writes && ad->batch_data_dir == REQ_SYNC)
			/*
			 * Last batch was a read, switch to writes
			 */
			goto dispatch_writes;

		if (ad->batch_data_dir == REQ_ASYNC) {
			WARN_ON(ad->new_batch);
			ad->changed_batch = 1;
		}
		ad->batch_data_dir = REQ_SYNC;
		rq = rq_entry_fifo(ad->fifo_list[REQ_SYNC].next);
		ad->last_check_fifo[ad->batch_data_dir] = jiffies;
		goto dispatch_request;
	}

	/*
	 * the last batch was a read
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC]));

		if (ad->batch_data_dir == REQ_SYNC) {
			ad->changed_batch = 1;

			/*
			 * new_batch might be 1 when the queue runs out of
			 * reads. A subsequent submission of a write might
			 * cause a change of batch before the read is finished.
			 */
			ad->new_batch = 0;
		}
		ad->batch_data_dir = REQ_ASYNC;
		ad->current_write_count = ad->write_batch_count;
		ad->write_batch_idled = 0;
		rq = rq_entry_fifo(ad->fifo_list[REQ_ASYNC].next);
		ad->last_check_fifo[REQ_ASYNC] = jiffies;
		goto dispatch_request;
	}

	BUG();
	return 0;

dispatch_request:
	/*
	 * If a request has expired, service it.
	 */

	if (as_fifo_expired(ad, ad->batch_data_dir)) {
fifo_expired:
		rq = rq_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
	}

	if (ad->changed_batch) {
		WARN_ON(ad->new_batch);

		if (ad->nr_dispatched)
			return 0;

		if (ad->batch_data_dir == REQ_ASYNC)
			ad->current_batch_expires = jiffies +
					ad->batch_expire[REQ_ASYNC];
		else
			ad->new_batch = 1;

		ad->changed_batch = 0;
	}

	/*
	 * rq is the selected appropriate request.
	 */
	as_move_to_dispatch(ad, rq);

	return 1;
}

/*
 * add rq to rbtree and fifo
 */
static void as_add_request(struct request_queue *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;
	int data_dir;

	RQ_SET_STATE(rq, AS_RQ_NEW);

	data_dir = rq_is_sync(rq);

	rq->elevator_private = as_get_io_context(q->node);

	if (RQ_IOC(rq)) {
		as_update_iohist(ad, RQ_IOC(rq)->aic, rq);
		atomic_inc(&RQ_IOC(rq)->aic->nr_queued);
	}

	as_add_rq_rb(ad, rq);

	/*
	 * set expire time and add to fifo list
	 */
	rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]);
	list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]);

	as_update_rq(ad, rq); /* keep state machine up to date */
	RQ_SET_STATE(rq, AS_RQ_QUEUED);
}

static void as_activate_request(struct request_queue *q, struct request *rq)
{
	WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
	RQ_SET_STATE(rq, AS_RQ_REMOVED);
	if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
		atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched);
}

static void as_deactivate_request(struct request_queue *q, struct request *rq)
{
	WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED);
	RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
	if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
		atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
}

/*
 * as_queue_empty tells us if there are requests left in the device. It may
 * not be the case that a driver can get the next request even if the queue
 * is not empty - it is used in the block layer to check for plugging and
 * merging opportunities
 */
static int as_queue_empty(struct request_queue *q)
{
	struct as_data *ad = q->elevator->elevator_data;

	return list_empty(&ad->fifo_list[REQ_ASYNC])
		&& list_empty(&ad->fifo_list[REQ_SYNC]);
}

static int
as_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct as_data *ad = q->elevator->elevator_data;
	sector_t rb_key = bio->bi_sector + bio_sectors(bio);
	struct request *__rq;

	/*
	 * check for front merge
	 */
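	/*
	 * rb_key is the sector just past the end of the bio; the rbtree is
	 * keyed by request start sector, so a hit is a request that begins
	 * exactly where this bio ends and can therefore be front-merged.
	 */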
	__rq = elv_rb_find(&ad->sort_list[bio_data_dir(bio)], rb_key);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void as_merged_request(struct request_queue *q, struct request *req,
			      int type)
{
	struct as_data *ad = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		as_del_rq_rb(ad, req);
		as_add_rq_rb(ad, req);
		/*
		 * Note! At this stage of this and the next function, our next
		 * request may not be optimal - eg the request may have "grown"
		 * behind the disk head. We currently don't bother adjusting.
		 */
	}
}

static void as_merged_requests(struct request_queue *q, struct request *req,
			 	struct request *next)
{
	/*
	 * if next expires before req, assign its expire time to req
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
			struct io_context *rioc = RQ_IOC(req);
			struct io_context *nioc = RQ_IOC(next);

			list_move(&req->queuelist, &next->queuelist);
			rq_set_fifo_time(req, rq_fifo_time(next));
			/*
			 * Don't copy here but swap, because when next is
			 * removed below, it must contain the unused context
			 */
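			/*
			 * double_spin_lock orders the two locks by pointer
			 * value (rioc < nioc), so two CPUs merging the same
			 * pair of contexts cannot deadlock on them.
			 */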
			if (rioc != nioc) {
				double_spin_lock(&rioc->lock, &nioc->lock,
								rioc < nioc);
				swap_io_context(&rioc, &nioc);
				double_spin_unlock(&rioc->lock, &nioc->lock,
								rioc < nioc);
			}
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	as_remove_queued_request(q, next);
	as_put_io_context(next);

	RQ_SET_STATE(next, AS_RQ_MERGED);
}

/*
 * This is executed in a "deferred" process context, by kblockd. It calls the
 * driver's request_fn so the driver can submit that request.
 *
 * IMPORTANT! This guy will reenter the elevator, so set up all queue global
 * state before calling, and don't rely on any state over calls.
 *
 * FIXME! dispatch queue is not a queue at all!
 */
static void as_work_handler(struct work_struct *work)
{
	struct as_data *ad = container_of(work, struct as_data, antic_work);
	struct request_queue *q = ad->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static int as_may_queue(struct request_queue *q, int rw)
{
	int ret = ELV_MQUEUE_MAY;
	struct as_data *ad = q->elevator->elevator_data;
	struct io_context *ioc;
	if (ad->antic_status == ANTIC_WAIT_REQ ||
			ad->antic_status == ANTIC_WAIT_NEXT) {
		ioc = as_get_io_context(q->node);
		if (ad->io_context == ioc)
			ret = ELV_MQUEUE_MUST;
		put_io_context(ioc);
	}

	return ret;
}

static void as_exit_queue(elevator_t *e)
{
	struct as_data *ad = e->elevator_data;

	del_timer_sync(&ad->antic_timer);
	kblockd_flush_work(&ad->antic_work);

	BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
	BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));

	put_io_context(ad->io_context);
	kfree(ad);
}

/*
 * initialize elevator private data (as_data).
 */
static void *as_init_queue(struct request_queue *q)
{
	struct as_data *ad;

	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!ad)
		return NULL;

	ad->q = q; /* Identify what queue the data belongs to */

	/* anticipatory scheduling helpers */
	ad->antic_timer.function = as_antic_timeout;
	ad->antic_timer.data = (unsigned long)q;
	init_timer(&ad->antic_timer);
	INIT_WORK(&ad->antic_work, as_work_handler);

	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
	ad->sort_list[REQ_SYNC] = RB_ROOT;
	ad->sort_list[REQ_ASYNC] = RB_ROOT;
	ad->fifo_expire[REQ_SYNC] = default_read_expire;
	ad->fifo_expire[REQ_ASYNC] = default_write_expire;
	ad->antic_expire = default_antic_expire;
	ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
	ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;

	ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
	ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
	if (ad->write_batch_count < 2)
		ad->write_batch_count = 2;

	return ad;
}

/*
 * sysfs parts below
 */

static ssize_t
as_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
as_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

static ssize_t est_time_show(elevator_t *e, char *page)
{
	struct as_data *ad = e->elevator_data;
	int pos = 0;

	pos += sprintf(page+pos, "%lu %% exit probability\n",
				100*ad->exit_prob/256);
	pos += sprintf(page+pos, "%lu %% probability of exiting without a "
				"cooperating process submitting IO\n",
				100*ad->exit_no_coop/256);
	pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean);
	pos += sprintf(page+pos, "%llu sectors new seek distance\n",
				(unsigned long long)ad->new_seek_mean);

	return pos;
}

#define SHOW_FUNCTION(__FUNC, __VAR)				\
static ssize_t __FUNC(elevator_t *e, char *page)		\
{								\
	struct as_data *ad = e->elevator_data;			\
	return as_var_show(jiffies_to_msecs((__VAR)), (page));	\
}
SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]);
SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]);
SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire);
SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]);
SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)				\
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
{									\
	struct as_data *ad = e->elevator_data;				\
	int ret = as_var_store(__PTR, (page), count);			\
	if (*(__PTR) < (MIN))						\
		*(__PTR) = (MIN);					\
	else if (*(__PTR) > (MAX))					\
		*(__PTR) = (MAX);					\
	*(__PTR) = msecs_to_jiffies(*(__PTR));				\
	return ret;							\
}
STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX);
STORE_FUNCTION(as_read_batch_expire_store,
			&ad->batch_expire[REQ_SYNC], 0, INT_MAX);
STORE_FUNCTION(as_write_batch_expire_store,
			&ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
#undef STORE_FUNCTION

#define AS_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store)

static struct elv_fs_entry as_attrs[] = {
	__ATTR_RO(est_time),
	AS_ATTR(read_expire),
	AS_ATTR(write_expire),
	AS_ATTR(antic_expire),
	AS_ATTR(read_batch_expire),
	AS_ATTR(write_batch_expire),
	__ATTR_NULL
};

static struct elevator_type iosched_as = {
	.ops = {
		.elevator_merge_fn = 		as_merge,
		.elevator_merged_fn =		as_merged_request,
		.elevator_merge_req_fn =	as_merged_requests,
		.elevator_dispatch_fn =		as_dispatch_request,
		.elevator_add_req_fn =		as_add_request,
		.elevator_activate_req_fn =	as_activate_request,
		.elevator_deactivate_req_fn = 	as_deactivate_request,
		.elevator_queue_empty_fn =	as_queue_empty,
		.elevator_completed_req_fn =	as_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_may_queue_fn =	as_may_queue,
		.elevator_init_fn =		as_init_queue,
		.elevator_exit_fn =		as_exit_queue,
		.trim =				as_trim,
	},

	.elevator_attrs = as_attrs,
	.elevator_name = "anticipatory",
	.elevator_owner = THIS_MODULE,
};

static int __init as_init(void)
{
	elv_register(&iosched_as);

	return 0;
}

static void __exit as_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);
	elv_unregister(&iosched_as);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();
	if (elv_ioc_count_read(ioc_count))
		wait_for_completion(ioc_gone);
	synchronize_rcu();
}

module_init(as_init);
module_exit(as_exit);

MODULE_AUTHOR("Nick Piggin");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("anticipatory IO scheduler");