cfq-iosched.c 52.8 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6
/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
7
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
L
Linus Torvalds 已提交
8 9
 */
#include <linux/module.h>
A
Al Viro 已提交
10 11
#include <linux/blkdev.h>
#include <linux/elevator.h>
L
Linus Torvalds 已提交
12 13
#include <linux/hash.h>
#include <linux/rbtree.h>
14
#include <linux/ioprio.h>
L
Linus Torvalds 已提交
15 16 17 18

/*
 * tunables
 */
19 20 21 22
static const int cfq_quantum = 4;		/* max queue in one round of service */
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
static const int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
static const int cfq_back_penalty = 2;		/* penalty of a backwards seek */
L
Linus Torvalds 已提交
23

24
static const int cfq_slice_sync = HZ / 10;
J
Jens Axboe 已提交
25
static int cfq_slice_async = HZ / 25;
26
static const int cfq_slice_async_rq = 2;
27
static int cfq_slice_idle = HZ / 125;
28 29 30 31 32 33

#define CFQ_IDLE_GRACE		(HZ / 10)
#define CFQ_SLICE_SCALE		(5)

#define CFQ_KEY_ASYNC		(0)

L
Linus Torvalds 已提交
34 35 36 37 38 39 40 41 42
/*
 * for the hash of cfqq inside the cfqd
 */
#define CFQ_QHASH_SHIFT		6
#define CFQ_QHASH_ENTRIES	(1 << CFQ_QHASH_SHIFT)
#define list_entry_qhash(entry)	hlist_entry((entry), struct cfq_queue, cfq_hash)

#define list_entry_cfqq(ptr)	list_entry((ptr), struct cfq_queue, cfq_list)

J
Jens Axboe 已提交
43 44
#define RQ_CIC(rq)		((struct cfq_io_context*)(rq)->elevator_private)
#define RQ_CFQQ(rq)		((rq)->elevator_private2)
L
Linus Torvalds 已提交
45

46 47
static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;
L
Linus Torvalds 已提交
48

49
static DEFINE_PER_CPU(unsigned long, ioc_count);
50 51
static struct completion *ioc_gone;

52 53 54 55
#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

J
Jens Axboe 已提交
56 57 58 59 60 61 62 63 64 65
#define ASYNC			(0)
#define SYNC			(1)

#define cfq_cfqq_dispatched(cfqq)	\
	((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])

#define cfq_cfqq_class_sync(cfqq)	((cfqq)->key != CFQ_KEY_ASYNC)

#define cfq_cfqq_sync(cfqq)		\
	(cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
66

67 68
#define sample_valid(samples)	((samples) > 80)

69 70 71
/*
 * Per block device queue structure
 */
L
Linus Torvalds 已提交
72
struct cfq_data {
73 74 75 76 77 78 79 80 81 82 83 84 85 86
	request_queue_t *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct list_head rr_list[CFQ_PRIO_LISTS];
	struct list_head busy_rr;
	struct list_head cur_rr;
	struct list_head idle_rr;
	unsigned int busy_queues;

	/*
	 * cfqq lookup hash
	 */
L
Linus Torvalds 已提交
87 88
	struct hlist_head *cfq_hash;

89
	int rq_in_driver;
90
	int hw_tag;
L
Linus Torvalds 已提交
91

92 93 94 95 96
	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;
L
Linus Torvalds 已提交
97

98 99 100 101 102 103
	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;
	int cur_prio, cur_end_prio;
	unsigned int dispatch_slice;

	struct timer_list idle_class_timer;
L
Linus Torvalds 已提交
104 105

	sector_t last_sector;
106
	unsigned long last_end_request;
L
Linus Torvalds 已提交
107 108 109 110 111

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
112
	unsigned int cfq_fifo_expire[2];
L
Linus Torvalds 已提交
113 114
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
115 116 117
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;
118 119

	struct list_head cic_list;
L
Linus Torvalds 已提交
120 121
};

122 123 124
/*
 * Per process-grouping structure
 */
L
Linus Torvalds 已提交
125 126 127 128 129
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* parent cfq_data */
	struct cfq_data *cfqd;
130
	/* cfqq lookup hash */
L
Linus Torvalds 已提交
131 132
	struct hlist_node cfq_hash;
	/* hash key */
133
	unsigned int key;
134
	/* member of the rr/busy/cur/idle cfqd list */
L
Linus Torvalds 已提交
135 136 137 138
	struct list_head cfq_list;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
J
Jens Axboe 已提交
139
	struct request *next_rq;
L
Linus Torvalds 已提交
140 141 142 143
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
144 145
	/* pending metadata requests */
	int meta_pending;
L
Linus Torvalds 已提交
146
	/* fifo list of requests in sort_list */
147
	struct list_head fifo;
L
Linus Torvalds 已提交
148

149
	unsigned long slice_end;
150
	unsigned long service_last;
151
	long slice_resid;
L
Linus Torvalds 已提交
152

J
Jens Axboe 已提交
153 154
	/* number of requests that are on the dispatch list */
	int on_dispatch[2];
155 156 157 158 159

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

J
Jens Axboe 已提交
160 161
	/* various state flags, see below */
	unsigned int flags;
L
Linus Torvalds 已提交
162 163
};

J
Jens Axboe 已提交
164
enum cfqq_state_flags {
165 166 167 168 169 170 171 172 173
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
174
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
J
Jens Axboe 已提交
175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
199
CFQ_CFQQ_FNS(queue_new);
200
CFQ_CFQQ_FNS(slice_new);
J
Jens Axboe 已提交
201 202 203
#undef CFQ_CFQQ_FNS

static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
J
Jens Axboe 已提交
204
static void cfq_dispatch_insert(request_queue_t *, struct request *);
205
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
L
Linus Torvalds 已提交
206

A
Andrew Morton 已提交
207 208 209 210 211 212
/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
213
	if (cfqd->busy_queues)
A
Andrew Morton 已提交
214 215 216 217 218 219 220
		kblockd_schedule_work(&cfqd->unplug_work);
}

static int cfq_queue_empty(request_queue_t *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

221
	return !cfqd->busy_queues;
A
Andrew Morton 已提交
222 223
}

224
static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
225
{
226 227 228 229
	/*
	 * Use the per-process queue, for read requests and syncronous writes
	 */
	if (!(rw & REQ_RW) || is_sync)
230 231 232 233 234
		return task->pid;

	return CFQ_KEY_ASYNC;
}

235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253
/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
254 255 256 257 258 259 260 261
	cfqq->slice_end += cfqq->slice_resid;

	/*
	 * Don't carry over residual for more than one slice, we only want
	 * to slightly correct the fairness. Carrying over forever would
	 * easily introduce oscillations.
	 */
	cfqq->slice_resid = 0;
262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline int cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return 0;
	if (time_before(jiffies, cfqq->slice_end))
		return 0;

	return 1;
}

L
Linus Torvalds 已提交
279
/*
J
Jens Axboe 已提交
280
 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
L
Linus Torvalds 已提交
281
 * We choose the request that is closest to the head right now. Distance
282
 * behind the head is penalized and only allowed to a certain extent.
L
Linus Torvalds 已提交
283
 */
J
Jens Axboe 已提交
284 285
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
L
Linus Torvalds 已提交
286 287 288
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
289 290 291
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */
L
Linus Torvalds 已提交
292

J
Jens Axboe 已提交
293 294 295 296
	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;
J
Jens Axboe 已提交
297

J
Jens Axboe 已提交
298 299 300 301
	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
302 303 304 305
	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
		return rq1;
	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
		return rq2;
L
Linus Torvalds 已提交
306

J
Jens Axboe 已提交
307 308
	s1 = rq1->sector;
	s2 = rq2->sector;
L
Linus Torvalds 已提交
309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326

	last = cfqd->last_sector;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
327
		wrap |= CFQ_RQ1_WRAP;
L
Linus Torvalds 已提交
328 329 330 331 332 333

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
334
		wrap |= CFQ_RQ2_WRAP;
L
Linus Torvalds 已提交
335 336

	/* Found required data */
337 338 339 340 341 342

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
J
Jens Axboe 已提交
343
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
344
		if (d1 < d2)
J
Jens Axboe 已提交
345
			return rq1;
346
		else if (d2 < d1)
J
Jens Axboe 已提交
347
			return rq2;
348 349
		else {
			if (s1 >= s2)
J
Jens Axboe 已提交
350
				return rq1;
351
			else
J
Jens Axboe 已提交
352
				return rq2;
353
		}
L
Linus Torvalds 已提交
354

355
	case CFQ_RQ2_WRAP:
J
Jens Axboe 已提交
356
		return rq1;
357
	case CFQ_RQ1_WRAP:
J
Jens Axboe 已提交
358 359
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
360 361 362 363 364 365 366 367
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
J
Jens Axboe 已提交
368
			return rq1;
L
Linus Torvalds 已提交
369
		else
J
Jens Axboe 已提交
370
			return rq2;
L
Linus Torvalds 已提交
371 372 373 374 375 376
	}
}

/*
 * would be nice to take fifo expire time into account as well
 */
J
Jens Axboe 已提交
377 378 379
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
L
Linus Torvalds 已提交
380
{
381 382
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
J
Jens Axboe 已提交
383
	struct request *next = NULL, *prev = NULL;
L
Linus Torvalds 已提交
384

385
	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
L
Linus Torvalds 已提交
386 387

	if (rbprev)
J
Jens Axboe 已提交
388
		prev = rb_entry_rq(rbprev);
L
Linus Torvalds 已提交
389

390
	if (rbnext)
J
Jens Axboe 已提交
391
		next = rb_entry_rq(rbnext);
392 393 394
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
J
Jens Axboe 已提交
395
			next = rb_entry_rq(rbnext);
396
	}
L
Linus Torvalds 已提交
397

398
	return cfq_choose_req(cfqd, next, prev);
L
Linus Torvalds 已提交
399 400
}

401
static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
L
Linus Torvalds 已提交
402
{
403
	struct cfq_data *cfqd = cfqq->cfqd;
404 405
	struct list_head *list, *n;
	struct cfq_queue *__cfqq;
L
Linus Torvalds 已提交
406

407 408 409 410 411
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (!cfq_cfqq_on_rr(cfqq))
		return;
L
Linus Torvalds 已提交
412

413
	list_del(&cfqq->cfq_list);
L
Linus Torvalds 已提交
414

415 416 417 418 419 420 421 422 423 424 425 426
	if (cfq_class_rt(cfqq))
		list = &cfqd->cur_rr;
	else if (cfq_class_idle(cfqq))
		list = &cfqd->idle_rr;
	else {
		/*
		 * if cfqq has requests in flight, don't allow it to be
		 * found in cfq_set_active_queue before it has finished them.
		 * this is done to increase fairness between a process that
		 * has lots of io pending vs one that only generates one
		 * sporadically or synchronously
		 */
J
Jens Axboe 已提交
427
		if (cfq_cfqq_dispatched(cfqq))
428 429 430
			list = &cfqd->busy_rr;
		else
			list = &cfqd->rr_list[cfqq->ioprio];
L
Linus Torvalds 已提交
431 432
	}

433
	if (preempted || cfq_cfqq_queue_new(cfqq)) {
434 435 436 437 438 439
		/*
		 * If this queue was preempted or is new (never been serviced),
		 * let it be added first for fairness but beind other new
		 * queues.
		 */
		n = list;
440 441 442 443
		while (n->next != list) {
			__cfqq = list_entry_cfqq(n->next);
			if (!cfq_cfqq_queue_new(__cfqq))
				break;
L
Linus Torvalds 已提交
444

445 446
			n = n->next;
		}
447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464
		list_add_tail(&cfqq->cfq_list, n);
	} else if (!cfq_cfqq_class_sync(cfqq)) {
		/*
		 * async queue always goes to the end. this wont be overly
		 * unfair to writes, as the sort of the sync queue wont be
		 * allowed to pass the async queue again.
		 */
		list_add_tail(&cfqq->cfq_list, list);
	} else {
		/*
		 * sort by last service, but don't cross a new or async
		 * queue. we don't cross a new queue because it hasn't been
		 * service before, and we don't cross an async queue because
		 * it gets added to the end on expire.
		 */
		n = list;
		while ((n = n->prev) != list) {
			struct cfq_queue *__cfqq = list_entry_cfqq(n);
L
Linus Torvalds 已提交
465

466 467 468 469 470 471
			if (!cfq_cfqq_class_sync(cfqq) || !__cfqq->service_last)
				break;
			if (time_before(__cfqq->service_last, cfqq->service_last))
				break;
		}
		list_add(&cfqq->cfq_list, n);
L
Linus Torvalds 已提交
472 473 474 475 476
	}
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
477
 * the pending list according to last request service
L
Linus Torvalds 已提交
478 479
 */
static inline void
480
cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
L
Linus Torvalds 已提交
481
{
J
Jens Axboe 已提交
482 483
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
L
Linus Torvalds 已提交
484 485
	cfqd->busy_queues++;

486
	cfq_resort_rr_list(cfqq, 0);
L
Linus Torvalds 已提交
487 488 489 490 491
}

static inline void
cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
J
Jens Axboe 已提交
492 493
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);
494
	list_del_init(&cfqq->cfq_list);
L
Linus Torvalds 已提交
495 496 497 498 499 500 501 502

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
}

/*
 * rb tree support functions
 */
J
Jens Axboe 已提交
503
static inline void cfq_del_rq_rb(struct request *rq)
L
Linus Torvalds 已提交
504
{
J
Jens Axboe 已提交
505
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
506
	struct cfq_data *cfqd = cfqq->cfqd;
J
Jens Axboe 已提交
507
	const int sync = rq_is_sync(rq);
L
Linus Torvalds 已提交
508

509 510
	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;
L
Linus Torvalds 已提交
511

J
Jens Axboe 已提交
512
	elv_rb_del(&cfqq->sort_list, rq);
L
Linus Torvalds 已提交
513

514
	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
515
		cfq_del_cfqq_rr(cfqd, cfqq);
L
Linus Torvalds 已提交
516 517
}

J
Jens Axboe 已提交
518
static void cfq_add_rq_rb(struct request *rq)
L
Linus Torvalds 已提交
519
{
J
Jens Axboe 已提交
520
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
L
Linus Torvalds 已提交
521
	struct cfq_data *cfqd = cfqq->cfqd;
522
	struct request *__alias;
L
Linus Torvalds 已提交
523

524
	cfqq->queued[rq_is_sync(rq)]++;
L
Linus Torvalds 已提交
525 526 527 528 529

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
530
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
J
Jens Axboe 已提交
531
		cfq_dispatch_insert(cfqd->queue, __alias);
532 533 534

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);
L
Linus Torvalds 已提交
535 536 537
}

static inline void
J
Jens Axboe 已提交
538
cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
L
Linus Torvalds 已提交
539
{
540 541
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
J
Jens Axboe 已提交
542
	cfq_add_rq_rb(rq);
L
Linus Torvalds 已提交
543 544
}

545 546
static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
L
Linus Torvalds 已提交
547
{
548
	struct task_struct *tsk = current;
549
	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
550
	struct cfq_queue *cfqq;
L
Linus Torvalds 已提交
551

552
	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
553 554 555
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

556
		return elv_rb_find(&cfqq->sort_list, sector);
557
	}
L
Linus Torvalds 已提交
558 559 560 561

	return NULL;
}

562
static void cfq_activate_request(request_queue_t *q, struct request *rq)
L
Linus Torvalds 已提交
563
{
564
	struct cfq_data *cfqd = q->elevator->elevator_data;
J
Jens Axboe 已提交
565

566
	cfqd->rq_in_driver++;
567 568 569 570 571 572 573 574 575

	/*
	 * If the depth is larger 1, it really could be queueing. But lets
	 * make the mark a little higher - idling could still be good for
	 * low queueing, and a low queueing number could also just indicate
	 * a SCSI mid layer like behaviour where limit+1 is often seen.
	 */
	if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
		cfqd->hw_tag = 1;
L
Linus Torvalds 已提交
576 577
}

578
static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
L
Linus Torvalds 已提交
579
{
580 581 582 583
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
L
Linus Torvalds 已提交
584 585
}

586
static void cfq_remove_request(struct request *rq)
L
Linus Torvalds 已提交
587
{
J
Jens Axboe 已提交
588
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
589

J
Jens Axboe 已提交
590 591
	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
L
Linus Torvalds 已提交
592

593
	list_del_init(&rq->queuelist);
J
Jens Axboe 已提交
594
	cfq_del_rq_rb(rq);
595 596 597 598 599

	if (rq_is_meta(rq)) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
L
Linus Torvalds 已提交
600 601 602 603 604 605 606 607
}

static int
cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

608
	__rq = cfq_find_rq_fmerge(cfqd, bio);
609
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
610 611
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
L
Linus Torvalds 已提交
612 613 614 615 616
	}

	return ELEVATOR_NO_MERGE;
}

617 618
static void cfq_merged_request(request_queue_t *q, struct request *req,
			       int type)
L
Linus Torvalds 已提交
619
{
620
	if (type == ELEVATOR_FRONT_MERGE) {
J
Jens Axboe 已提交
621
		struct cfq_queue *cfqq = RQ_CFQQ(req);
L
Linus Torvalds 已提交
622

J
Jens Axboe 已提交
623
		cfq_reposition_rq_rb(cfqq, req);
L
Linus Torvalds 已提交
624 625 626 627 628 629 630
	}
}

static void
cfq_merged_requests(request_queue_t *q, struct request *rq,
		    struct request *next)
{
631 632 633 634 635 636 637
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(next->start_time, rq->start_time))
		list_move(&rq->queuelist, &next->queuelist);

638
	cfq_remove_request(next);
639 640
}

641 642 643 644 645 646 647 648 649
static int cfq_allow_merge(request_queue_t *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	const int rw = bio_data_dir(bio);
	struct cfq_queue *cfqq;
	pid_t key;

	/*
650
	 * Disallow merge of a sync bio into an async request.
651
	 */
652
	if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
653 654 655
		return 0;

	/*
656 657
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
658
	 */
659
	key = cfq_queue_pid(current, rw, bio_sync(bio));
660
	cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
661 662 663

	if (cfqq == RQ_CFQQ(rq))
		return 1;
664

665
	return 0;
666 667
}

668 669 670 671 672 673 674 675 676 677
static inline void
__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (cfqq) {
		/*
		 * stop potential idle class queues waiting service
		 */
		del_timer(&cfqd->idle_class_timer);

		cfqq->slice_end = 0;
J
Jens Axboe 已提交
678 679
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
680
		cfq_mark_cfqq_slice_new(cfqq);
681 682 683 684 685
	}

	cfqd->active_queue = cfqq;
}

686 687 688 689 690 691 692 693 694 695
/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int preempted)
{
	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

696
	if (!preempted && !cfq_cfqq_dispatched(cfqq))
697 698 699 700
		cfq_schedule_dispatch(cfqd);

	cfq_clear_cfqq_must_dispatch(cfqq);
	cfq_clear_cfqq_wait_request(cfqq);
701
	cfq_clear_cfqq_queue_new(cfqq);
702 703 704 705 706

	/*
	 * store what was left of this slice, if the queue idled out
	 * or was preempted
	 */
707 708
	if (!cfq_cfqq_slice_new(cfqq))
		cfqq->slice_resid = cfqq->slice_end - jiffies;
709

710
	cfq_resort_rr_list(cfqq, preempted);
711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}

	cfqd->dispatch_slice = 0;
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, preempted);
}

731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764
/*
 * 0
 * 0,1
 * 0,1,2
 * 0,1,2,3
 * 0,1,2,3,4
 * 0,1,2,3,4,5
 * 0,1,2,3,4,5,6
 * 0,1,2,3,4,5,6,7
 */
static int cfq_get_next_prio_level(struct cfq_data *cfqd)
{
	int prio, wrap;

	prio = -1;
	wrap = 0;
	do {
		int p;

		for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
			if (!list_empty(&cfqd->rr_list[p])) {
				prio = p;
				break;
			}
		}

		if (prio != -1)
			break;
		cfqd->cur_prio = 0;
		if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
			cfqd->cur_end_prio = 0;
			if (wrap)
				break;
			wrap = 1;
L
Linus Torvalds 已提交
765
		}
766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782
	} while (1);

	if (unlikely(prio == -1))
		return -1;

	BUG_ON(prio >= CFQ_PRIO_LISTS);

	list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);

	cfqd->cur_prio = prio + 1;
	if (cfqd->cur_prio > cfqd->cur_end_prio) {
		cfqd->cur_end_prio = cfqd->cur_prio;
		cfqd->cur_prio = 0;
	}
	if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
		cfqd->cur_prio = 0;
		cfqd->cur_end_prio = 0;
L
Linus Torvalds 已提交
783 784
	}

785 786 787
	return prio;
}

J
Jens Axboe 已提交
788
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
789
{
790
	struct cfq_queue *cfqq = NULL;
791

792 793 794 795 796 797
	if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) {
		/*
		 * if current list is non-empty, grab first entry. if it is
		 * empty, get next prio level and grab first entry then if any
		 * are spliced
		 */
798
		cfqq = list_entry_cfqq(cfqd->cur_rr.next);
799 800 801 802 803
	} else if (!list_empty(&cfqd->busy_rr)) {
		/*
		 * If no new queues are available, check if the busy list has
		 * some before falling back to idle io.
		 */
804
		cfqq = list_entry_cfqq(cfqd->busy_rr.next);
805 806 807 808 809 810
	} else if (!list_empty(&cfqd->idle_rr)) {
		/*
		 * if we have idle queues and no rt or be queues had pending
		 * requests, either allow immediate service if the grace period
		 * has passed or arm the idle grace timer
		 */
811 812 813 814 815 816 817 818 819
		unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;

		if (time_after_eq(jiffies, end))
			cfqq = list_entry_cfqq(cfqd->idle_rr.next);
		else
			mod_timer(&cfqd->idle_class_timer, end);
	}

	__cfq_set_active_queue(cfqd, cfqq);
J
Jens Axboe 已提交
820
	return cfqq;
821 822
}

823 824
#define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024))

825
static int cfq_arm_slice_timer(struct cfq_data *cfqd)
826
{
827
	struct cfq_queue *cfqq = cfqd->active_queue;
828
	struct cfq_io_context *cic;
829 830
	unsigned long sl;

831
	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
832 833 834 835 836 837

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfqd->cfq_slice_idle)
		return 0;
J
Jens Axboe 已提交
838
	if (!cfq_cfqq_idle_window(cfqq))
839 840 841 842
		return 0;
	/*
	 * task has exited, don't wait
	 */
843 844
	cic = cfqd->active_cic;
	if (!cic || !cic->ioc->task)
845 846
		return 0;

J
Jens Axboe 已提交
847 848
	cfq_mark_cfqq_must_dispatch(cfqq);
	cfq_mark_cfqq_wait_request(cfqq);
849

850
	sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
851 852 853 854 855 856

	/*
	 * we don't want to idle for seeks, but we do want to allow
	 * fair distribution of slice time for a process doing back-to-back
	 * seeks. so allow a little bit of time for him to submit a new rq
	 */
857
	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
858
		sl = min(sl, msecs_to_jiffies(2));
859

860
	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
861
	return 1;
L
Linus Torvalds 已提交
862 863
}

J
Jens Axboe 已提交
864
static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
L
Linus Torvalds 已提交
865 866
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
J
Jens Axboe 已提交
867
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
868

869 870 871
	cfq_remove_request(rq);
	cfqq->on_dispatch[rq_is_sync(rq)]++;
	elv_dispatch_sort(q, rq);
872 873 874

	rq = list_entry(q->queue_head.prev, struct request, queuelist);
	cfqd->last_sector = rq->sector + rq->nr_sectors;
L
Linus Torvalds 已提交
875 876 877 878 879
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
J
Jens Axboe 已提交
880
static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
L
Linus Torvalds 已提交
881 882
{
	struct cfq_data *cfqd = cfqq->cfqd;
883
	struct request *rq;
884
	int fifo;
L
Linus Torvalds 已提交
885

J
Jens Axboe 已提交
886
	if (cfq_cfqq_fifo_expire(cfqq))
L
Linus Torvalds 已提交
887
		return NULL;
888 889
	if (list_empty(&cfqq->fifo))
		return NULL;
L
Linus Torvalds 已提交
890

891 892
	fifo = cfq_cfqq_class_sync(cfqq);
	rq = rq_entry_fifo(cfqq->fifo.next);
L
Linus Torvalds 已提交
893

894 895 896
	if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
		cfq_mark_cfqq_fifo_expire(cfqq);
		return rq;
L
Linus Torvalds 已提交
897 898 899 900 901
	}

	return NULL;
}

902 903 904 905
static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;
L
Linus Torvalds 已提交
906

907
	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
L
Linus Torvalds 已提交
908

909
	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
L
Linus Torvalds 已提交
910 911
}

912 913 914
/*
 * get next queue for service
 */
915
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
L
Linus Torvalds 已提交
916 917 918
{
	struct cfq_queue *cfqq;

919 920 921
	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;
L
Linus Torvalds 已提交
922

923 924 925
	/*
	 * slice has expired
	 */
926
	if (!cfq_cfqq_must_dispatch(cfqq) && cfq_slice_used(cfqq))
J
Jens Axboe 已提交
927
		goto expire;
L
Linus Torvalds 已提交
928

929 930 931 932
	/*
	 * if queue has requests, dispatch one. if not, check if
	 * enough slice is left to wait for one
	 */
933
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
934
		goto keep_queue;
935
	else if (cfq_cfqq_slice_new(cfqq) || cfq_cfqq_dispatched(cfqq)) {
936 937 938
		cfqq = NULL;
		goto keep_queue;
	} else if (cfq_cfqq_class_sync(cfqq)) {
939
		if (cfq_arm_slice_timer(cfqd))
940 941 942
			return NULL;
	}

J
Jens Axboe 已提交
943
expire:
944
	cfq_slice_expired(cfqd, 0);
J
Jens Axboe 已提交
945 946
new_queue:
	cfqq = cfq_set_active_queue(cfqd);
947
keep_queue:
J
Jens Axboe 已提交
948
	return cfqq;
949 950 951 952 953 954 955 956
}

static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			int max_dispatch)
{
	int dispatched = 0;

957
	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
958 959

	do {
J
Jens Axboe 已提交
960
		struct request *rq;
L
Linus Torvalds 已提交
961 962

		/*
963
		 * follow expired path, else get first next available
L
Linus Torvalds 已提交
964
		 */
J
Jens Axboe 已提交
965 966
		if ((rq = cfq_check_fifo(cfqq)) == NULL)
			rq = cfqq->next_rq;
967 968 969 970

		/*
		 * finally, insert request into driver dispatch list
		 */
J
Jens Axboe 已提交
971
		cfq_dispatch_insert(cfqd->queue, rq);
L
Linus Torvalds 已提交
972

973 974
		cfqd->dispatch_slice++;
		dispatched++;
L
Linus Torvalds 已提交
975

976
		if (!cfqd->active_cic) {
J
Jens Axboe 已提交
977 978
			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
			cfqd->active_cic = RQ_CIC(rq);
979
		}
L
Linus Torvalds 已提交
980

981
		if (RB_EMPTY_ROOT(&cfqq->sort_list))
982 983 984 985 986 987 988 989 990 991
			break;

	} while (dispatched < max_dispatch);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queue always expire after 1 dispatch round.
	 */
	if ((!cfq_cfqq_sync(cfqq) &&
	    cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
992 993
	    cfq_class_idle(cfqq)) {
		cfqq->slice_end = jiffies + 1;
994
		cfq_slice_expired(cfqd, 0);
995
	}
996 997 998 999

	return dispatched;
}

1000 1001 1002 1003
static int
cfq_forced_dispatch_cfqqs(struct list_head *list)
{
	struct cfq_queue *cfqq, *next;
1004
	int dispatched;
1005

1006
	dispatched = 0;
1007
	list_for_each_entry_safe(cfqq, next, list, cfq_list) {
J
Jens Axboe 已提交
1008 1009
		while (cfqq->next_rq) {
			cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
1010 1011 1012 1013
			dispatched++;
		}
		BUG_ON(!list_empty(&cfqq->fifo));
	}
1014

1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036
	return dispatched;
}

static int
cfq_forced_dispatch(struct cfq_data *cfqd)
{
	int i, dispatched = 0;

	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		dispatched += cfq_forced_dispatch_cfqqs(&cfqd->rr_list[i]);

	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->busy_rr);
	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr);
	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->idle_rr);

	cfq_slice_expired(cfqd, 0);

	BUG_ON(cfqd->busy_queues);

	return dispatched;
}

1037
static int
1038
cfq_dispatch_requests(request_queue_t *q, int force)
1039 1040
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
1041 1042
	struct cfq_queue *cfqq, *prev_cfqq;
	int dispatched;
1043 1044 1045 1046

	if (!cfqd->busy_queues)
		return 0;

1047 1048 1049
	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

1050 1051 1052
	dispatched = 0;
	prev_cfqq = NULL;
	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
1053 1054
		int max_dispatch;

1055 1056 1057 1058 1059 1060
		/*
		 * Don't repeat dispatch from the previous queue.
		 */
		if (prev_cfqq == cfqq)
			break;

J
Jens Axboe 已提交
1061 1062
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_wait_request(cfqq);
1063 1064
		del_timer(&cfqd->idle_slice_timer);

1065 1066 1067
		max_dispatch = cfqd->cfq_quantum;
		if (cfq_class_idle(cfqq))
			max_dispatch = 1;
L
Linus Torvalds 已提交
1068

1069 1070 1071 1072 1073 1074 1075 1076 1077 1078
		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);

		/*
		 * If the dispatch cfqq has idling enabled and is still
		 * the active queue, break out.
		 */
		if (cfq_cfqq_idle_window(cfqq) && cfqd->active_queue)
			break;

		prev_cfqq = cfqq;
L
Linus Torvalds 已提交
1079 1080
	}

1081
	return dispatched;
L
Linus Torvalds 已提交
1082 1083 1084
}

/*
J
Jens Axboe 已提交
1085 1086
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
L
Linus Torvalds 已提交
1087 1088 1089 1090 1091
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
1092 1093 1094
	struct cfq_data *cfqd = cfqq->cfqd;

	BUG_ON(atomic_read(&cfqq->ref) <= 0);
L
Linus Torvalds 已提交
1095 1096 1097 1098 1099

	if (!atomic_dec_and_test(&cfqq->ref))
		return;

	BUG_ON(rb_first(&cfqq->sort_list));
1100
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
J
Jens Axboe 已提交
1101
	BUG_ON(cfq_cfqq_on_rr(cfqq));
L
Linus Torvalds 已提交
1102

1103
	if (unlikely(cfqd->active_queue == cfqq))
J
Jens Axboe 已提交
1104
		__cfq_slice_expired(cfqd, cfqq, 0);
1105

L
Linus Torvalds 已提交
1106 1107 1108 1109 1110 1111 1112 1113
	/*
	 * it's on the empty list and still hashed
	 */
	list_del(&cfqq->cfq_list);
	hlist_del(&cfqq->cfq_hash);
	kmem_cache_free(cfq_pool, cfqq);
}

J
Jens Axboe 已提交
1114
static struct cfq_queue *
J
Jens Axboe 已提交
1115 1116
__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
		    const int hashval)
L
Linus Torvalds 已提交
1117 1118
{
	struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
1119 1120
	struct hlist_node *entry;
	struct cfq_queue *__cfqq;
L
Linus Torvalds 已提交
1121

1122
	hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
A
Al Viro 已提交
1123
		const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
L
Linus Torvalds 已提交
1124

1125
		if (__cfqq->key == key && (__p == prio || !prio))
L
Linus Torvalds 已提交
1126 1127 1128 1129 1130 1131 1132
			return __cfqq;
	}

	return NULL;
}

static struct cfq_queue *
J
Jens Axboe 已提交
1133
cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
L
Linus Torvalds 已提交
1134
{
J
Jens Axboe 已提交
1135
	return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
L
Linus Torvalds 已提交
1136 1137
}

1138
static void cfq_free_io_context(struct io_context *ioc)
L
Linus Torvalds 已提交
1139
{
1140
	struct cfq_io_context *__cic;
1141 1142
	struct rb_node *n;
	int freed = 0;
L
Linus Torvalds 已提交
1143

1144 1145 1146
	while ((n = rb_first(&ioc->cic_root)) != NULL) {
		__cic = rb_entry(n, struct cfq_io_context, rb_node);
		rb_erase(&__cic->rb_node, &ioc->cic_root);
1147
		kmem_cache_free(cfq_ioc_pool, __cic);
1148
		freed++;
L
Linus Torvalds 已提交
1149 1150
	}

1151 1152 1153
	elv_ioc_count_mod(ioc_count, -freed);

	if (ioc_gone && !elv_ioc_count_read(ioc_count))
1154
		complete(ioc_gone);
L
Linus Torvalds 已提交
1155 1156
}

1157
static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
L
Linus Torvalds 已提交
1158
{
1159 1160
	if (unlikely(cfqq == cfqd->active_queue))
		__cfq_slice_expired(cfqd, cfqq, 0);
1161

1162 1163
	cfq_put_queue(cfqq);
}
1164

1165 1166 1167
static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
					 struct cfq_io_context *cic)
{
1168 1169 1170 1171
	list_del_init(&cic->queue_list);
	smp_wmb();
	cic->key = NULL;

1172
	if (cic->cfqq[ASYNC]) {
1173
		cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
1174 1175 1176 1177
		cic->cfqq[ASYNC] = NULL;
	}

	if (cic->cfqq[SYNC]) {
1178
		cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
1179 1180
		cic->cfqq[SYNC] = NULL;
	}
1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193
}


/*
 * Called with interrupts disabled
 */
static void cfq_exit_single_io_context(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;

	if (cfqd) {
		request_queue_t *q = cfqd->queue;

1194
		spin_lock_irq(q->queue_lock);
1195
		__cfq_exit_single_io_context(cfqd, cic);
1196
		spin_unlock_irq(q->queue_lock);
1197
	}
L
Linus Torvalds 已提交
1198 1199
}

1200
static void cfq_exit_io_context(struct io_context *ioc)
L
Linus Torvalds 已提交
1201
{
1202
	struct cfq_io_context *__cic;
1203
	struct rb_node *n;
1204

L
Linus Torvalds 已提交
1205 1206 1207
	/*
	 * put the reference this task is holding to the various queues
	 */
1208 1209 1210 1211 1212

	n = rb_first(&ioc->cic_root);
	while (n != NULL) {
		__cic = rb_entry(n, struct cfq_io_context, rb_node);

1213
		cfq_exit_single_io_context(__cic);
1214
		n = rb_next(n);
L
Linus Torvalds 已提交
1215 1216 1217
	}
}

1218
static struct cfq_io_context *
A
Al Viro 已提交
1219
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
L
Linus Torvalds 已提交
1220
{
1221
	struct cfq_io_context *cic;
L
Linus Torvalds 已提交
1222

1223
	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
L
Linus Torvalds 已提交
1224
	if (cic) {
1225
		memset(cic, 0, sizeof(*cic));
1226
		cic->last_end_request = jiffies;
1227
		INIT_LIST_HEAD(&cic->queue_list);
1228 1229
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
1230
		elv_ioc_count_inc(ioc_count);
L
Linus Torvalds 已提交
1231 1232 1233 1234 1235
	}

	return cic;
}

1236 1237 1238 1239 1240
static void cfq_init_prio_data(struct cfq_queue *cfqq)
{
	struct task_struct *tsk = current;
	int ioprio_class;

J
Jens Axboe 已提交
1241
	if (!cfq_cfqq_prio_changed(cfqq))
1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
	switch (ioprio_class) {
		default:
			printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
		case IOPRIO_CLASS_NONE:
			/*
			 * no prio set, place us in the middle of the BE classes
			 */
			cfqq->ioprio = task_nice_ioprio(tsk);
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
			break;
		case IOPRIO_CLASS_RT:
			cfqq->ioprio = task_ioprio(tsk);
			cfqq->ioprio_class = IOPRIO_CLASS_RT;
			break;
		case IOPRIO_CLASS_BE:
			cfqq->ioprio = task_ioprio(tsk);
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
			break;
		case IOPRIO_CLASS_IDLE:
			cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
			cfqq->ioprio = 7;
J
Jens Axboe 已提交
1266
			cfq_clear_cfqq_idle_window(cfqq);
1267 1268 1269 1270 1271 1272 1273 1274 1275 1276
			break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class = cfqq->ioprio_class;

1277
	cfq_resort_rr_list(cfqq, 0);
J
Jens Axboe 已提交
1278
	cfq_clear_cfqq_prio_changed(cfqq);
1279 1280
}

1281
static inline void changed_ioprio(struct cfq_io_context *cic)
1282
{
1283 1284
	struct cfq_data *cfqd = cic->key;
	struct cfq_queue *cfqq;
1285
	unsigned long flags;
1286

1287 1288 1289
	if (unlikely(!cfqd))
		return;

1290
	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1291 1292 1293 1294 1295 1296 1297 1298 1299 1300

	cfqq = cic->cfqq[ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;
		new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
					 GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
1301
	}
1302 1303 1304 1305 1306

	cfqq = cic->cfqq[SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);

1307
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1308 1309
}

1310
static void cfq_ioc_set_ioprio(struct io_context *ioc)
1311
{
1312
	struct cfq_io_context *cic;
1313
	struct rb_node *n;
1314

1315
	ioc->ioprio_changed = 0;
1316

1317 1318 1319
	n = rb_first(&ioc->cic_root);
	while (n != NULL) {
		cic = rb_entry(n, struct cfq_io_context, rb_node);
1320

1321
		changed_ioprio(cic);
1322 1323
		n = rb_next(n);
	}
1324 1325 1326
}

static struct cfq_queue *
1327
cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
A
Al Viro 已提交
1328
	      gfp_t gfp_mask)
1329 1330 1331
{
	const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
	struct cfq_queue *cfqq, *new_cfqq = NULL;
1332
	unsigned short ioprio;
1333 1334

retry:
1335
	ioprio = tsk->ioprio;
J
Jens Axboe 已提交
1336
	cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
1337 1338 1339 1340 1341 1342

	if (!cfqq) {
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
1343 1344 1345 1346 1347 1348
			/*
			 * Inform the allocator of the fact that we will
			 * just repeat this allocation if it fails, to allow
			 * the allocator to do whatever it needs to attempt to
			 * free memory.
			 */
1349
			spin_unlock_irq(cfqd->queue->queue_lock);
1350
			new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
1351 1352 1353
			spin_lock_irq(cfqd->queue->queue_lock);
			goto retry;
		} else {
1354
			cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368
			if (!cfqq)
				goto out;
		}

		memset(cfqq, 0, sizeof(*cfqq));

		INIT_HLIST_NODE(&cfqq->cfq_hash);
		INIT_LIST_HEAD(&cfqq->cfq_list);
		INIT_LIST_HEAD(&cfqq->fifo);

		cfqq->key = key;
		hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
		atomic_set(&cfqq->ref, 0);
		cfqq->cfqd = cfqd;
1369

1370
		cfq_mark_cfqq_idle_window(cfqq);
J
Jens Axboe 已提交
1371
		cfq_mark_cfqq_prio_changed(cfqq);
1372
		cfq_mark_cfqq_queue_new(cfqq);
J
Jens Axboe 已提交
1373
		cfq_init_prio_data(cfqq);
1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

	atomic_inc(&cfqq->ref);
out:
	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
	return cfqq;
}

1385 1386 1387
static void
cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
{
1388
	WARN_ON(!list_empty(&cic->queue_list));
1389 1390
	rb_erase(&cic->rb_node, &ioc->cic_root);
	kmem_cache_free(cfq_ioc_pool, cic);
1391
	elv_ioc_count_dec(ioc_count);
1392 1393
}

1394 1395 1396
static struct cfq_io_context *
cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
{
1397
	struct rb_node *n;
1398
	struct cfq_io_context *cic;
1399
	void *k, *key = cfqd;
1400

1401 1402
restart:
	n = ioc->cic_root.rb_node;
1403 1404
	while (n) {
		cic = rb_entry(n, struct cfq_io_context, rb_node);
1405 1406 1407
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = cic->key;
		if (unlikely(!k)) {
1408 1409 1410
			cfq_drop_dead_cic(ioc, cic);
			goto restart;
		}
1411

1412
		if (key < k)
1413
			n = n->rb_left;
1414
		else if (key > k)
1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426
			n = n->rb_right;
		else
			return cic;
	}

	return NULL;
}

static inline void
cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
	     struct cfq_io_context *cic)
{
1427 1428
	struct rb_node **p;
	struct rb_node *parent;
1429
	struct cfq_io_context *__cic;
1430
	unsigned long flags;
1431
	void *k;
1432 1433 1434 1435

	cic->ioc = ioc;
	cic->key = cfqd;

1436 1437 1438
restart:
	parent = NULL;
	p = &ioc->cic_root.rb_node;
1439 1440 1441
	while (*p) {
		parent = *p;
		__cic = rb_entry(parent, struct cfq_io_context, rb_node);
1442 1443 1444
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = __cic->key;
		if (unlikely(!k)) {
1445
			cfq_drop_dead_cic(ioc, __cic);
1446 1447
			goto restart;
		}
1448

1449
		if (cic->key < k)
1450
			p = &(*p)->rb_left;
1451
		else if (cic->key > k)
1452 1453 1454 1455 1456 1457 1458
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&cic->rb_node, parent, p);
	rb_insert_color(&cic->rb_node, &ioc->cic_root);
1459

1460
	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1461
	list_add(&cic->queue_list, &cfqd->cic_list);
1462
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1463 1464
}

L
Linus Torvalds 已提交
1465 1466 1467
/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
1468
 * than one device managed by cfq.
L
Linus Torvalds 已提交
1469 1470
 */
static struct cfq_io_context *
1471
cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
L
Linus Torvalds 已提交
1472
{
1473
	struct io_context *ioc = NULL;
L
Linus Torvalds 已提交
1474 1475
	struct cfq_io_context *cic;

1476
	might_sleep_if(gfp_mask & __GFP_WAIT);
L
Linus Torvalds 已提交
1477

1478
	ioc = get_io_context(gfp_mask, cfqd->queue->node);
L
Linus Torvalds 已提交
1479 1480 1481
	if (!ioc)
		return NULL;

1482 1483 1484
	cic = cfq_cic_rb_lookup(cfqd, ioc);
	if (cic)
		goto out;
L
Linus Torvalds 已提交
1485

1486 1487 1488
	cic = cfq_alloc_io_context(cfqd, gfp_mask);
	if (cic == NULL)
		goto err;
L
Linus Torvalds 已提交
1489

1490
	cfq_cic_link(cfqd, ioc, cic);
L
Linus Torvalds 已提交
1491
out:
1492 1493 1494 1495
	smp_read_barrier_depends();
	if (unlikely(ioc->ioprio_changed))
		cfq_ioc_set_ioprio(ioc);

L
Linus Torvalds 已提交
1496 1497 1498 1499 1500 1501
	return cic;
err:
	put_io_context(ioc);
	return NULL;
}

1502 1503
static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
L
Linus Torvalds 已提交
1504
{
1505 1506
	unsigned long elapsed = jiffies - cic->last_end_request;
	unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
1507

1508 1509 1510 1511
	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}
L
Linus Torvalds 已提交
1512

1513
static void
1514
cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
1515 1516 1517 1518
{
	sector_t sdist;
	u64 total;

J
Jens Axboe 已提交
1519 1520
	if (cic->last_request_pos < rq->sector)
		sdist = rq->sector - cic->last_request_pos;
1521
	else
J
Jens Axboe 已提交
1522
		sdist = cic->last_request_pos - rq->sector;
1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (cic->seek_samples <= 60) /* second&third seek */
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (cic->seek_mean * 4)	+ 2*1024*64);

	cic->seek_samples = (7*cic->seek_samples + 256) / 8;
	cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
	total = cic->seek_total + (cic->seek_samples/2);
	do_div(total, cic->seek_samples);
	cic->seek_mean = (sector_t)total;
}
L
Linus Torvalds 已提交
1539

1540 1541 1542 1543 1544 1545 1546 1547
/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_context *cic)
{
J
Jens Axboe 已提交
1548
	int enable_idle = cfq_cfqq_idle_window(cfqq);
L
Linus Torvalds 已提交
1549

1550 1551
	if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
	    (cfqd->hw_tag && CIC_SEEKY(cic)))
1552 1553 1554 1555 1556 1557
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
		if (cic->ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
L
Linus Torvalds 已提交
1558 1559
	}

J
Jens Axboe 已提交
1560 1561 1562 1563
	if (enable_idle)
		cfq_mark_cfqq_idle_window(cfqq);
	else
		cfq_clear_cfqq_idle_window(cfqq);
1564
}
L
Linus Torvalds 已提交
1565

1566 1567 1568 1569 1570 1571 1572

/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no or if we aren't sure, a 1 will cause a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
J
Jens Axboe 已提交
1573
		   struct request *rq)
1574 1575 1576 1577 1578 1579 1580
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfq_class_idle(new_cfqq))
		return 0;

	if (!cfqq)
1581
		return 0;
1582 1583 1584

	if (cfq_class_idle(cfqq))
		return 1;
J
Jens Axboe 已提交
1585
	if (!cfq_cfqq_wait_request(new_cfqq))
1586
		return 0;
1587 1588 1589 1590
	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
J
Jens Axboe 已提交
1591
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
1592
		return 1;
1593 1594 1595 1596 1597 1598
	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if (rq_is_meta(rq) && !cfqq->meta_pending)
		return 1;
1599 1600 1601 1602 1603 1604 1605 1606 1607 1608

	return 0;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
1609
	cfq_slice_expired(cfqd, 1);
1610

1611 1612 1613 1614 1615 1616 1617
	/*
	 * Put the new queue at the front of the of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	list_move(&cfqq->cfq_list, &cfqd->cur_rr);

1618 1619
	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
1620 1621 1622
}

/*
J
Jens Axboe 已提交
1623
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
1624 1625 1626
 * something we should do about it
 */
static void
J
Jens Axboe 已提交
1627 1628
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
1629
{
J
Jens Axboe 已提交
1630
	struct cfq_io_context *cic = RQ_CIC(rq);
1631

1632 1633 1634
	if (rq_is_meta(rq))
		cfqq->meta_pending++;

1635
	/*
1636
	 * check if this request is a better next-serve candidate)) {
1637
	 */
J
Jens Axboe 已提交
1638 1639
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
	BUG_ON(!cfqq->next_rq);
1640

J
Jens Axboe 已提交
1641 1642 1643 1644
	/*
	 * we never wait for an async request and we don't allow preemption
	 * of an async request. so just return early
	 */
J
Jens Axboe 已提交
1645
	if (!rq_is_sync(rq)) {
1646 1647 1648 1649 1650 1651 1652
		/*
		 * sync process issued an async request, if it's waiting
		 * then expire it and kick rq handling.
		 */
		if (cic == cfqd->active_cic &&
		    del_timer(&cfqd->idle_slice_timer)) {
			cfq_slice_expired(cfqd, 0);
1653
			blk_start_queueing(cfqd->queue);
1654
		}
J
Jens Axboe 已提交
1655
		return;
1656
	}
1657

J
Jens Axboe 已提交
1658
	cfq_update_io_thinktime(cfqd, cic);
1659
	cfq_update_io_seektime(cic, rq);
J
Jens Axboe 已提交
1660 1661
	cfq_update_idle_window(cfqd, cfqq, cic);

J
Jens Axboe 已提交
1662
	cic->last_request_pos = rq->sector + rq->nr_sectors;
1663 1664 1665 1666 1667 1668 1669

	if (cfqq == cfqd->active_queue) {
		/*
		 * if we are waiting for a request for this queue, let it rip
		 * immediately and flag that we must not expire this queue
		 * just now
		 */
J
Jens Axboe 已提交
1670 1671
		if (cfq_cfqq_wait_request(cfqq)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
1672
			del_timer(&cfqd->idle_slice_timer);
1673
			blk_start_queueing(cfqd->queue);
1674
		}
J
Jens Axboe 已提交
1675
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
1676 1677 1678 1679 1680 1681
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired it's mean thinktime or this new queue
		 * has some old slice time left and is of higher priority
		 */
		cfq_preempt_queue(cfqd, cfqq);
J
Jens Axboe 已提交
1682
		cfq_mark_cfqq_must_dispatch(cfqq);
1683
		blk_start_queueing(cfqd->queue);
1684
	}
L
Linus Torvalds 已提交
1685 1686
}

1687
static void cfq_insert_request(request_queue_t *q, struct request *rq)
L
Linus Torvalds 已提交
1688
{
1689
	struct cfq_data *cfqd = q->elevator->elevator_data;
J
Jens Axboe 已提交
1690
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1691 1692

	cfq_init_prio_data(cfqq);
L
Linus Torvalds 已提交
1693

J
Jens Axboe 已提交
1694
	cfq_add_rq_rb(rq);
L
Linus Torvalds 已提交
1695

1696 1697
	list_add_tail(&rq->queuelist, &cfqq->fifo);

J
Jens Axboe 已提交
1698
	cfq_rq_enqueued(cfqd, cfqq, rq);
L
Linus Torvalds 已提交
1699 1700 1701 1702
}

static void cfq_completed_request(request_queue_t *q, struct request *rq)
{
J
Jens Axboe 已提交
1703
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1704
	struct cfq_data *cfqd = cfqq->cfqd;
1705
	const int sync = rq_is_sync(rq);
1706
	unsigned long now;
L
Linus Torvalds 已提交
1707

1708
	now = jiffies;
L
Linus Torvalds 已提交
1709

1710 1711 1712 1713
	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->on_dispatch[sync]);
	cfqd->rq_in_driver--;
	cfqq->on_dispatch[sync]--;
1714
	cfqq->service_last = now;
L
Linus Torvalds 已提交
1715

1716 1717
	if (!cfq_class_idle(cfqq))
		cfqd->last_end_request = now;
J
Jens Axboe 已提交
1718

1719
	cfq_resort_rr_list(cfqq, 0);
L
Linus Torvalds 已提交
1720

1721
	if (sync)
J
Jens Axboe 已提交
1722
		RQ_CIC(rq)->last_end_request = now;
1723 1724 1725 1726 1727 1728

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
1729 1730 1731 1732 1733
		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}
		if (cfq_slice_used(cfqq))
1734
			cfq_slice_expired(cfqd, 0);
1735
		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1736
			if (!cfq_arm_slice_timer(cfqd))
1737 1738 1739
				cfq_schedule_dispatch(cfqd);
		}
	}
L
Linus Torvalds 已提交
1740 1741
}

1742 1743 1744 1745 1746
/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
L
Linus Torvalds 已提交
1747
{
1748 1749
	const int ioprio_class = cfqq->ioprio_class;
	const int ioprio = cfqq->ioprio;
L
Linus Torvalds 已提交
1750

1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768
	if (has_fs_excl()) {
		/*
		 * boost idle prio on transactions that would lock out other
		 * users of the filesystem
		 */
		if (cfq_class_idle(cfqq))
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
		if (cfqq->ioprio > IOPRIO_NORM)
			cfqq->ioprio = IOPRIO_NORM;
	} else {
		/*
		 * check if we need to unboost the queue
		 */
		if (cfqq->ioprio_class != cfqq->org_ioprio_class)
			cfqq->ioprio_class = cfqq->org_ioprio_class;
		if (cfqq->ioprio != cfqq->org_ioprio)
			cfqq->ioprio = cfqq->org_ioprio;
	}
L
Linus Torvalds 已提交
1769

1770 1771 1772
	/*
	 * refile between round-robin lists if we moved the priority class
	 */
1773
	if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio))
1774 1775
		cfq_resort_rr_list(cfqq, 0);
}
L
Linus Torvalds 已提交
1776

1777
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
1778
{
J
Jens Axboe 已提交
1779
	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
A
Andrew Morton 已提交
1780
	    !cfq_cfqq_must_alloc_slice(cfqq)) {
J
Jens Axboe 已提交
1781
		cfq_mark_cfqq_must_alloc_slice(cfqq);
1782
		return ELV_MQUEUE_MUST;
J
Jens Axboe 已提交
1783
	}
L
Linus Torvalds 已提交
1784

1785 1786 1787
	return ELV_MQUEUE_MAY;
}

1788
static int cfq_may_queue(request_queue_t *q, int rw)
1789 1790 1791 1792
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_queue *cfqq;
1793 1794 1795
	unsigned int key;

	key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
1796 1797 1798 1799 1800 1801 1802

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
1803
	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
1804 1805 1806 1807
	if (cfqq) {
		cfq_init_prio_data(cfqq);
		cfq_prio_boost(cfqq);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		put_io_context(RQ_CIC(rq)->ioc);

		rq->elevator_private = NULL;
		rq->elevator_private2 = NULL;

		cfq_put_queue(cfqq);
	}
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	const int is_sync = rq_is_sync(rq);
	pid_t key = cfq_queue_pid(tsk, rw, is_sync);
	struct cfq_queue *cfqq;
	unsigned long flags;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	cic = cfq_get_io_context(cfqd, gfp_mask);

	spin_lock_irqsave(q->queue_lock, flags);

	if (!cic)
		goto queue_fail;

	if (!cic->cfqq[is_sync]) {
		cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
		if (!cfqq)
			goto queue_fail;

		cic->cfqq[is_sync] = cfqq;
	} else
		cfqq = cic->cfqq[is_sync];

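	/* the request pins the cfqq; the ref is dropped in cfq_put_request() */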
	cfqq->allocated[rw]++;
	cfq_clear_cfqq_must_alloc(cfqq);
	atomic_inc(&cfqq->ref);

	spin_unlock_irqrestore(q->queue_lock, flags);

	rq->elevator_private = cic;
	rq->elevator_private2 = cfqq;
	return 0;

queue_fail:
	if (cic)
		put_io_context(cic->ioc);

	cfq_schedule_dispatch(cfqd);
	spin_unlock_irqrestore(q->queue_lock, flags);
	return 1;
}

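/*
 * work handler that restarts queueing from process context; scheduled
 * via cfq_schedule_dispatch()
 */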
static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	request_queue_t *q = cfqd->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	if ((cfqq = cfqd->active_queue) != NULL) {
		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			goto out_kick;
		}
	}
expire:
	cfq_slice_expired(cfqd, 0);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

/*
 * Timer running if an idle class queue is waiting for service
 */
static void cfq_idle_class_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	unsigned long flags, end;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	/*
	 * race with a non-idle queue, reset timer
	 */
	end = cfqd->last_end_request + CFQ_IDLE_GRACE;
	if (!time_after_eq(jiffies, end))
		mod_timer(&cfqd->idle_class_timer, end);
	else
		cfq_schedule_dispatch(cfqd);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

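/*
 * kill pending timers and flush any queued work before the elevator
 * is torn down
 */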
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	del_timer_sync(&cfqd->idle_class_timer);
	blk_sync_queue(cfqd->queue);
}

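/*
 * elevator exit: expire the active queue, unlink the io contexts still
 * attached to this device, then free the hash table and the cfqd
 */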
static void cfq_exit_queue(elevator_t *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	request_queue_t *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	while (!list_empty(&cfqd->cic_list)) {
		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
							struct cfq_io_context,
							queue_list);

		__cfq_exit_single_io_context(cfqd, cic);
	}

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

	kfree(cfqd->cfq_hash);
	kfree(cfqd);
}

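/*
 * elevator init: allocate the per-device cfq_data, set up the rr lists,
 * cfqq hash, timers and unplug work, and seed the tunables with their
 * built-in defaults
 */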
static void *cfq_init_queue(request_queue_t *q)
{
	struct cfq_data *cfqd;
	int i;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
	if (!cfqd)
		return NULL;

	memset(cfqd, 0, sizeof(*cfqd));

	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		INIT_LIST_HEAD(&cfqd->rr_list[i]);

	INIT_LIST_HEAD(&cfqd->busy_rr);
	INIT_LIST_HEAD(&cfqd->cur_rr);
	INIT_LIST_HEAD(&cfqd->idle_rr);
	INIT_LIST_HEAD(&cfqd->cic_list);

	cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
	if (!cfqd->cfq_hash)
		goto out_free;

	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);

	cfqd->queue = q;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	init_timer(&cfqd->idle_class_timer);
	cfqd->idle_class_timer.function = cfq_idle_class_timer;
	cfqd->idle_class_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;

	return cfqd;
out_free:
	kfree(cfqd);
	return NULL;
}

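/*
 * slab caches for cfq_queue and cfq_io_context objects, created at
 * module init and destroyed on exit or on failed setup
 */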
static void cfq_slab_kill(void)
{
	if (cfq_pool)
		kmem_cache_destroy(cfq_pool);
	if (cfq_ioc_pool)
		kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
	cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
					NULL, NULL);
	if (!cfq_pool)
		goto fail;

	cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
			sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
	if (!cfq_ioc_pool)
		goto fail;

	return 0;
fail:
	cfq_slab_kill();
	return -ENOMEM;
}

/*
 * sysfs parts below -->
 */

static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

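/*
 * generate a show/store pair per tunable; __CONV selects conversion
 * between jiffies (internal) and milliseconds (user visible) for the
 * time based ones
 */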
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(elevator_t *e, char *page)			\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
#undef STORE_FUNCTION

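/*
 * attribute table tying the show/store pairs to files that appear
 * under /sys/block/<dev>/queue/iosched/
 */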
#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	__ATTR_NULL
};

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn = 		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_queue_empty_fn =	cfq_queue_empty,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
		.trim =				cfq_free_io_context,
	},
	.elevator_attrs =	cfq_attrs,
	.elevator_name =	"cfq",
	.elevator_owner =	THIS_MODULE,
};

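/*
 * module init: sanitize slice defaults that would truncate to zero on
 * low HZ, set up the slab caches and register the elevator
 */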
static int __init cfq_init(void)
{
	int ret;

	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

	if (cfq_slab_setup())
		return -ENOMEM;

	ret = elv_register(&iosched_cfq);
	if (ret)
		cfq_slab_kill();

	return ret;
}

static void __exit cfq_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);
	elv_unregister(&iosched_cfq);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();
	if (elv_ioc_count_read(ioc_count))
		wait_for_completion(ioc_gone);
	synchronize_rcu();
	cfq_slab_kill();
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");