cfq-iosched.c 52.6 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6
/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
7
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
L
Linus Torvalds 已提交
8 9
 */
#include <linux/module.h>
A
Al Viro 已提交
10 11
#include <linux/blkdev.h>
#include <linux/elevator.h>
L
Linus Torvalds 已提交
12 13
#include <linux/hash.h>
#include <linux/rbtree.h>
14
#include <linux/ioprio.h>
L
Linus Torvalds 已提交
15 16 17 18

/*
 * tunables
 */
19 20 21 22
static const int cfq_quantum = 4;		/* max queue in one round of service */
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
static const int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
static const int cfq_back_penalty = 2;		/* penalty of a backwards seek */
L
Linus Torvalds 已提交
23

24
static const int cfq_slice_sync = HZ / 10;
J
Jens Axboe 已提交
25
static int cfq_slice_async = HZ / 25;
26
static const int cfq_slice_async_rq = 2;
27
static int cfq_slice_idle = HZ / 125;
28 29 30 31 32 33

#define CFQ_IDLE_GRACE		(HZ / 10)
#define CFQ_SLICE_SCALE		(5)

#define CFQ_KEY_ASYNC		(0)

L
Linus Torvalds 已提交
34 35 36 37 38 39 40 41 42
/*
 * for the hash of cfqq inside the cfqd
 */
#define CFQ_QHASH_SHIFT		6
#define CFQ_QHASH_ENTRIES	(1 << CFQ_QHASH_SHIFT)
#define list_entry_qhash(entry)	hlist_entry((entry), struct cfq_queue, cfq_hash)

#define list_entry_cfqq(ptr)	list_entry((ptr), struct cfq_queue, cfq_list)

J
Jens Axboe 已提交
43 44
#define RQ_CIC(rq)		((struct cfq_io_context*)(rq)->elevator_private)
#define RQ_CFQQ(rq)		((rq)->elevator_private2)
L
Linus Torvalds 已提交
45

46 47
static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;
L
Linus Torvalds 已提交
48

49
static DEFINE_PER_CPU(unsigned long, ioc_count);
50 51
static struct completion *ioc_gone;

52 53 54 55
#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

J
Jens Axboe 已提交
56 57 58 59 60 61 62 63 64 65
#define ASYNC			(0)
#define SYNC			(1)

#define cfq_cfqq_dispatched(cfqq)	\
	((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])

#define cfq_cfqq_class_sync(cfqq)	((cfqq)->key != CFQ_KEY_ASYNC)

#define cfq_cfqq_sync(cfqq)		\
	(cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
66

67 68
#define sample_valid(samples)	((samples) > 80)

69 70 71
/*
 * Per block device queue structure
 */
L
Linus Torvalds 已提交
72
struct cfq_data {
73 74 75 76 77 78 79 80 81 82 83 84 85 86
	request_queue_t *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct list_head rr_list[CFQ_PRIO_LISTS];
	struct list_head busy_rr;
	struct list_head cur_rr;
	struct list_head idle_rr;
	unsigned int busy_queues;

	/*
	 * cfqq lookup hash
	 */
L
Linus Torvalds 已提交
87 88
	struct hlist_head *cfq_hash;

89
	int rq_in_driver;
90
	int hw_tag;
L
Linus Torvalds 已提交
91

92 93 94 95 96
	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;
L
Linus Torvalds 已提交
97

98 99 100 101 102 103
	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;
	int cur_prio, cur_end_prio;
	unsigned int dispatch_slice;

	struct timer_list idle_class_timer;
L
Linus Torvalds 已提交
104 105

	sector_t last_sector;
106
	unsigned long last_end_request;
L
Linus Torvalds 已提交
107 108 109 110 111

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
112
	unsigned int cfq_fifo_expire[2];
L
Linus Torvalds 已提交
113 114
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
115 116 117
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;
118 119

	struct list_head cic_list;
L
Linus Torvalds 已提交
120 121
};

122 123 124
/*
 * Per process-grouping structure
 */
L
Linus Torvalds 已提交
125 126 127 128 129
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* parent cfq_data */
	struct cfq_data *cfqd;
130
	/* cfqq lookup hash */
L
Linus Torvalds 已提交
131 132
	struct hlist_node cfq_hash;
	/* hash key */
133
	unsigned int key;
134
	/* member of the rr/busy/cur/idle cfqd list */
L
Linus Torvalds 已提交
135 136 137 138
	struct list_head cfq_list;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
J
Jens Axboe 已提交
139
	struct request *next_rq;
L
Linus Torvalds 已提交
140 141 142 143
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
144 145
	/* pending metadata requests */
	int meta_pending;
L
Linus Torvalds 已提交
146
	/* fifo list of requests in sort_list */
147
	struct list_head fifo;
L
Linus Torvalds 已提交
148

149 150
	unsigned long slice_end;
	unsigned long slice_left;
151
	unsigned long service_last;
L
Linus Torvalds 已提交
152

J
Jens Axboe 已提交
153 154
	/* number of requests that are on the dispatch list */
	int on_dispatch[2];
155 156 157 158 159

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

J
Jens Axboe 已提交
160 161
	/* various state flags, see below */
	unsigned int flags;
L
Linus Torvalds 已提交
162 163
};

J
Jens Axboe 已提交
164
enum cfqq_state_flags {
165 166 167 168 169 170 171 172 173
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
J
Jens Axboe 已提交
174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
198
CFQ_CFQQ_FNS(queue_new);
J
Jens Axboe 已提交
199 200 201
#undef CFQ_CFQQ_FNS

static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
J
Jens Axboe 已提交
202
static void cfq_dispatch_insert(request_queue_t *, struct request *);
203
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
L
Linus Torvalds 已提交
204

A
Andrew Morton 已提交
205 206 207 208 209 210
/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
211
	if (cfqd->busy_queues)
A
Andrew Morton 已提交
212 213 214 215 216 217 218
		kblockd_schedule_work(&cfqd->unplug_work);
}

static int cfq_queue_empty(request_queue_t *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

219
	return !cfqd->busy_queues;
A
Andrew Morton 已提交
220 221
}

222
static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
223
{
224 225 226 227
	/*
	 * Use the per-process queue, for read requests and syncronous writes
	 */
	if (!(rw & REQ_RW) || is_sync)
228 229 230 231 232
		return task->pid;

	return CFQ_KEY_ASYNC;
}

L
Linus Torvalds 已提交
233
/*
J
Jens Axboe 已提交
234
 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
L
Linus Torvalds 已提交
235
 * We choose the request that is closest to the head right now. Distance
236
 * behind the head is penalized and only allowed to a certain extent.
L
Linus Torvalds 已提交
237
 */
J
Jens Axboe 已提交
238 239
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
L
Linus Torvalds 已提交
240 241 242
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
243 244 245
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */
L
Linus Torvalds 已提交
246

J
Jens Axboe 已提交
247 248 249 250
	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;
J
Jens Axboe 已提交
251

J
Jens Axboe 已提交
252 253 254 255
	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
256 257 258 259
	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
		return rq1;
	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
		return rq2;
L
Linus Torvalds 已提交
260

J
Jens Axboe 已提交
261 262
	s1 = rq1->sector;
	s2 = rq2->sector;
L
Linus Torvalds 已提交
263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280

	last = cfqd->last_sector;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
281
		wrap |= CFQ_RQ1_WRAP;
L
Linus Torvalds 已提交
282 283 284 285 286 287

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
288
		wrap |= CFQ_RQ2_WRAP;
L
Linus Torvalds 已提交
289 290

	/* Found required data */
291 292 293 294 295 296

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
J
Jens Axboe 已提交
297
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
298
		if (d1 < d2)
J
Jens Axboe 已提交
299
			return rq1;
300
		else if (d2 < d1)
J
Jens Axboe 已提交
301
			return rq2;
302 303
		else {
			if (s1 >= s2)
J
Jens Axboe 已提交
304
				return rq1;
305
			else
J
Jens Axboe 已提交
306
				return rq2;
307
		}
L
Linus Torvalds 已提交
308

309
	case CFQ_RQ2_WRAP:
J
Jens Axboe 已提交
310
		return rq1;
311
	case CFQ_RQ1_WRAP:
J
Jens Axboe 已提交
312 313
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
314 315 316 317 318 319 320 321
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
J
Jens Axboe 已提交
322
			return rq1;
L
Linus Torvalds 已提交
323
		else
J
Jens Axboe 已提交
324
			return rq2;
L
Linus Torvalds 已提交
325 326 327 328 329 330
	}
}

/*
 * would be nice to take fifo expire time into account as well
 */
J
Jens Axboe 已提交
331 332 333
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
L
Linus Torvalds 已提交
334
{
335 336
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
J
Jens Axboe 已提交
337
	struct request *next = NULL, *prev = NULL;
L
Linus Torvalds 已提交
338

339
	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
L
Linus Torvalds 已提交
340 341

	if (rbprev)
J
Jens Axboe 已提交
342
		prev = rb_entry_rq(rbprev);
L
Linus Torvalds 已提交
343

344
	if (rbnext)
J
Jens Axboe 已提交
345
		next = rb_entry_rq(rbnext);
346 347 348
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
J
Jens Axboe 已提交
349
			next = rb_entry_rq(rbnext);
350
	}
L
Linus Torvalds 已提交
351

352
	return cfq_choose_req(cfqd, next, prev);
L
Linus Torvalds 已提交
353 354
}

355
static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
L
Linus Torvalds 已提交
356
{
357
	struct cfq_data *cfqd = cfqq->cfqd;
358 359
	struct list_head *list, *n;
	struct cfq_queue *__cfqq;
L
Linus Torvalds 已提交
360

361 362 363 364 365
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (!cfq_cfqq_on_rr(cfqq))
		return;
L
Linus Torvalds 已提交
366

367
	list_del(&cfqq->cfq_list);
L
Linus Torvalds 已提交
368

369 370 371 372 373 374 375 376 377 378 379 380
	if (cfq_class_rt(cfqq))
		list = &cfqd->cur_rr;
	else if (cfq_class_idle(cfqq))
		list = &cfqd->idle_rr;
	else {
		/*
		 * if cfqq has requests in flight, don't allow it to be
		 * found in cfq_set_active_queue before it has finished them.
		 * this is done to increase fairness between a process that
		 * has lots of io pending vs one that only generates one
		 * sporadically or synchronously
		 */
J
Jens Axboe 已提交
381
		if (cfq_cfqq_dispatched(cfqq))
382 383 384
			list = &cfqd->busy_rr;
		else
			list = &cfqd->rr_list[cfqq->ioprio];
L
Linus Torvalds 已提交
385 386
	}

387
	if (preempted || cfq_cfqq_queue_new(cfqq)) {
388 389 390 391 392 393
		/*
		 * If this queue was preempted or is new (never been serviced),
		 * let it be added first for fairness but beind other new
		 * queues.
		 */
		n = list;
394 395 396 397
		while (n->next != list) {
			__cfqq = list_entry_cfqq(n->next);
			if (!cfq_cfqq_queue_new(__cfqq))
				break;
L
Linus Torvalds 已提交
398

399 400
			n = n->next;
		}
401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418
		list_add_tail(&cfqq->cfq_list, n);
	} else if (!cfq_cfqq_class_sync(cfqq)) {
		/*
		 * async queue always goes to the end. this wont be overly
		 * unfair to writes, as the sort of the sync queue wont be
		 * allowed to pass the async queue again.
		 */
		list_add_tail(&cfqq->cfq_list, list);
	} else {
		/*
		 * sort by last service, but don't cross a new or async
		 * queue. we don't cross a new queue because it hasn't been
		 * service before, and we don't cross an async queue because
		 * it gets added to the end on expire.
		 */
		n = list;
		while ((n = n->prev) != list) {
			struct cfq_queue *__cfqq = list_entry_cfqq(n);
L
Linus Torvalds 已提交
419

420 421 422 423 424 425
			if (!cfq_cfqq_class_sync(cfqq) || !__cfqq->service_last)
				break;
			if (time_before(__cfqq->service_last, cfqq->service_last))
				break;
		}
		list_add(&cfqq->cfq_list, n);
L
Linus Torvalds 已提交
426 427 428 429 430
	}
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
431
 * the pending list according to last request service
L
Linus Torvalds 已提交
432 433
 */
static inline void
434
cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
L
Linus Torvalds 已提交
435
{
J
Jens Axboe 已提交
436 437
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
L
Linus Torvalds 已提交
438 439
	cfqd->busy_queues++;

440
	cfq_resort_rr_list(cfqq, 0);
L
Linus Torvalds 已提交
441 442 443 444 445
}

static inline void
cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
J
Jens Axboe 已提交
446 447
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);
448
	list_del_init(&cfqq->cfq_list);
L
Linus Torvalds 已提交
449 450 451 452 453 454 455 456

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
}

/*
 * rb tree support functions
 */
J
Jens Axboe 已提交
457
static inline void cfq_del_rq_rb(struct request *rq)
L
Linus Torvalds 已提交
458
{
J
Jens Axboe 已提交
459
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
460
	struct cfq_data *cfqd = cfqq->cfqd;
J
Jens Axboe 已提交
461
	const int sync = rq_is_sync(rq);
L
Linus Torvalds 已提交
462

463 464
	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;
L
Linus Torvalds 已提交
465

J
Jens Axboe 已提交
466
	elv_rb_del(&cfqq->sort_list, rq);
L
Linus Torvalds 已提交
467

468
	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
469
		cfq_del_cfqq_rr(cfqd, cfqq);
L
Linus Torvalds 已提交
470 471
}

J
Jens Axboe 已提交
472
static void cfq_add_rq_rb(struct request *rq)
L
Linus Torvalds 已提交
473
{
J
Jens Axboe 已提交
474
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
L
Linus Torvalds 已提交
475
	struct cfq_data *cfqd = cfqq->cfqd;
476
	struct request *__alias;
L
Linus Torvalds 已提交
477

478
	cfqq->queued[rq_is_sync(rq)]++;
L
Linus Torvalds 已提交
479 480 481 482 483

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
484
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
J
Jens Axboe 已提交
485
		cfq_dispatch_insert(cfqd->queue, __alias);
486 487 488

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);
L
Linus Torvalds 已提交
489 490 491
}

static inline void
J
Jens Axboe 已提交
492
cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
L
Linus Torvalds 已提交
493
{
494 495
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
J
Jens Axboe 已提交
496
	cfq_add_rq_rb(rq);
L
Linus Torvalds 已提交
497 498
}

499 500
static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
L
Linus Torvalds 已提交
501
{
502
	struct task_struct *tsk = current;
503
	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
504
	struct cfq_queue *cfqq;
L
Linus Torvalds 已提交
505

506
	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
507 508 509
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

510
		return elv_rb_find(&cfqq->sort_list, sector);
511
	}
L
Linus Torvalds 已提交
512 513 514 515

	return NULL;
}

516
static void cfq_activate_request(request_queue_t *q, struct request *rq)
L
Linus Torvalds 已提交
517
{
518
	struct cfq_data *cfqd = q->elevator->elevator_data;
J
Jens Axboe 已提交
519

520
	cfqd->rq_in_driver++;
521 522 523 524 525 526 527 528 529

	/*
	 * If the depth is larger 1, it really could be queueing. But lets
	 * make the mark a little higher - idling could still be good for
	 * low queueing, and a low queueing number could also just indicate
	 * a SCSI mid layer like behaviour where limit+1 is often seen.
	 */
	if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
		cfqd->hw_tag = 1;
L
Linus Torvalds 已提交
530 531
}

532
static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
L
Linus Torvalds 已提交
533
{
534 535 536 537
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
L
Linus Torvalds 已提交
538 539
}

540
static void cfq_remove_request(struct request *rq)
L
Linus Torvalds 已提交
541
{
J
Jens Axboe 已提交
542
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
543

J
Jens Axboe 已提交
544 545
	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
L
Linus Torvalds 已提交
546

547
	list_del_init(&rq->queuelist);
J
Jens Axboe 已提交
548
	cfq_del_rq_rb(rq);
549 550 551 552 553

	if (rq_is_meta(rq)) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
L
Linus Torvalds 已提交
554 555 556 557 558 559 560 561
}

static int
cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

562
	__rq = cfq_find_rq_fmerge(cfqd, bio);
563
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
564 565
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
L
Linus Torvalds 已提交
566 567 568 569 570
	}

	return ELEVATOR_NO_MERGE;
}

571 572
static void cfq_merged_request(request_queue_t *q, struct request *req,
			       int type)
L
Linus Torvalds 已提交
573
{
574
	if (type == ELEVATOR_FRONT_MERGE) {
J
Jens Axboe 已提交
575
		struct cfq_queue *cfqq = RQ_CFQQ(req);
L
Linus Torvalds 已提交
576

J
Jens Axboe 已提交
577
		cfq_reposition_rq_rb(cfqq, req);
L
Linus Torvalds 已提交
578 579 580 581 582 583 584
	}
}

static void
cfq_merged_requests(request_queue_t *q, struct request *rq,
		    struct request *next)
{
585 586 587 588 589 590 591
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(next->start_time, rq->start_time))
		list_move(&rq->queuelist, &next->queuelist);

592
	cfq_remove_request(next);
593 594
}

595 596 597 598 599 600 601 602 603
static int cfq_allow_merge(request_queue_t *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	const int rw = bio_data_dir(bio);
	struct cfq_queue *cfqq;
	pid_t key;

	/*
604
	 * Disallow merge of a sync bio into an async request.
605
	 */
606
	if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
607 608 609
		return 0;

	/*
610 611
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
612
	 */
613
	key = cfq_queue_pid(current, rw, bio_sync(bio));
614
	cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
615 616 617

	if (cfqq == RQ_CFQQ(rq))
		return 1;
618

619
	return 0;
620 621
}

622 623 624 625 626 627 628 629 630 631 632
static inline void
__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (cfqq) {
		/*
		 * stop potential idle class queues waiting service
		 */
		del_timer(&cfqd->idle_class_timer);

		cfqq->slice_end = 0;
		cfqq->slice_left = 0;
J
Jens Axboe 已提交
633 634
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
635 636 637 638 639
	}

	cfqd->active_queue = cfqq;
}

640 641 642 643 644 645 646 647 648 649 650 651
/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int preempted)
{
	unsigned long now = jiffies;

	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

652
	if (!preempted && !cfq_cfqq_dispatched(cfqq))
653 654 655 656
		cfq_schedule_dispatch(cfqd);

	cfq_clear_cfqq_must_dispatch(cfqq);
	cfq_clear_cfqq_wait_request(cfqq);
657
	cfq_clear_cfqq_queue_new(cfqq);
658 659 660 661 662 663 664 665 666 667

	/*
	 * store what was left of this slice, if the queue idled out
	 * or was preempted
	 */
	if (time_after(cfqq->slice_end, now))
		cfqq->slice_left = cfqq->slice_end - now;
	else
		cfqq->slice_left = 0;

668
	cfq_resort_rr_list(cfqq, preempted);
669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}

	cfqd->dispatch_slice = 0;
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, preempted);
}

689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722
/*
 * 0
 * 0,1
 * 0,1,2
 * 0,1,2,3
 * 0,1,2,3,4
 * 0,1,2,3,4,5
 * 0,1,2,3,4,5,6
 * 0,1,2,3,4,5,6,7
 */
static int cfq_get_next_prio_level(struct cfq_data *cfqd)
{
	int prio, wrap;

	prio = -1;
	wrap = 0;
	do {
		int p;

		for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
			if (!list_empty(&cfqd->rr_list[p])) {
				prio = p;
				break;
			}
		}

		if (prio != -1)
			break;
		cfqd->cur_prio = 0;
		if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
			cfqd->cur_end_prio = 0;
			if (wrap)
				break;
			wrap = 1;
L
Linus Torvalds 已提交
723
		}
724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740
	} while (1);

	if (unlikely(prio == -1))
		return -1;

	BUG_ON(prio >= CFQ_PRIO_LISTS);

	list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);

	cfqd->cur_prio = prio + 1;
	if (cfqd->cur_prio > cfqd->cur_end_prio) {
		cfqd->cur_end_prio = cfqd->cur_prio;
		cfqd->cur_prio = 0;
	}
	if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
		cfqd->cur_prio = 0;
		cfqd->cur_end_prio = 0;
L
Linus Torvalds 已提交
741 742
	}

743 744 745
	return prio;
}

J
Jens Axboe 已提交
746
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
747
{
748
	struct cfq_queue *cfqq = NULL;
749

750 751 752 753 754 755
	if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) {
		/*
		 * if current list is non-empty, grab first entry. if it is
		 * empty, get next prio level and grab first entry then if any
		 * are spliced
		 */
756
		cfqq = list_entry_cfqq(cfqd->cur_rr.next);
757 758 759 760 761
	} else if (!list_empty(&cfqd->busy_rr)) {
		/*
		 * If no new queues are available, check if the busy list has
		 * some before falling back to idle io.
		 */
762
		cfqq = list_entry_cfqq(cfqd->busy_rr.next);
763 764 765 766 767 768
	} else if (!list_empty(&cfqd->idle_rr)) {
		/*
		 * if we have idle queues and no rt or be queues had pending
		 * requests, either allow immediate service if the grace period
		 * has passed or arm the idle grace timer
		 */
769 770 771 772 773 774 775 776 777
		unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;

		if (time_after_eq(jiffies, end))
			cfqq = list_entry_cfqq(cfqd->idle_rr.next);
		else
			mod_timer(&cfqd->idle_class_timer, end);
	}

	__cfq_set_active_queue(cfqd, cfqq);
J
Jens Axboe 已提交
778
	return cfqq;
779 780
}

781 782
#define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024))

783 784 785
static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)

{
786
	struct cfq_io_context *cic;
787 788
	unsigned long sl;

789
	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
790 791 792 793 794 795 796
	WARN_ON(cfqq != cfqd->active_queue);

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfqd->cfq_slice_idle)
		return 0;
J
Jens Axboe 已提交
797
	if (!cfq_cfqq_idle_window(cfqq))
798 799 800 801
		return 0;
	/*
	 * task has exited, don't wait
	 */
802 803
	cic = cfqd->active_cic;
	if (!cic || !cic->ioc->task)
804 805
		return 0;

J
Jens Axboe 已提交
806 807
	cfq_mark_cfqq_must_dispatch(cfqq);
	cfq_mark_cfqq_wait_request(cfqq);
808

809
	sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
810 811 812 813 814 815

	/*
	 * we don't want to idle for seeks, but we do want to allow
	 * fair distribution of slice time for a process doing back-to-back
	 * seeks. so allow a little bit of time for him to submit a new rq
	 */
816
	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
817
		sl = min(sl, msecs_to_jiffies(2));
818

819
	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
820
	return 1;
L
Linus Torvalds 已提交
821 822
}

J
Jens Axboe 已提交
823
static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
L
Linus Torvalds 已提交
824 825
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
J
Jens Axboe 已提交
826
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
827

828 829 830
	cfq_remove_request(rq);
	cfqq->on_dispatch[rq_is_sync(rq)]++;
	elv_dispatch_sort(q, rq);
831 832 833

	rq = list_entry(q->queue_head.prev, struct request, queuelist);
	cfqd->last_sector = rq->sector + rq->nr_sectors;
L
Linus Torvalds 已提交
834 835 836 837 838
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
J
Jens Axboe 已提交
839
static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
L
Linus Torvalds 已提交
840 841
{
	struct cfq_data *cfqd = cfqq->cfqd;
842
	struct request *rq;
843
	int fifo;
L
Linus Torvalds 已提交
844

J
Jens Axboe 已提交
845
	if (cfq_cfqq_fifo_expire(cfqq))
L
Linus Torvalds 已提交
846
		return NULL;
847 848
	if (list_empty(&cfqq->fifo))
		return NULL;
L
Linus Torvalds 已提交
849

850 851
	fifo = cfq_cfqq_class_sync(cfqq);
	rq = rq_entry_fifo(cfqq->fifo.next);
L
Linus Torvalds 已提交
852

853 854 855
	if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
		cfq_mark_cfqq_fifo_expire(cfqq);
		return rq;
L
Linus Torvalds 已提交
856 857 858 859 860 861
	}

	return NULL;
}

/*
J
Jens Axboe 已提交
862 863 864
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
L
Linus Torvalds 已提交
865
 */
866 867 868 869 870 871 872 873 874 875
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
}

L
Linus Torvalds 已提交
876
static inline void
877
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
L
Linus Torvalds 已提交
878
{
879 880
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
}
L
Linus Torvalds 已提交
881

882 883 884 885
static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;
L
Linus Torvalds 已提交
886

887
	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
L
Linus Torvalds 已提交
888

889
	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
L
Linus Torvalds 已提交
890 891
}

892 893 894
/*
 * get next queue for service
 */
895
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
L
Linus Torvalds 已提交
896
{
897
	unsigned long now = jiffies;
L
Linus Torvalds 已提交
898 899
	struct cfq_queue *cfqq;

900 901 902
	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;
L
Linus Torvalds 已提交
903

904 905 906
	/*
	 * slice has expired
	 */
J
Jens Axboe 已提交
907 908
	if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end))
		goto expire;
L
Linus Torvalds 已提交
909

910 911 912 913
	/*
	 * if queue has requests, dispatch one. if not, check if
	 * enough slice is left to wait for one
	 */
914
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
915
		goto keep_queue;
916 917 918 919
	else if (cfq_cfqq_dispatched(cfqq)) {
		cfqq = NULL;
		goto keep_queue;
	} else if (cfq_cfqq_class_sync(cfqq)) {
920 921 922 923
		if (cfq_arm_slice_timer(cfqd, cfqq))
			return NULL;
	}

J
Jens Axboe 已提交
924
expire:
925
	cfq_slice_expired(cfqd, 0);
J
Jens Axboe 已提交
926 927
new_queue:
	cfqq = cfq_set_active_queue(cfqd);
928
keep_queue:
J
Jens Axboe 已提交
929
	return cfqq;
930 931 932 933 934 935 936 937
}

static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			int max_dispatch)
{
	int dispatched = 0;

938
	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
939 940

	do {
J
Jens Axboe 已提交
941
		struct request *rq;
L
Linus Torvalds 已提交
942 943

		/*
944
		 * follow expired path, else get first next available
L
Linus Torvalds 已提交
945
		 */
J
Jens Axboe 已提交
946 947
		if ((rq = cfq_check_fifo(cfqq)) == NULL)
			rq = cfqq->next_rq;
948 949 950 951

		/*
		 * finally, insert request into driver dispatch list
		 */
J
Jens Axboe 已提交
952
		cfq_dispatch_insert(cfqd->queue, rq);
L
Linus Torvalds 已提交
953

954 955
		cfqd->dispatch_slice++;
		dispatched++;
L
Linus Torvalds 已提交
956

957
		if (!cfqd->active_cic) {
J
Jens Axboe 已提交
958 959
			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
			cfqd->active_cic = RQ_CIC(rq);
960
		}
L
Linus Torvalds 已提交
961

962
		if (RB_EMPTY_ROOT(&cfqq->sort_list))
963 964 965 966 967
			break;

	} while (dispatched < max_dispatch);

	/*
968
	 * if slice end isn't set yet, set it.
969 970 971 972 973 974 975 976 977 978
	 */
	if (!cfqq->slice_end)
		cfq_set_prio_slice(cfqd, cfqq);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queue always expire after 1 dispatch round.
	 */
	if ((!cfq_cfqq_sync(cfqq) &&
	    cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
979 980
	    cfq_class_idle(cfqq) ||
	    !cfq_cfqq_idle_window(cfqq))
981 982 983 984 985
		cfq_slice_expired(cfqd, 0);

	return dispatched;
}

986 987 988 989
static int
cfq_forced_dispatch_cfqqs(struct list_head *list)
{
	struct cfq_queue *cfqq, *next;
990
	int dispatched;
991

992
	dispatched = 0;
993
	list_for_each_entry_safe(cfqq, next, list, cfq_list) {
J
Jens Axboe 已提交
994 995
		while (cfqq->next_rq) {
			cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
996 997 998 999
			dispatched++;
		}
		BUG_ON(!list_empty(&cfqq->fifo));
	}
1000

1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022
	return dispatched;
}

static int
cfq_forced_dispatch(struct cfq_data *cfqd)
{
	int i, dispatched = 0;

	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		dispatched += cfq_forced_dispatch_cfqqs(&cfqd->rr_list[i]);

	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->busy_rr);
	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr);
	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->idle_rr);

	cfq_slice_expired(cfqd, 0);

	BUG_ON(cfqd->busy_queues);

	return dispatched;
}

1023
static int
1024
cfq_dispatch_requests(request_queue_t *q, int force)
1025 1026
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
1027 1028
	struct cfq_queue *cfqq, *prev_cfqq;
	int dispatched;
1029 1030 1031 1032

	if (!cfqd->busy_queues)
		return 0;

1033 1034 1035
	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

1036 1037 1038
	dispatched = 0;
	prev_cfqq = NULL;
	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
1039 1040
		int max_dispatch;

1041 1042 1043 1044 1045 1046
		/*
		 * Don't repeat dispatch from the previous queue.
		 */
		if (prev_cfqq == cfqq)
			break;

J
Jens Axboe 已提交
1047 1048
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_wait_request(cfqq);
1049 1050
		del_timer(&cfqd->idle_slice_timer);

1051 1052 1053
		max_dispatch = cfqd->cfq_quantum;
		if (cfq_class_idle(cfqq))
			max_dispatch = 1;
L
Linus Torvalds 已提交
1054

1055 1056 1057 1058 1059 1060 1061 1062 1063 1064
		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);

		/*
		 * If the dispatch cfqq has idling enabled and is still
		 * the active queue, break out.
		 */
		if (cfq_cfqq_idle_window(cfqq) && cfqd->active_queue)
			break;

		prev_cfqq = cfqq;
L
Linus Torvalds 已提交
1065 1066
	}

1067
	return dispatched;
L
Linus Torvalds 已提交
1068 1069 1070
}

/*
J
Jens Axboe 已提交
1071 1072
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
L
Linus Torvalds 已提交
1073 1074 1075 1076 1077
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
1078 1079 1080
	struct cfq_data *cfqd = cfqq->cfqd;

	BUG_ON(atomic_read(&cfqq->ref) <= 0);
L
Linus Torvalds 已提交
1081 1082 1083 1084 1085

	if (!atomic_dec_and_test(&cfqq->ref))
		return;

	BUG_ON(rb_first(&cfqq->sort_list));
1086
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
J
Jens Axboe 已提交
1087
	BUG_ON(cfq_cfqq_on_rr(cfqq));
L
Linus Torvalds 已提交
1088

1089
	if (unlikely(cfqd->active_queue == cfqq))
J
Jens Axboe 已提交
1090
		__cfq_slice_expired(cfqd, cfqq, 0);
1091

L
Linus Torvalds 已提交
1092 1093 1094 1095 1096 1097 1098 1099
	/*
	 * it's on the empty list and still hashed
	 */
	list_del(&cfqq->cfq_list);
	hlist_del(&cfqq->cfq_hash);
	kmem_cache_free(cfq_pool, cfqq);
}

J
Jens Axboe 已提交
1100
static struct cfq_queue *
J
Jens Axboe 已提交
1101 1102
__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
		    const int hashval)
L
Linus Torvalds 已提交
1103 1104
{
	struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
1105 1106
	struct hlist_node *entry;
	struct cfq_queue *__cfqq;
L
Linus Torvalds 已提交
1107

1108
	hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
A
Al Viro 已提交
1109
		const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
L
Linus Torvalds 已提交
1110

1111
		if (__cfqq->key == key && (__p == prio || !prio))
L
Linus Torvalds 已提交
1112 1113 1114 1115 1116 1117 1118
			return __cfqq;
	}

	return NULL;
}

static struct cfq_queue *
J
Jens Axboe 已提交
1119
cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
L
Linus Torvalds 已提交
1120
{
J
Jens Axboe 已提交
1121
	return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
L
Linus Torvalds 已提交
1122 1123
}

1124
static void cfq_free_io_context(struct io_context *ioc)
L
Linus Torvalds 已提交
1125
{
1126
	struct cfq_io_context *__cic;
1127 1128
	struct rb_node *n;
	int freed = 0;
L
Linus Torvalds 已提交
1129

1130 1131 1132
	while ((n = rb_first(&ioc->cic_root)) != NULL) {
		__cic = rb_entry(n, struct cfq_io_context, rb_node);
		rb_erase(&__cic->rb_node, &ioc->cic_root);
1133
		kmem_cache_free(cfq_ioc_pool, __cic);
1134
		freed++;
L
Linus Torvalds 已提交
1135 1136
	}

1137 1138 1139
	elv_ioc_count_mod(ioc_count, -freed);

	if (ioc_gone && !elv_ioc_count_read(ioc_count))
1140
		complete(ioc_gone);
L
Linus Torvalds 已提交
1141 1142
}

1143
static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
L
Linus Torvalds 已提交
1144
{
1145 1146
	if (unlikely(cfqq == cfqd->active_queue))
		__cfq_slice_expired(cfqd, cfqq, 0);
1147

1148 1149
	cfq_put_queue(cfqq);
}
1150

1151 1152 1153
static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
					 struct cfq_io_context *cic)
{
1154 1155 1156 1157
	list_del_init(&cic->queue_list);
	smp_wmb();
	cic->key = NULL;

1158
	if (cic->cfqq[ASYNC]) {
1159
		cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
1160 1161 1162 1163
		cic->cfqq[ASYNC] = NULL;
	}

	if (cic->cfqq[SYNC]) {
1164
		cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
1165 1166
		cic->cfqq[SYNC] = NULL;
	}
1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179
}


/*
 * Called with interrupts disabled
 */
static void cfq_exit_single_io_context(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;

	if (cfqd) {
		request_queue_t *q = cfqd->queue;

1180
		spin_lock_irq(q->queue_lock);
1181
		__cfq_exit_single_io_context(cfqd, cic);
1182
		spin_unlock_irq(q->queue_lock);
1183
	}
L
Linus Torvalds 已提交
1184 1185
}

1186
static void cfq_exit_io_context(struct io_context *ioc)
L
Linus Torvalds 已提交
1187
{
1188
	struct cfq_io_context *__cic;
1189
	struct rb_node *n;
1190

L
Linus Torvalds 已提交
1191 1192 1193
	/*
	 * put the reference this task is holding to the various queues
	 */
1194 1195 1196 1197 1198

	n = rb_first(&ioc->cic_root);
	while (n != NULL) {
		__cic = rb_entry(n, struct cfq_io_context, rb_node);

1199
		cfq_exit_single_io_context(__cic);
1200
		n = rb_next(n);
L
Linus Torvalds 已提交
1201 1202 1203
	}
}

1204
static struct cfq_io_context *
A
Al Viro 已提交
1205
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
L
Linus Torvalds 已提交
1206
{
1207
	struct cfq_io_context *cic;
L
Linus Torvalds 已提交
1208

1209
	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
L
Linus Torvalds 已提交
1210
	if (cic) {
1211
		memset(cic, 0, sizeof(*cic));
1212
		cic->last_end_request = jiffies;
1213
		INIT_LIST_HEAD(&cic->queue_list);
1214 1215
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
1216
		elv_ioc_count_inc(ioc_count);
L
Linus Torvalds 已提交
1217 1218 1219 1220 1221
	}

	return cic;
}

1222 1223 1224 1225 1226
static void cfq_init_prio_data(struct cfq_queue *cfqq)
{
	struct task_struct *tsk = current;
	int ioprio_class;

J
Jens Axboe 已提交
1227
	if (!cfq_cfqq_prio_changed(cfqq))
1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
	switch (ioprio_class) {
		default:
			printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
		case IOPRIO_CLASS_NONE:
			/*
			 * no prio set, place us in the middle of the BE classes
			 */
			cfqq->ioprio = task_nice_ioprio(tsk);
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
			break;
		case IOPRIO_CLASS_RT:
			cfqq->ioprio = task_ioprio(tsk);
			cfqq->ioprio_class = IOPRIO_CLASS_RT;
			break;
		case IOPRIO_CLASS_BE:
			cfqq->ioprio = task_ioprio(tsk);
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
			break;
		case IOPRIO_CLASS_IDLE:
			cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
			cfqq->ioprio = 7;
J
Jens Axboe 已提交
1252
			cfq_clear_cfqq_idle_window(cfqq);
1253 1254 1255 1256 1257 1258 1259 1260 1261 1262
			break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class = cfqq->ioprio_class;

1263
	cfq_resort_rr_list(cfqq, 0);
J
Jens Axboe 已提交
1264
	cfq_clear_cfqq_prio_changed(cfqq);
1265 1266
}

1267
static inline void changed_ioprio(struct cfq_io_context *cic)
1268
{
1269 1270
	struct cfq_data *cfqd = cic->key;
	struct cfq_queue *cfqq;
1271
	unsigned long flags;
1272

1273 1274 1275
	if (unlikely(!cfqd))
		return;

1276
	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1277 1278 1279 1280 1281 1282 1283 1284 1285 1286

	cfqq = cic->cfqq[ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;
		new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
					 GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
1287
	}
1288 1289 1290 1291 1292

	cfqq = cic->cfqq[SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);

1293
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1294 1295
}

1296
static void cfq_ioc_set_ioprio(struct io_context *ioc)
1297
{
1298
	struct cfq_io_context *cic;
1299
	struct rb_node *n;
1300

1301
	ioc->ioprio_changed = 0;
1302

1303 1304 1305
	n = rb_first(&ioc->cic_root);
	while (n != NULL) {
		cic = rb_entry(n, struct cfq_io_context, rb_node);
1306

1307
		changed_ioprio(cic);
1308 1309
		n = rb_next(n);
	}
1310 1311 1312
}

static struct cfq_queue *
1313
cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
A
Al Viro 已提交
1314
	      gfp_t gfp_mask)
1315 1316 1317
{
	const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
	struct cfq_queue *cfqq, *new_cfqq = NULL;
1318
	unsigned short ioprio;
1319 1320

retry:
1321
	ioprio = tsk->ioprio;
J
Jens Axboe 已提交
1322
	cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
1323 1324 1325 1326 1327 1328

	if (!cfqq) {
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
1329 1330 1331 1332 1333 1334
			/*
			 * Inform the allocator of the fact that we will
			 * just repeat this allocation if it fails, to allow
			 * the allocator to do whatever it needs to attempt to
			 * free memory.
			 */
1335
			spin_unlock_irq(cfqd->queue->queue_lock);
1336
			new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
1337 1338 1339
			spin_lock_irq(cfqd->queue->queue_lock);
			goto retry;
		} else {
1340
			cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358
			if (!cfqq)
				goto out;
		}

		memset(cfqq, 0, sizeof(*cfqq));

		INIT_HLIST_NODE(&cfqq->cfq_hash);
		INIT_LIST_HEAD(&cfqq->cfq_list);
		INIT_LIST_HEAD(&cfqq->fifo);

		cfqq->key = key;
		hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
		atomic_set(&cfqq->ref, 0);
		cfqq->cfqd = cfqd;
		/*
		 * set ->slice_left to allow preemption for a new process
		 */
		cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
1359
		cfq_mark_cfqq_idle_window(cfqq);
J
Jens Axboe 已提交
1360
		cfq_mark_cfqq_prio_changed(cfqq);
1361
		cfq_mark_cfqq_queue_new(cfqq);
J
Jens Axboe 已提交
1362
		cfq_init_prio_data(cfqq);
1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

	atomic_inc(&cfqq->ref);
out:
	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
	return cfqq;
}

1374 1375 1376
static void
cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
{
1377
	WARN_ON(!list_empty(&cic->queue_list));
1378 1379
	rb_erase(&cic->rb_node, &ioc->cic_root);
	kmem_cache_free(cfq_ioc_pool, cic);
1380
	elv_ioc_count_dec(ioc_count);
1381 1382
}

1383 1384 1385
static struct cfq_io_context *
cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
{
1386
	struct rb_node *n;
1387
	struct cfq_io_context *cic;
1388
	void *k, *key = cfqd;
1389

1390 1391
restart:
	n = ioc->cic_root.rb_node;
1392 1393
	while (n) {
		cic = rb_entry(n, struct cfq_io_context, rb_node);
1394 1395 1396
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = cic->key;
		if (unlikely(!k)) {
1397 1398 1399
			cfq_drop_dead_cic(ioc, cic);
			goto restart;
		}
1400

1401
		if (key < k)
1402
			n = n->rb_left;
1403
		else if (key > k)
1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415
			n = n->rb_right;
		else
			return cic;
	}

	return NULL;
}

static inline void
cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
	     struct cfq_io_context *cic)
{
1416 1417
	struct rb_node **p;
	struct rb_node *parent;
1418
	struct cfq_io_context *__cic;
1419
	unsigned long flags;
1420
	void *k;
1421 1422 1423 1424

	cic->ioc = ioc;
	cic->key = cfqd;

1425 1426 1427
restart:
	parent = NULL;
	p = &ioc->cic_root.rb_node;
1428 1429 1430
	while (*p) {
		parent = *p;
		__cic = rb_entry(parent, struct cfq_io_context, rb_node);
1431 1432 1433
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = __cic->key;
		if (unlikely(!k)) {
1434
			cfq_drop_dead_cic(ioc, __cic);
1435 1436
			goto restart;
		}
1437

1438
		if (cic->key < k)
1439
			p = &(*p)->rb_left;
1440
		else if (cic->key > k)
1441 1442 1443 1444 1445 1446 1447
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&cic->rb_node, parent, p);
	rb_insert_color(&cic->rb_node, &ioc->cic_root);
1448

1449
	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1450
	list_add(&cic->queue_list, &cfqd->cic_list);
1451
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1452 1453
}

L
Linus Torvalds 已提交
1454 1455 1456
/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
1457
 * than one device managed by cfq.
L
Linus Torvalds 已提交
1458 1459
 */
static struct cfq_io_context *
1460
cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
L
Linus Torvalds 已提交
1461
{
1462
	struct io_context *ioc = NULL;
L
Linus Torvalds 已提交
1463 1464
	struct cfq_io_context *cic;

1465
	might_sleep_if(gfp_mask & __GFP_WAIT);
L
Linus Torvalds 已提交
1466

1467
	ioc = get_io_context(gfp_mask, cfqd->queue->node);
L
Linus Torvalds 已提交
1468 1469 1470
	if (!ioc)
		return NULL;

1471 1472 1473
	cic = cfq_cic_rb_lookup(cfqd, ioc);
	if (cic)
		goto out;
L
Linus Torvalds 已提交
1474

1475 1476 1477
	cic = cfq_alloc_io_context(cfqd, gfp_mask);
	if (cic == NULL)
		goto err;
L
Linus Torvalds 已提交
1478

1479
	cfq_cic_link(cfqd, ioc, cic);
L
Linus Torvalds 已提交
1480
out:
1481 1482 1483 1484
	smp_read_barrier_depends();
	if (unlikely(ioc->ioprio_changed))
		cfq_ioc_set_ioprio(ioc);

L
Linus Torvalds 已提交
1485 1486 1487 1488 1489 1490
	return cic;
err:
	put_io_context(ioc);
	return NULL;
}

1491 1492
static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
L
Linus Torvalds 已提交
1493
{
1494 1495
	unsigned long elapsed = jiffies - cic->last_end_request;
	unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
1496

1497 1498 1499 1500
	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}
L
Linus Torvalds 已提交
1501

1502
static void
1503
cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
1504 1505 1506 1507
{
	sector_t sdist;
	u64 total;

J
Jens Axboe 已提交
1508 1509
	if (cic->last_request_pos < rq->sector)
		sdist = rq->sector - cic->last_request_pos;
1510
	else
J
Jens Axboe 已提交
1511
		sdist = cic->last_request_pos - rq->sector;
1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (cic->seek_samples <= 60) /* second&third seek */
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (cic->seek_mean * 4)	+ 2*1024*64);

	cic->seek_samples = (7*cic->seek_samples + 256) / 8;
	cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
	total = cic->seek_total + (cic->seek_samples/2);
	do_div(total, cic->seek_samples);
	cic->seek_mean = (sector_t)total;
}
L
Linus Torvalds 已提交
1528

1529 1530 1531 1532 1533 1534 1535 1536
/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_context *cic)
{
J
Jens Axboe 已提交
1537
	int enable_idle = cfq_cfqq_idle_window(cfqq);
L
Linus Torvalds 已提交
1538

1539 1540
	if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
	    (cfqd->hw_tag && CIC_SEEKY(cic)))
1541 1542 1543 1544 1545 1546
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
		if (cic->ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
L
Linus Torvalds 已提交
1547 1548
	}

J
Jens Axboe 已提交
1549 1550 1551 1552
	if (enable_idle)
		cfq_mark_cfqq_idle_window(cfqq);
	else
		cfq_clear_cfqq_idle_window(cfqq);
1553
}
L
Linus Torvalds 已提交
1554

1555 1556 1557 1558 1559 1560 1561

/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no or if we aren't sure, a 1 will cause a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
J
Jens Axboe 已提交
1562
		   struct request *rq)
1563 1564 1565 1566 1567 1568 1569
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfq_class_idle(new_cfqq))
		return 0;

	if (!cfqq)
1570
		return 0;
1571 1572 1573

	if (cfq_class_idle(cfqq))
		return 1;
J
Jens Axboe 已提交
1574
	if (!cfq_cfqq_wait_request(new_cfqq))
1575 1576 1577 1578 1579 1580
		return 0;
	/*
	 * if it doesn't have slice left, forget it
	 */
	if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
		return 0;
1581 1582 1583 1584
	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
J
Jens Axboe 已提交
1585
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
1586
		return 1;
1587 1588 1589 1590 1591 1592
	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if (rq_is_meta(rq) && !cfqq->meta_pending)
		return 1;
1593 1594 1595 1596 1597 1598 1599 1600 1601 1602

	return 0;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
1603
	cfq_slice_expired(cfqd, 1);
1604 1605 1606 1607

	if (!cfqq->slice_left)
		cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;

1608 1609 1610 1611 1612 1613 1614
	/*
	 * Put the new queue at the front of the of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	list_move(&cfqq->cfq_list, &cfqd->cur_rr);

1615 1616 1617 1618
	cfqq->slice_end = cfqq->slice_left + jiffies;
}

/*
J
Jens Axboe 已提交
1619
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
1620 1621 1622
 * something we should do about it
 */
static void
J
Jens Axboe 已提交
1623 1624
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
1625
{
J
Jens Axboe 已提交
1626
	struct cfq_io_context *cic = RQ_CIC(rq);
1627

1628 1629 1630
	if (rq_is_meta(rq))
		cfqq->meta_pending++;

1631
	/*
1632
	 * check if this request is a better next-serve candidate)) {
1633
	 */
J
Jens Axboe 已提交
1634 1635
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
	BUG_ON(!cfqq->next_rq);
1636

J
Jens Axboe 已提交
1637 1638 1639 1640
	/*
	 * we never wait for an async request and we don't allow preemption
	 * of an async request. so just return early
	 */
J
Jens Axboe 已提交
1641
	if (!rq_is_sync(rq)) {
1642 1643 1644 1645 1646 1647 1648
		/*
		 * sync process issued an async request, if it's waiting
		 * then expire it and kick rq handling.
		 */
		if (cic == cfqd->active_cic &&
		    del_timer(&cfqd->idle_slice_timer)) {
			cfq_slice_expired(cfqd, 0);
1649
			blk_start_queueing(cfqd->queue);
1650
		}
J
Jens Axboe 已提交
1651
		return;
1652
	}
1653

J
Jens Axboe 已提交
1654
	cfq_update_io_thinktime(cfqd, cic);
1655
	cfq_update_io_seektime(cic, rq);
J
Jens Axboe 已提交
1656 1657
	cfq_update_idle_window(cfqd, cfqq, cic);

J
Jens Axboe 已提交
1658
	cic->last_request_pos = rq->sector + rq->nr_sectors;
1659 1660 1661 1662 1663 1664 1665

	if (cfqq == cfqd->active_queue) {
		/*
		 * if we are waiting for a request for this queue, let it rip
		 * immediately and flag that we must not expire this queue
		 * just now
		 */
J
Jens Axboe 已提交
1666 1667
		if (cfq_cfqq_wait_request(cfqq)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
1668
			del_timer(&cfqd->idle_slice_timer);
1669
			blk_start_queueing(cfqd->queue);
1670
		}
J
Jens Axboe 已提交
1671
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
1672 1673 1674 1675 1676 1677
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired it's mean thinktime or this new queue
		 * has some old slice time left and is of higher priority
		 */
		cfq_preempt_queue(cfqd, cfqq);
J
Jens Axboe 已提交
1678
		cfq_mark_cfqq_must_dispatch(cfqq);
1679
		blk_start_queueing(cfqd->queue);
1680
	}
L
Linus Torvalds 已提交
1681 1682
}

1683
static void cfq_insert_request(request_queue_t *q, struct request *rq)
L
Linus Torvalds 已提交
1684
{
1685
	struct cfq_data *cfqd = q->elevator->elevator_data;
J
Jens Axboe 已提交
1686
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1687 1688

	cfq_init_prio_data(cfqq);
L
Linus Torvalds 已提交
1689

J
Jens Axboe 已提交
1690
	cfq_add_rq_rb(rq);
L
Linus Torvalds 已提交
1691

1692 1693
	list_add_tail(&rq->queuelist, &cfqq->fifo);

J
Jens Axboe 已提交
1694
	cfq_rq_enqueued(cfqd, cfqq, rq);
L
Linus Torvalds 已提交
1695 1696 1697 1698
}

static void cfq_completed_request(request_queue_t *q, struct request *rq)
{
J
Jens Axboe 已提交
1699
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1700
	struct cfq_data *cfqd = cfqq->cfqd;
1701
	const int sync = rq_is_sync(rq);
1702
	unsigned long now;
L
Linus Torvalds 已提交
1703

1704
	now = jiffies;
L
Linus Torvalds 已提交
1705

1706 1707 1708 1709
	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->on_dispatch[sync]);
	cfqd->rq_in_driver--;
	cfqq->on_dispatch[sync]--;
1710
	cfqq->service_last = now;
L
Linus Torvalds 已提交
1711

1712 1713
	if (!cfq_class_idle(cfqq))
		cfqd->last_end_request = now;
J
Jens Axboe 已提交
1714

1715
	cfq_resort_rr_list(cfqq, 0);
L
Linus Torvalds 已提交
1716

1717
	if (sync)
J
Jens Axboe 已提交
1718
		RQ_CIC(rq)->last_end_request = now;
1719 1720 1721 1722 1723 1724 1725 1726

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		if (time_after(now, cfqq->slice_end))
			cfq_slice_expired(cfqd, 0);
1727
		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1728 1729 1730 1731
			if (!cfq_arm_slice_timer(cfqd, cfqq))
				cfq_schedule_dispatch(cfqd);
		}
	}
L
Linus Torvalds 已提交
1732 1733
}

1734 1735 1736 1737 1738
/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
L
Linus Torvalds 已提交
1739
{
1740 1741
	const int ioprio_class = cfqq->ioprio_class;
	const int ioprio = cfqq->ioprio;
L
Linus Torvalds 已提交
1742

1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760
	if (has_fs_excl()) {
		/*
		 * boost idle prio on transactions that would lock out other
		 * users of the filesystem
		 */
		if (cfq_class_idle(cfqq))
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
		if (cfqq->ioprio > IOPRIO_NORM)
			cfqq->ioprio = IOPRIO_NORM;
	} else {
		/*
		 * check if we need to unboost the queue
		 */
		if (cfqq->ioprio_class != cfqq->org_ioprio_class)
			cfqq->ioprio_class = cfqq->org_ioprio_class;
		if (cfqq->ioprio != cfqq->org_ioprio)
			cfqq->ioprio = cfqq->org_ioprio;
	}
L
Linus Torvalds 已提交
1761

1762 1763 1764
	/*
	 * refile between round-robin lists if we moved the priority class
	 */
1765
	if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio))
1766 1767
		cfq_resort_rr_list(cfqq, 0);
}
L
Linus Torvalds 已提交
1768

1769
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
1770
{
J
Jens Axboe 已提交
1771
	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
A
Andrew Morton 已提交
1772
	    !cfq_cfqq_must_alloc_slice(cfqq)) {
J
Jens Axboe 已提交
1773
		cfq_mark_cfqq_must_alloc_slice(cfqq);
1774
		return ELV_MQUEUE_MUST;
J
Jens Axboe 已提交
1775
	}
L
Linus Torvalds 已提交
1776

1777 1778 1779
	return ELV_MQUEUE_MAY;
}

1780
static int cfq_may_queue(request_queue_t *q, int rw)
1781 1782 1783 1784
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_queue *cfqq;
1785 1786 1787
	unsigned int key;

	key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
1788 1789 1790 1791 1792 1793 1794

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
1795
	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
1796 1797 1798 1799
	if (cfqq) {
		cfq_init_prio_data(cfqq);
		cfq_prio_boost(cfqq);

1800
		return __cfq_may_queue(cfqq);
1801 1802 1803
	}

	return ELV_MQUEUE_MAY;
L
Linus Torvalds 已提交
1804 1805 1806 1807 1808
}

/*
 * queue lock held here
 */
1809
static void cfq_put_request(struct request *rq)
L
Linus Torvalds 已提交
1810
{
J
Jens Axboe 已提交
1811
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
L
Linus Torvalds 已提交
1812

J
Jens Axboe 已提交
1813
	if (cfqq) {
1814
		const int rw = rq_data_dir(rq);
L
Linus Torvalds 已提交
1815

1816 1817
		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;
L
Linus Torvalds 已提交
1818

J
Jens Axboe 已提交
1819
		put_io_context(RQ_CIC(rq)->ioc);
L
Linus Torvalds 已提交
1820 1821

		rq->elevator_private = NULL;
J
Jens Axboe 已提交
1822
		rq->elevator_private2 = NULL;
L
Linus Torvalds 已提交
1823 1824 1825 1826 1827 1828

		cfq_put_queue(cfqq);
	}
}

/*
1829
 * Allocate cfq data structures associated with this request.
L
Linus Torvalds 已提交
1830
 */
1831
static int
1832
cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
L
Linus Torvalds 已提交
1833 1834
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
J
Jens Axboe 已提交
1835
	struct task_struct *tsk = current;
L
Linus Torvalds 已提交
1836 1837
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
1838 1839
	const int is_sync = rq_is_sync(rq);
	pid_t key = cfq_queue_pid(tsk, rw, is_sync);
1840
	struct cfq_queue *cfqq;
L
Linus Torvalds 已提交
1841 1842 1843 1844
	unsigned long flags;

	might_sleep_if(gfp_mask & __GFP_WAIT);

1845
	cic = cfq_get_io_context(cfqd, gfp_mask);
1846

L
Linus Torvalds 已提交
1847 1848
	spin_lock_irqsave(q->queue_lock, flags);

1849 1850 1851
	if (!cic)
		goto queue_fail;

1852
	if (!cic->cfqq[is_sync]) {
1853
		cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
1854 1855
		if (!cfqq)
			goto queue_fail;
L
Linus Torvalds 已提交
1856

1857
		cic->cfqq[is_sync] = cfqq;
1858
	} else
1859
		cfqq = cic->cfqq[is_sync];
L
Linus Torvalds 已提交
1860 1861

	cfqq->allocated[rw]++;
J
Jens Axboe 已提交
1862
	cfq_clear_cfqq_must_alloc(cfqq);
1863
	atomic_inc(&cfqq->ref);
L
Linus Torvalds 已提交
1864

J
Jens Axboe 已提交
1865
	spin_unlock_irqrestore(q->queue_lock, flags);
J
Jens Axboe 已提交
1866

J
Jens Axboe 已提交
1867 1868 1869
	rq->elevator_private = cic;
	rq->elevator_private2 = cfqq;
	return 0;
L
Linus Torvalds 已提交
1870

1871 1872 1873
queue_fail:
	if (cic)
		put_io_context(cic->ioc);
1874

J
Jens Axboe 已提交
1875
	cfq_schedule_dispatch(cfqd);
L
Linus Torvalds 已提交
1876 1877 1878 1879
	spin_unlock_irqrestore(q->queue_lock, flags);
	return 1;
}

1880
static void cfq_kick_queue(struct work_struct *work)
1881
{
1882 1883 1884
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	request_queue_t *q = cfqd->queue;
1885 1886 1887
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
1888
	blk_start_queueing(q);
1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	if ((cfqq = cfqd->active_queue) != NULL) {
		unsigned long now = jiffies;

		/*
		 * expired
		 */
		if (time_after(now, cfqq->slice_end))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
1916
		if (!cfqd->busy_queues)
1917 1918 1919 1920 1921
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
1922
		if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
J
Jens Axboe 已提交
1923
			cfq_mark_cfqq_must_dispatch(cfqq);
1924 1925 1926 1927 1928 1929
			goto out_kick;
		}
	}
expire:
	cfq_slice_expired(cfqd, 0);
out_kick:
J
Jens Axboe 已提交
1930
	cfq_schedule_dispatch(cfqd);
1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

/*
 * Timer running if an idle class queue is waiting for service
 */
static void cfq_idle_class_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	unsigned long flags, end;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	/*
	 * race with a non-idle queue, reset timer
	 */
	end = cfqd->last_end_request + CFQ_IDLE_GRACE;
1949 1950 1951
	if (!time_after_eq(jiffies, end))
		mod_timer(&cfqd->idle_class_timer, end);
	else
J
Jens Axboe 已提交
1952
		cfq_schedule_dispatch(cfqd);
1953 1954 1955 1956

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

J
Jens Axboe 已提交
1957 1958 1959 1960 1961 1962
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	del_timer_sync(&cfqd->idle_class_timer);
	blk_sync_queue(cfqd->queue);
}
1963

L
Linus Torvalds 已提交
1964 1965
static void cfq_exit_queue(elevator_t *e)
{
1966
	struct cfq_data *cfqd = e->elevator_data;
1967
	request_queue_t *q = cfqd->queue;
1968

J
Jens Axboe 已提交
1969
	cfq_shutdown_timer_wq(cfqd);
1970

1971
	spin_lock_irq(q->queue_lock);
1972

1973 1974
	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
1975 1976

	while (!list_empty(&cfqd->cic_list)) {
1977 1978 1979
		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
							struct cfq_io_context,
							queue_list);
1980 1981

		__cfq_exit_single_io_context(cfqd, cic);
1982
	}
1983

1984
	spin_unlock_irq(q->queue_lock);
1985 1986 1987 1988 1989

	cfq_shutdown_timer_wq(cfqd);

	kfree(cfqd->cfq_hash);
	kfree(cfqd);
L
Linus Torvalds 已提交
1990 1991
}

1992
static void *cfq_init_queue(request_queue_t *q)
L
Linus Torvalds 已提交
1993 1994 1995 1996
{
	struct cfq_data *cfqd;
	int i;

1997
	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
L
Linus Torvalds 已提交
1998
	if (!cfqd)
J
Jens Axboe 已提交
1999
		return NULL;
L
Linus Torvalds 已提交
2000 2001

	memset(cfqd, 0, sizeof(*cfqd));
2002 2003 2004 2005 2006 2007 2008

	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		INIT_LIST_HEAD(&cfqd->rr_list[i]);

	INIT_LIST_HEAD(&cfqd->busy_rr);
	INIT_LIST_HEAD(&cfqd->cur_rr);
	INIT_LIST_HEAD(&cfqd->idle_rr);
2009
	INIT_LIST_HEAD(&cfqd->cic_list);
L
Linus Torvalds 已提交
2010

2011
	cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
L
Linus Torvalds 已提交
2012
	if (!cfqd->cfq_hash)
J
Jens Axboe 已提交
2013
		goto out_free;
L
Linus Torvalds 已提交
2014 2015 2016 2017 2018 2019

	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);

	cfqd->queue = q;

2020 2021 2022 2023 2024 2025 2026 2027
	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	init_timer(&cfqd->idle_class_timer);
	cfqd->idle_class_timer.function = cfq_idle_class_timer;
	cfqd->idle_class_timer.data = (unsigned long) cfqd;

2028
	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
2029

L
Linus Torvalds 已提交
2030
	cfqd->cfq_quantum = cfq_quantum;
2031 2032
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
L
Linus Torvalds 已提交
2033 2034
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
2035 2036 2037 2038
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
J
Jens Axboe 已提交
2039

J
Jens Axboe 已提交
2040
	return cfqd;
J
Jens Axboe 已提交
2041
out_free:
L
Linus Torvalds 已提交
2042
	kfree(cfqd);
J
Jens Axboe 已提交
2043
	return NULL;
L
Linus Torvalds 已提交
2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091
}

static void cfq_slab_kill(void)
{
	if (cfq_pool)
		kmem_cache_destroy(cfq_pool);
	if (cfq_ioc_pool)
		kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
	cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
					NULL, NULL);
	if (!cfq_pool)
		goto fail;

	cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
			sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
	if (!cfq_ioc_pool)
		goto fail;

	return 0;
fail:
	cfq_slab_kill();
	return -ENOMEM;
}

/*
 * sysfs parts below -->
 */

static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
2092
static ssize_t __FUNC(elevator_t *e, char *page)			\
L
Linus Torvalds 已提交
2093
{									\
2094
	struct cfq_data *cfqd = e->elevator_data;			\
L
Linus Torvalds 已提交
2095 2096 2097 2098 2099 2100
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
2101 2102
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
2103 2104
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
2105 2106 2107 2108
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
L
Linus Torvalds 已提交
2109 2110 2111
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
2112
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
L
Linus Torvalds 已提交
2113
{									\
2114
	struct cfq_data *cfqd = e->elevator_data;			\
L
Linus Torvalds 已提交
2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
2128 2129
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
2130 2131
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
2132 2133 2134 2135
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
L
Linus Torvalds 已提交
2136 2137
#undef STORE_FUNCTION

2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151
#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	__ATTR_NULL
L
Linus Torvalds 已提交
2152 2153 2154 2155 2156 2157 2158
};

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn = 		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
2159
		.elevator_allow_merge_fn =	cfq_allow_merge,
2160
		.elevator_dispatch_fn =		cfq_dispatch_requests,
L
Linus Torvalds 已提交
2161
		.elevator_add_req_fn =		cfq_insert_request,
2162
		.elevator_activate_req_fn =	cfq_activate_request,
L
Linus Torvalds 已提交
2163 2164 2165
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_queue_empty_fn =	cfq_queue_empty,
		.elevator_completed_req_fn =	cfq_completed_request,
2166 2167
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
L
Linus Torvalds 已提交
2168 2169 2170 2171 2172
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
2173
		.trim =				cfq_free_io_context,
L
Linus Torvalds 已提交
2174
	},
2175
	.elevator_attrs =	cfq_attrs,
L
Linus Torvalds 已提交
2176 2177 2178 2179 2180 2181 2182 2183
	.elevator_name =	"cfq",
	.elevator_owner =	THIS_MODULE,
};

static int __init cfq_init(void)
{
	int ret;

2184 2185 2186 2187 2188 2189 2190 2191
	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

L
Linus Torvalds 已提交
2192 2193 2194 2195
	if (cfq_slab_setup())
		return -ENOMEM;

	ret = elv_register(&iosched_cfq);
2196 2197
	if (ret)
		cfq_slab_kill();
L
Linus Torvalds 已提交
2198 2199 2200 2201 2202 2203

	return ret;
}

static void __exit cfq_exit(void)
{
2204
	DECLARE_COMPLETION_ONSTACK(all_gone);
L
Linus Torvalds 已提交
2205
	elv_unregister(&iosched_cfq);
2206
	ioc_gone = &all_gone;
2207 2208
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();
2209
	if (elv_ioc_count_read(ioc_count))
2210
		wait_for_completion(ioc_gone);
2211
	synchronize_rcu();
2212
	cfq_slab_kill();
L
Linus Torvalds 已提交
2213 2214 2215 2216 2217 2218 2219 2220
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");