/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>

/*
 * tunables
 */
static const int cfq_quantum = 4;		/* max queue in one round of service */
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
static const int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
static const int cfq_back_penalty = 2;		/* penalty of a backwards seek */

static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;

/*
 * grace period before allowing idle class to get disk access
 */
#define CFQ_IDLE_GRACE		(HZ / 10)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)

#define RQ_CIC(rq)		((struct cfq_io_context*)(rq)->elevator_private)
#define RQ_CFQQ(rq)		((rq)->elevator_private2)

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define ASYNC			(0)
#define SYNC			(1)

#define sample_valid(samples)	((samples) > 80)

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct cfq_rb_root service_tree;
	unsigned int busy_queues;

	int rq_in_driver;
	int sync_flight;
	int hw_tag;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	struct timer_list idle_class_timer;

	sector_t last_position;
	unsigned long last_end_request;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;

	struct list_head cic_list;
};

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* pending metadata requests */
	int meta_pending;
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_end;
	long slice_resid;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	/* various state flags, see below */
	unsigned int flags;
};

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(queue_new);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
#undef CFQ_CFQQ_FNS

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
				       struct task_struct *, gfp_t);
static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
						struct io_context *);

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
					    int is_sync)
{
	return cic->cfqq[!!is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_context *cic,
				struct cfq_queue *cfqq, int is_sync)
{
	cic->cfqq[!!is_sync] = cfqq;
}

/*
 * We regard a request as SYNC if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline int cfq_bio_sync(struct bio *bio)
{
	if (bio_data_dir(bio) == READ || bio_sync(bio))
		return 1;

	return 0;
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues)
		kblockd_schedule_work(&cfqd->unplug_work);
}

static int cfq_queue_empty(struct request_queue *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return !cfqd->busy_queues;
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get the full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline int cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return 0;
	if (time_before(jiffies, cfqq->slice_end))
		return 0;

	return 1;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
		return rq1;
	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
		return rq2;

	s1 = rq1->sector;
	s2 = rq2->sector;

	last = cfqd->last_position;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}

/*
 * The below is leftmost cache rbtree addon
 */
static struct rb_node *cfq_rb_first(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	return root->left;
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;

	rb_erase(n, &root->rb);
	RB_CLEAR_NODE(n);
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev);
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

/*
 * The cfqd->service_tree holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd,
				    struct cfq_queue *cfqq, int add_front)
{
	struct rb_node **p = &cfqd->service_tree.rb.rb_node;
	struct rb_node *parent = NULL;
	unsigned long rb_key;
	int left;

	if (!add_front) {
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key += cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else
		rb_key = 0;

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key)
			return;

		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
	}

	left = 1;
	while (*p) {
		struct cfq_queue *__cfqq;
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort RT queues first, we always want to give
		 * preference to them. IDLE queues go to the back.
		 * after that, sort on the next service time.
		 */
		if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
			n = &(*p)->rb_right;
		else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
			n = &(*p)->rb_right;
		else if (rb_key < __cfqq->rb_key)
			n = &(*p)->rb_left;
		else
			n = &(*p)->rb_right;

		if (n == &(*p)->rb_right)
			left = 0;

		p = n;
	}

	if (left)
		cfqd->service_tree.left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
}

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq))
		cfq_service_tree_add(cfqd, cfqq, 0);
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static inline void
cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static inline void
cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node))
		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
}

/*
 * rb tree support functions
 */
static inline void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
	BUG_ON(!cfqq->next_rq);
}

static inline void
cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_add_rq_rb(rq);
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;

	/*
	 * If the depth is larger than 1, it really could be queueing. But let's
	 * make the mark a little higher - idling could still be good for
	 * low queueing, and a low queueing number could also just indicate
	 * a SCSI mid layer like behaviour where limit+1 is often seen.
	 */
	if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
		cfqd->hw_tag = 1;

	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	if (rq_is_meta(rq)) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}

static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(next->start_time, rq->start_time))
		list_move(&rq->queuelist, &next->queuelist);

	cfq_remove_request(next);
}

static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return 0;

	/*
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_rb_lookup(cfqd, current->io_context);
	if (!cic)
		return 0;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq == RQ_CFQQ(rq))
		return 1;

	return 0;
}

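/*
 * Make cfqq the new active queue and reset its per-slice state. A NULL
 * cfqq simply clears the current active queue.
 */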
static inline void
__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (cfqq) {
		/*
		 * stop potential idle class queues waiting service
		 */
		del_timer(&cfqd->idle_class_timer);

		cfqq->slice_end = 0;
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);
		cfq_clear_cfqq_queue_new(cfqq);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int timed_out)
{
	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

	cfq_clear_cfqq_must_dispatch(cfqq);
	cfq_clear_cfqq_wait_request(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out && !cfq_cfqq_slice_new(cfqq))
		cfqq->slice_resid = cfqq->slice_end - jiffies;

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}

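/*
 * Arm the idle class grace timer if the grace period since the last
 * request completion has not yet elapsed. Returns 1 if the timer was
 * armed, 0 otherwise.
 */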
static int start_idle_class_timer(struct cfq_data *cfqd)
{
	unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
	unsigned long now = jiffies;

	if (time_before(now, end) &&
	    time_after_eq(now, cfqd->last_end_request)) {
		mod_timer(&cfqd->idle_class_timer, end);
		return 1;
	}

	return 0;
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	struct rb_node *n;

	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
		return NULL;

	n = cfq_rb_first(&cfqd->service_tree);
	cfqq = rb_entry(n, struct cfq_queue, rb_node);

	if (cfq_class_idle(cfqq)) {
		/*
		 * if we have idle queues and no rt or be queues had
		 * pending requests, either allow immediate service if
		 * the grace period has passed or arm the idle grace
		 * timer
		 */
		if (start_idle_class_timer(cfqd))
			cfqq = NULL;
	}

	return cfqq;
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfq_get_next_queue(cfqd);
	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

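/*
 * Absolute sector distance between a request and the last dispatched
 * position on the device.
 */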
static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (rq->sector >= cfqd->last_position)
		return rq->sector - cfqd->last_position;
	else
		return cfqd->last_position - rq->sector;
}

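/*
 * A request is "close" if it lies within the mean seek distance of the
 * active queue's recent IO pattern.
 */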
static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
{
	struct cfq_io_context *cic = cfqd->active_cic;

	if (!sample_valid(cic->seek_samples))
		return 0;

	return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
}

static int cfq_close_cooperator(struct cfq_data *cfq_data,
				struct cfq_queue *cfqq)
{
	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	return 0;
}

#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))

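/*
 * With the active queue drained, idle briefly for the next (likely
 * sequential) request from the same process instead of switching queues
 * immediately.
 */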
static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_context *cic;
	unsigned long sl;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !cic->ioc->task)
		return;

	/*
	 * See if this prio level has a good candidate
	 */
	if (cfq_close_cooperator(cfqd, cfqq) &&
	    (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
		return;

	cfq_mark_cfqq_must_dispatch(cfqq);
	cfq_mark_cfqq_wait_request(cfqq);

	/*
	 * we don't want to idle for seeks, but we do want to allow
	 * fair distribution of slice time for a process doing back-to-back
	 * seeks. so allow a little bit of time for him to submit a new rq
	 */
	sl = cfqd->cfq_slice_idle;
	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
		sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
}

/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_remove_request(rq);
	cfqq->dispatched++;
	elv_dispatch_sort(q, rq);

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight++;
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq;
	int fifo;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	fifo = cfq_cfqq_sync(cfqq);
	rq = rq_entry_fifo(cfqq->fifo.next);

	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
		return NULL;

	return rq;
}

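/*
 * Maximum number of requests an async queue may dispatch per slice,
 * scaled by io priority.
 */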
static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq))
		goto expire;

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer) ||
	    (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	cfqq = cfq_set_active_queue(cfqd);
keep_queue:
	return cfqq;
}

/*
 * Dispatch some requests from cfqq, moving them to the request queue
 * dispatch list.
 */
static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			int max_dispatch)
{
	int dispatched = 0;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	do {
		struct request *rq;

		/*
		 * follow expired path, else get first next available
		 */
		if ((rq = cfq_check_fifo(cfqq)) == NULL)
			rq = cfqq->next_rq;

		/*
		 * finally, insert request into driver dispatch list
		 */
		cfq_dispatch_insert(cfqd->queue, rq);

		dispatched++;

		if (!cfqd->active_cic) {
			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
			cfqd->active_cic = RQ_CIC(rq);
		}

		if (RB_EMPTY_ROOT(&cfqq->sort_list))
			break;

	} while (dispatched < max_dispatch);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queue always expire after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0);
	}

	return dispatched;
}

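/*
 * Dispatch every remaining request on cfqq straight to the device queue.
 */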
static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	int dispatched = 0;
	struct rb_node *n;

	while ((n = cfq_rb_first(&cfqd->service_tree)) != NULL) {
		struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node);

		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
	}

	cfq_slice_expired(cfqd, 0);

	BUG_ON(cfqd->busy_queues);

	return dispatched;
}

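/*
 * Elevator dispatch entry point: pick queues via cfq_select_queue() and
 * dispatch from each, honoring the quantum and the sync-flight limit.
 */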
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;
	int dispatched;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	dispatched = 0;
	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
		int max_dispatch;

		max_dispatch = cfqd->cfq_quantum;
		if (cfq_class_idle(cfqq))
			max_dispatch = 1;

		if (cfqq->dispatched >= max_dispatch) {
			if (cfqd->busy_queues > 1)
				break;
			if (cfqq->dispatched >= 4 * max_dispatch)
				break;
		}

		if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
			break;

		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_wait_request(cfqq);
		del_timer(&cfqd->idle_slice_timer);

		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
	}

	return dispatched;
}

/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;

	BUG_ON(atomic_read(&cfqq->ref) <= 0);

	if (!atomic_dec_and_test(&cfqq->ref))
		return;

	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	BUG_ON(cfq_cfqq_on_rr(cfqq));

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	kmem_cache_free(cfq_pool, cfqq);
}

static void cfq_free_io_context(struct io_context *ioc)
{
	struct cfq_io_context *__cic;
	struct rb_node *n;
	int freed = 0;

	ioc->ioc_data = NULL;

	while ((n = rb_first(&ioc->cic_root)) != NULL) {
		__cic = rb_entry(n, struct cfq_io_context, rb_node);
		rb_erase(&__cic->rb_node, &ioc->cic_root);
		kmem_cache_free(cfq_ioc_pool, __cic);
		freed++;
	}

	elv_ioc_count_mod(ioc_count, -freed);

	if (ioc_gone && !elv_ioc_count_read(ioc_count))
		complete(ioc_gone);
}

static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_queue(cfqq);
}

static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
					 struct cfq_io_context *cic)
{
	list_del_init(&cic->queue_list);
	smp_wmb();
	cic->key = NULL;

	if (cic->cfqq[ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
		cic->cfqq[ASYNC] = NULL;
	}

	if (cic->cfqq[SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
		cic->cfqq[SYNC] = NULL;
	}
}

static void cfq_exit_single_io_context(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;

	if (cfqd) {
		struct request_queue *q = cfqd->queue;

		spin_lock_irq(q->queue_lock);
		__cfq_exit_single_io_context(cfqd, cic);
		spin_unlock_irq(q->queue_lock);
	}
}

/*
 * The process that ioc belongs to has exited, we need to clean up
 * and put the internal structures we have that belong to that process.
 */
static void cfq_exit_io_context(struct io_context *ioc)
{
	struct cfq_io_context *__cic;
	struct rb_node *n;

	ioc->ioc_data = NULL;

	/*
	 * put the reference this task is holding to the various queues
	 */
	n = rb_first(&ioc->cic_root);
	while (n != NULL) {
		__cic = rb_entry(n, struct cfq_io_context, rb_node);

		cfq_exit_single_io_context(__cic);
		n = rb_next(n);
	}
}

static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct cfq_io_context *cic;

	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
							cfqd->queue->node);
	if (cic) {
		cic->last_end_request = jiffies;
		INIT_LIST_HEAD(&cic->queue_list);
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
		elv_ioc_count_inc(ioc_count);
	}

	return cic;
}

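/*
 * Derive the queue's io priority and class from the owning task, and
 * remember the original values so a temporary boost can be undone.
 */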
static void cfq_init_prio_data(struct cfq_queue *cfqq)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
	switch (ioprio_class) {
		default:
			printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
		case IOPRIO_CLASS_NONE:
			/*
			 * no prio set, place us in the middle of the BE classes
			 */
			cfqq->ioprio = task_nice_ioprio(tsk);
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
			break;
		case IOPRIO_CLASS_RT:
			cfqq->ioprio = task_ioprio(tsk);
			cfqq->ioprio_class = IOPRIO_CLASS_RT;
			break;
		case IOPRIO_CLASS_BE:
			cfqq->ioprio = task_ioprio(tsk);
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
			break;
		case IOPRIO_CLASS_IDLE:
			cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
			cfqq->ioprio = 7;
			cfq_clear_cfqq_idle_window(cfqq);
			break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class = cfqq->ioprio_class;
	cfq_clear_cfqq_prio_changed(cfqq);
}

static inline void changed_ioprio(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;
	struct cfq_queue *cfqq;
	unsigned long flags;

	if (unlikely(!cfqd))
		return;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cic->cfqq[ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;
		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc->task,
					 GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
	}

	cfqq = cic->cfqq[SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_ioc_set_ioprio(struct io_context *ioc)
{
	struct cfq_io_context *cic;
	struct rb_node *n;

	ioc->ioprio_changed = 0;

	n = rb_first(&ioc->cic_root);
	while (n != NULL) {
		cic = rb_entry(n, struct cfq_io_context, rb_node);

		changed_ioprio(cic);
		n = rb_next(n);
	}
}

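/*
 * Find the cfq_queue for this context, allocating a new one if none
 * exists. May drop and retake the queue lock when allocating with
 * __GFP_WAIT.
 */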
static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
		     struct task_struct *tsk, gfp_t gfp_mask)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	struct cfq_io_context *cic;

retry:
	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
	/* cic always exists here */
	cfqq = cic_to_cfqq(cic, is_sync);

	if (!cfqq) {
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			/*
			 * Inform the allocator of the fact that we will
			 * just repeat this allocation if it fails, to allow
			 * the allocator to do whatever it needs to attempt to
			 * free memory.
			 */
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
					cfqd->queue->node);
			spin_lock_irq(cfqd->queue->queue_lock);
			goto retry;
		} else {
			cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_ZERO,
					cfqd->queue->node);
			if (!cfqq)
				goto out;
		}

		RB_CLEAR_NODE(&cfqq->rb_node);
		INIT_LIST_HEAD(&cfqq->fifo);

		atomic_set(&cfqq->ref, 0);
		cfqq->cfqd = cfqd;

		if (is_sync) {
			cfq_mark_cfqq_idle_window(cfqq);
			cfq_mark_cfqq_sync(cfqq);
		}

		cfq_mark_cfqq_prio_changed(cfqq);
		cfq_mark_cfqq_queue_new(cfqq);

		cfq_init_prio_data(cfqq);
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

out:
	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
	return cfqq;
}

static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &cfqd->async_cfqq[0][ioprio];
	case IOPRIO_CLASS_BE:
		return &cfqd->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqd->async_idle_cfqq;
	default:
		BUG();
	}
}

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
	      gfp_t gfp_mask)
{
	const int ioprio = task_ioprio(tsk);
	const int ioprio_class = task_ioprio_class(tsk);
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue *cfqq = NULL;

	if (!is_sync) {
		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
		cfqq = *async_cfqq;
	}

	if (!cfqq) {
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
		if (!cfqq)
			return NULL;
	}

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
	if (!is_sync && !(*async_cfqq)) {
		atomic_inc(&cfqq->ref);
		*async_cfqq = cfqq;
	}

	atomic_inc(&cfqq->ref);
	return cfqq;
}

/*
 * We drop cfq io contexts lazily, so we may find a dead one.
 */
static void
cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
{
	WARN_ON(!list_empty(&cic->queue_list));

	if (ioc->ioc_data == cic)
		ioc->ioc_data = NULL;

	rb_erase(&cic->rb_node, &ioc->cic_root);
	kmem_cache_free(cfq_ioc_pool, cic);
	elv_ioc_count_dec(ioc_count);
}

static struct cfq_io_context *
cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
{
	struct rb_node *n;
	struct cfq_io_context *cic;
	void *k, *key = cfqd;

	if (unlikely(!ioc))
		return NULL;

	/*
	 * we maintain a last-hit cache, to avoid browsing over the tree
	 */
	cic = ioc->ioc_data;
	if (cic && cic->key == cfqd)
		return cic;

restart:
	n = ioc->cic_root.rb_node;
	while (n) {
		cic = rb_entry(n, struct cfq_io_context, rb_node);
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = cic->key;
		if (unlikely(!k)) {
			cfq_drop_dead_cic(ioc, cic);
			goto restart;
		}

		if (key < k)
			n = n->rb_left;
		else if (key > k)
			n = n->rb_right;
		else {
			ioc->ioc_data = cic;
			return cic;
		}
	}

	return NULL;
}

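/*
 * Insert cic into the io_context's rbtree, keyed by cfqd, and link it
 * onto the per-device cic list.
 */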
static inline void
cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
	     struct cfq_io_context *cic)
{
	struct rb_node **p;
	struct rb_node *parent;
	struct cfq_io_context *__cic;
	unsigned long flags;
	void *k;

	cic->ioc = ioc;
	cic->key = cfqd;

restart:
	parent = NULL;
	p = &ioc->cic_root.rb_node;
	while (*p) {
		parent = *p;
		__cic = rb_entry(parent, struct cfq_io_context, rb_node);
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = __cic->key;
		if (unlikely(!k)) {
			cfq_drop_dead_cic(ioc, __cic);
			goto restart;
		}

		if (cic->key < k)
			p = &(*p)->rb_left;
		else if (cic->key > k)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&cic->rb_node, parent, p);
	rb_insert_color(&cic->rb_node, &ioc->cic_root);

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
	list_add(&cic->queue_list, &cfqd->cic_list);
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq.
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct io_context *ioc = NULL;
	struct cfq_io_context *cic;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	ioc = get_io_context(gfp_mask, cfqd->queue->node);
	if (!ioc)
		return NULL;

	cic = cfq_cic_rb_lookup(cfqd, ioc);
	if (cic)
		goto out;

	cic = cfq_alloc_io_context(cfqd, gfp_mask);
	if (cic == NULL)
		goto err;

	cfq_cic_link(cfqd, ioc, cic);
out:
	smp_read_barrier_depends();
	if (unlikely(ioc->ioprio_changed))
		cfq_ioc_set_ioprio(ioc);

	return cic;
err:
	put_io_context(ioc);
	return NULL;
}

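/*
 * Track a decaying average of the process "thinktime": the gap between
 * one request completing and the next one being issued.
 */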
static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
	unsigned long elapsed = jiffies - cic->last_end_request;
	unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);

	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}

static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
		       struct request *rq)
{
	sector_t sdist;
	u64 total;

	if (cic->last_request_pos < rq->sector)
		sdist = rq->sector - cic->last_request_pos;
	else
		sdist = cic->last_request_pos - rq->sector;

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (cic->seek_samples <= 60) /* second&third seek */
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (cic->seek_mean * 4)	+ 2*1024*64);

	cic->seek_samples = (7*cic->seek_samples + 256) / 8;
	cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
	total = cic->seek_total + (cic->seek_samples/2);
	do_div(total, cic->seek_samples);
	cic->seek_mean = (sector_t)total;
}

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_context *cic)
{
	int enable_idle;

	if (!cfq_cfqq_sync(cfqq))
		return;

	enable_idle = cfq_cfqq_idle_window(cfqq);

	if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
	    (cfqd->hw_tag && CIC_SEEKY(cic)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
		if (cic->ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (enable_idle)
		cfq_mark_cfqq_idle_window(cfqq);
	else
		cfq_clear_cfqq_idle_window(cfqq);
}

/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no or if we aren't sure, a 1 will cause a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return 0;

	if (cfq_slice_used(cfqq))
		return 1;

	if (cfq_class_idle(new_cfqq))
		return 0;

	if (cfq_class_idle(cfqq))
		return 1;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return 1;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if (rq_is_meta(rq) && !cfqq->meta_pending)
		return 1;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return 0;

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, rq))
		return 1;

	return 0;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_slice_expired(cfqd, 1);

	/*
	 * Put the new queue at the front of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_context *cic = RQ_CIC(rq);

	if (rq_is_meta(rq))
		cfqq->meta_pending++;

	cfq_update_io_thinktime(cfqd, cic);
	cfq_update_io_seektime(cfqd, cic, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cic->last_request_pos = rq->sector + rq->nr_sectors;

	if (cfqq == cfqd->active_queue) {
		/*
		 * if we are waiting for a request for this queue, let it rip
		 * immediately and flag that we must not expire this queue
		 * just now
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			del_timer(&cfqd->idle_slice_timer);
			blk_start_queueing(cfqd->queue);
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired its mean thinktime or this new queue
		 * has some old slice time left and is of higher priority
		 */
		cfq_preempt_queue(cfqd, cfqq);
		cfq_mark_cfqq_must_dispatch(cfqq);
		blk_start_queueing(cfqd->queue);
	}
}

static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_init_prio_data(cfqq);

	cfq_add_rq_rb(rq);

	list_add_tail(&rq->queuelist, &cfqq->fifo);

	cfq_rq_enqueued(cfqd, cfqq, rq);
}

static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight--;

	if (!cfq_class_idle(cfqq))
		cfqd->last_end_request = now;

	if (sync)
		RQ_CIC(rq)->last_end_request = now;

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}
		if (cfq_slice_used(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
			cfq_arm_slice_timer(cfqd);
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}

/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
	if (has_fs_excl()) {
		/*
		 * boost idle prio on transactions that would lock out other
		 * users of the filesystem
		 */
		if (cfq_class_idle(cfqq))
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
		if (cfqq->ioprio > IOPRIO_NORM)
			cfqq->ioprio = IOPRIO_NORM;
	} else {
		/*
		 * check if we need to unboost the queue
		 */
		if (cfqq->ioprio_class != cfqq->org_ioprio_class)
			cfqq->ioprio_class = cfqq->org_ioprio_class;
		if (cfqq->ioprio != cfqq->org_ioprio)
			cfqq->ioprio = cfqq->org_ioprio;
	}
}

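/*
 * Force the allocation through (ELV_MQUEUE_MUST) the first time a queue
 * that is waiting for a request, or flagged must_alloc, asks during a slice.
 */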
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
	    !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(struct request_queue *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
	if (!cic)
		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
	if (cfqq) {
		cfq_init_prio_data(cfqq);
		cfq_prio_boost(cfqq);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		put_io_context(RQ_CIC(rq)->ioc);

		rq->elevator_private = NULL;
		rq->elevator_private2 = NULL;

		cfq_put_queue(cfqq);
	}
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	const int is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;
	unsigned long flags;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	cic = cfq_get_io_context(cfqd, gfp_mask);

	spin_lock_irqsave(q->queue_lock, flags);

	if (!cic)
		goto queue_fail;

	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, tsk, gfp_mask);

		if (!cfqq)
			goto queue_fail;

		cic_set_cfqq(cic, cfqq, is_sync);
	}

	cfqq->allocated[rw]++;
	cfq_clear_cfqq_must_alloc(cfqq);
	atomic_inc(&cfqq->ref);

	spin_unlock_irqrestore(q->queue_lock, flags);

	rq->elevator_private = cic;
	rq->elevator_private2 = cfqq;
	return 0;

queue_fail:
	if (cic)
		put_io_context(cic->ioc);

	cfq_schedule_dispatch(cfqd);
	spin_unlock_irqrestore(q->queue_lock, flags);
	return 1;
}

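/*
 * Work handler for cfqd->unplug_work: restarts the request queue from
 * kblockd process context once a dispatch has been scheduled.
 */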
static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	if ((cfqq = cfqd->active_queue) != NULL) {
		timed_out = 0;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke the request handler if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			goto out_kick;
		}
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

/*
 * Timer running if an idle class queue is waiting for service
 */
static void cfq_idle_class_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	unsigned long flags;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	/*
	 * race with a non-idle queue, reset timer
	 */
	if (!start_idle_class_timer(cfqd))
		cfq_schedule_dispatch(cfqd);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

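/*
 * Make sure neither idle timer nor the pending unplug work can fire
 * again while the queue is being torn down.
 */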
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	del_timer_sync(&cfqd->idle_class_timer);
	kblockd_flush_work(&cfqd->unplug_work);
}

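/*
 * Drop the references held on the cached per-priority async queues,
 * including the idle class one.
 */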
static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}

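/*
 * Elevator exit: expire any active queue, detach all io contexts, drop
 * the cached async queues and free the scheduler data.
 */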
static void cfq_exit_queue(elevator_t *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	while (!list_empty(&cfqd->cic_list)) {
		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
							struct cfq_io_context,
							queue_list);

		__cfq_exit_single_io_context(cfqd, cic);
	}

	cfq_put_async_queues(cfqd);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

	kfree(cfqd);
}

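/*
 * Set up per-queue scheduler state: the service tree, the idle timers,
 * the unplug work and the default tunables.
 */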
static void *cfq_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return NULL;

	cfqd->service_tree = CFQ_RB_ROOT;
	INIT_LIST_HEAD(&cfqd->cic_list);

	cfqd->queue = q;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	init_timer(&cfqd->idle_class_timer);
	cfqd->idle_class_timer.function = cfq_idle_class_timer;
	cfqd->idle_class_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->last_end_request = jiffies;
	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;

	return cfqd;
}

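/*
 * Slab caches for cfq_queue and cfq_io_context objects, created at module
 * init; cfq_slab_kill() also serves as the error path for a partial setup.
 */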
static void cfq_slab_kill(void)
{
	if (cfq_pool)
		kmem_cache_destroy(cfq_pool);
	if (cfq_ioc_pool)
		kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto fail;

	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
	if (!cfq_ioc_pool)
		goto fail;

	return 0;
fail:
	cfq_slab_kill();
	return -ENOMEM;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

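/*
 * __CONV selects the unit handling: 1 means the field is stored in jiffies
 * and converted to/from milliseconds for sysfs, 0 means a plain integer.
 */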
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(elevator_t *e, char *page)			\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
#undef STORE_FUNCTION

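/*
 * These entries are exposed by the elevator core under
 * /sys/block/<dev>/queue/iosched/ while cfq is the active scheduler, e.g.
 * (sda used as an example device, slice values given in milliseconds):
 *
 *	echo 8 > /sys/block/sda/queue/iosched/slice_idle
 */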
#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	__ATTR_NULL
};

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn = 		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_queue_empty_fn =	cfq_queue_empty,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
		.trim =				cfq_free_io_context,
	},
	.elevator_attrs =	cfq_attrs,
	.elevator_name =	"cfq",
	.elevator_owner =	THIS_MODULE,
};

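/*
 * Module init: register the elevator. Once registered, cfq can be
 * selected per device from userspace, e.g.:
 *
 *	echo cfq > /sys/block/sda/queue/scheduler
 */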
static int __init cfq_init(void)
{
	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

	if (cfq_slab_setup())
		return -ENOMEM;

	elv_register(&iosched_cfq);

	return 0;
}

static void __exit cfq_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);

	elv_unregister(&iosched_cfq);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();
	if (elv_ioc_count_read(ioc_count))
		wait_for_completion(ioc_gone);
	synchronize_rcu();
	cfq_slab_kill();
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");