/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 4;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
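
/*
 * At HZ == 1000 these defaults work out to 100ms sync and 40ms async
 * slices, 8ms of slice idling, and fifo deadlines of 250ms for async
 * requests (cfq_fifo_expire[0]) and 125ms for sync ones
 * (cfq_fifo_expire[1], indexed by the queue's sync flag in
 * cfq_check_fifo() below).
 */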

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)

#define RQ_CIC(rq)		\
	((struct cfq_io_context *) (rq)->elevator_private)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private2)

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define ASYNC			(0)
#define SYNC			(1)

#define sample_valid(samples)	((samples) > 80)

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }
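
/*
 * cfq_rb_first() below returns the cached ->left node when it is set,
 * making the common "pick the leftmost queue" operation O(1);
 * cfq_rb_erase() invalidates the cache whenever that node is removed.
 */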

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct cfq_rb_root service_tree;
	unsigned int busy_queues;

	int rq_in_driver;
	int sync_flight;

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	int hw_tag_samples;
	int rq_in_driver_peak;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;
	unsigned long last_end_request;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;

	struct list_head cic_list;
};

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_end;
	long slice_resid;

	/* pending metadata requests */
	int meta_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	pid_t pid;
};

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(queue_new);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
#undef CFQ_CFQQ_FNS
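
/*
 * Each CFQ_CFQQ_FNS(name) expansion above generates three helpers; e.g.
 * CFQ_CFQQ_FNS(on_rr) yields cfq_mark_cfqq_on_rr(), cfq_clear_cfqq_on_rr()
 * and the cfq_cfqq_on_rr() test used throughout the code below.
 */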

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
				       struct io_context *, gfp_t);
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
						struct io_context *);

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
					    int is_sync)
{
	return cic->cfqq[!!is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_context *cic,
				struct cfq_queue *cfqq, int is_sync)
{
	cic->cfqq[!!is_sync] = cfqq;
}

/*
 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline int cfq_bio_sync(struct bio *bio)
{
	if (bio_data_dir(bio) == READ || bio_sync(bio))
		return 1;

	return 0;
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}

static int cfq_queue_empty(struct request_queue *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return !cfqd->busy_queues;
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get the full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
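
/*
 * Example with the default tunables at HZ == 1000: base_slice is 100ms for
 * sync queues, so ioprio 4 (the default) gets 100ms, ioprio 0 gets
 * 100 + 20 * 4 = 180ms, and ioprio 7 gets 100 - 20 * 3 = 40ms.
 */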

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline int cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return 0;
	if (time_before(jiffies, cfqq->slice_end))
		return 0;

	return 1;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
		return rq1;
	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
		return rq2;

	s1 = rq1->sector;
	s2 = rq2->sector;

	last = cfqd->last_position;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
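
/*
 * Example: with the default cfq_back_max of 16384 KiB, back_max is 32768
 * sectors; a request 1000 sectors behind the head gets distance 2000 with
 * the default back penalty of 2, while one 40000 sectors behind "wraps".
 */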

/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;

	rb_erase(n, &root->rb);
	RB_CLEAR_NODE(n);
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev);
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}
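
/*
 * Worked example with the defaults at HZ == 1000: with three busy queues,
 * cfq_prio_slice(cfqd, 1, 0) is 180ms and a sync ioprio-4 queue's slice is
 * 100ms, so its offset is (3 - 1) * (180 - 100) = 160ms; higher-priority
 * queues get smaller offsets and therefore earlier service.
 */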

/*
 * The cfqd->service_tree holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd,
				    struct cfq_queue *cfqq, int add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	int left;

	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&cfqd->service_tree.rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key += cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else
		rb_key = 0;

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key)
			return;

		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
	}

	left = 1;
	parent = NULL;
	p = &cfqd->service_tree.rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort RT queues first, we always want to give
		 * preference to them. IDLE queues go to the back;
		 * after that, sort on the next service time.
		 */
		if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
			n = &(*p)->rb_right;
		else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
			n = &(*p)->rb_right;
		else if (rb_key < __cfqq->rb_key)
			n = &(*p)->rb_left;
		else
			n = &(*p)->rb_right;

		if (n == &(*p)->rb_right)
			left = 0;

		p = n;
	}

	if (left)
		cfqd->service_tree.left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
}
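
/*
 * rb_key is thus an absolute jiffies-based deadline: cfq_slice_offset()
 * pushes busier, lower-priority queues further to the right, add_front
 * (used by cfq_preempt_queue()) inserts at key 0 to sort first, and idle
 * class queues always sort behind the current last entry.
 */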

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq))
		cfq_service_tree_add(cfqd, cfqq, 0);
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node))
		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
	BUG_ON(!cfqq->next_rq);
}

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_add_rq_rb(rq);
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
						cfqd->rq_in_driver);

	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
						cfqd->rq_in_driver);
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	if (rq_is_meta(rq)) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}

static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(next->start_time, rq->start_time))
		list_move(&rq->queuelist, &next->queuelist);

	cfq_remove_request(next);
}

static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return 0;

	/*
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return 0;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq == RQ_CFQQ(rq))
		return 1;

	return 0;
}

static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active");
		cfqq->slice_end = 0;
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);
		cfq_clear_cfqq_queue_new(cfqq);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int timed_out)
{
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

	cfq_clear_cfqq_must_dispatch(cfqq);
	cfq_clear_cfqq_wait_request(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
		cfqq->slice_resid = cfqq->slice_end - jiffies;
		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
	}

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
		return NULL;

	return cfq_rb_first(&cfqd->service_tree);
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfq_get_next_queue(cfqd);
	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (rq->sector >= cfqd->last_position)
		return rq->sector - cfqd->last_position;
	else
		return cfqd->last_position - rq->sector;
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
{
	struct cfq_io_context *cic = cfqd->active_cic;

	if (!sample_valid(cic->seek_samples))
		return 0;

	return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
}

static int cfq_close_cooperator(struct cfq_data *cfq_data,
				struct cfq_queue *cfqq)
{
	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	return 0;
}

#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
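
/*
 * seek_mean is in 512-byte sectors, so a process counts as "seeky" once
 * its mean seek distance exceeds 8192 sectors (4 MiB); sample_valid()
 * additionally requires more than 80 samples before a mean is trusted.
 */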

static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_context *cic;
	unsigned long sl;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
		return;

	/*
	 * still requests with the driver, don't idle
	 */
	if (cfqd->rq_in_driver)
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
		return;

	/*
	 * See if this prio level has a good candidate
	 */
	if (cfq_close_cooperator(cfqd, cfqq) &&
	    (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
		return;

	cfq_mark_cfqq_must_dispatch(cfqq);
	cfq_mark_cfqq_wait_request(cfqq);

	/*
	 * we don't want to idle for seeks, but we do want to allow
	 * fair distribution of slice time for a process doing back-to-back
	 * seeks. so allow a little bit of time for him to submit a new rq
	 */
	sl = cfqd->cfq_slice_idle;
	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
		sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	cfq_log(cfqd, "arm_idle: %lu", sl);
}

/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");

	cfq_remove_request(rq);
	cfqq->dispatched++;
	elv_dispatch_sort(q, rq);

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight++;
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq;
	int fifo;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	fifo = cfq_cfqq_sync(cfqq);
	rq = rq_entry_fifo(cfqq->fifo.next);

	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
		rq = NULL;

	cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
	return rq;
}

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}
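
/*
 * Example with the default cfq_slice_async_rq of 2: an async ioprio-4
 * queue may dispatch up to 2 * (2 + 2 * (8 - 1 - 4)) = 16 requests per
 * slice; ioprio 0 gets 32, ioprio 7 only 4.
 */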

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq))
		goto expire;

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer) ||
	    (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	cfqq = cfq_set_active_queue(cfqd);
keep_queue:
	return cfqq;
}

/*
 * Dispatch some requests from cfqq, moving them to the request queue
 * dispatch list.
 */
static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			int max_dispatch)
{
	int dispatched = 0;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	do {
		struct request *rq;

		/*
		 * follow expired path, else get first next available
		 */
		rq = cfq_check_fifo(cfqq);
		if (rq == NULL)
			rq = cfqq->next_rq;

		/*
		 * finally, insert request into driver dispatch list
		 */
		cfq_dispatch_insert(cfqd->queue, rq);

		dispatched++;

		if (!cfqd->active_cic) {
			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
			cfqd->active_cic = RQ_CIC(rq);
		}

		if (RB_EMPTY_ROOT(&cfqq->sort_list))
			break;

	} while (dispatched < max_dispatch);

	/*
	 * expire an async queue immediately if it has used up its slice. an
	 * idle queue always expires after one dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0);
	}

	return dispatched;
}

static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	int dispatched = 0;

	while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);

	cfq_slice_expired(cfqd, 0);

	BUG_ON(cfqd->busy_queues);

	cfq_log(cfqd, "forced_dispatch=%d\n", dispatched);
	return dispatched;
}

static int cfq_dispatch_requests(struct request_queue *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;
	int dispatched;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	dispatched = 0;
	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
		int max_dispatch;

		max_dispatch = cfqd->cfq_quantum;
		if (cfq_class_idle(cfqq))
			max_dispatch = 1;

		if (cfqq->dispatched >= max_dispatch) {
			if (cfqd->busy_queues > 1)
				break;
			if (cfqq->dispatched >= 4 * max_dispatch)
				break;
		}

		if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
			break;

		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_wait_request(cfqq);
		del_timer(&cfqd->idle_slice_timer);

		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
	}

	cfq_log(cfqd, "dispatched=%d", dispatched);
	return dispatched;
}

/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;

	BUG_ON(atomic_read(&cfqq->ref) <= 0);

	if (!atomic_dec_and_test(&cfqq->ref))
		return;

	cfq_log_cfqq(cfqd, cfqq, "put_queue");
	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	BUG_ON(cfq_cfqq_on_rr(cfqq));

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	kmem_cache_free(cfq_pool, cfqq);
}

/*
 * Must always be called with the rcu_read_lock() held
 */
static void
__call_for_each_cic(struct io_context *ioc,
		    void (*func)(struct io_context *, struct cfq_io_context *))
{
	struct cfq_io_context *cic;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
		func(ioc, cic);
}

/*
 * Call func for each cic attached to this ioc.
 */
static void
call_for_each_cic(struct io_context *ioc,
		  void (*func)(struct io_context *, struct cfq_io_context *))
{
	rcu_read_lock();
	__call_for_each_cic(ioc, func);
	rcu_read_unlock();
}

static void cfq_cic_free_rcu(struct rcu_head *head)
{
	struct cfq_io_context *cic;

	cic = container_of(head, struct cfq_io_context, rcu_head);

	kmem_cache_free(cfq_ioc_pool, cic);
	elv_ioc_count_dec(ioc_count);

	if (ioc_gone) {
		/*
		 * CFQ scheduler is exiting, grab exit lock and check
		 * the pending io context count. If it hits zero,
		 * complete ioc_gone and set it back to NULL
		 */
		spin_lock(&ioc_gone_lock);
		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
			complete(ioc_gone);
			ioc_gone = NULL;
		}
		spin_unlock(&ioc_gone_lock);
	}
}

static void cfq_cic_free(struct cfq_io_context *cic)
{
	call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
}

static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
{
	unsigned long flags;

	BUG_ON(!cic->dead_key);

	spin_lock_irqsave(&ioc->lock, flags);
	radix_tree_delete(&ioc->radix_root, cic->dead_key);
	hlist_del_rcu(&cic->cic_list);
	spin_unlock_irqrestore(&ioc->lock, flags);

	cfq_cic_free(cic);
}

/*
 * Must be called with rcu_read_lock() held or preemption otherwise disabled.
 * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
 * and ->trim() which is called with the task lock held
 */
static void cfq_free_io_context(struct io_context *ioc)
{
	/*
	 * ioc->refcount is zero here, or we are called from elv_unregister(),
	 * so no more cic's are allowed to be linked into this ioc.  So it
	 * should be ok to iterate over the known list, we will see all cic's
	 * since no new ones are added.
	 */
	__call_for_each_cic(ioc, cic_free_func);
}

static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_queue(cfqq);
}

static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
					 struct cfq_io_context *cic)
{
	struct io_context *ioc = cic->ioc;

	list_del_init(&cic->queue_list);

	/*
	 * Make sure key == NULL is seen for dead queues
	 */
	smp_wmb();
	cic->dead_key = (unsigned long) cic->key;
	cic->key = NULL;

	if (ioc->ioc_data == cic)
		rcu_assign_pointer(ioc->ioc_data, NULL);

	if (cic->cfqq[ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
		cic->cfqq[ASYNC] = NULL;
	}

	if (cic->cfqq[SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
		cic->cfqq[SYNC] = NULL;
	}
}

static void cfq_exit_single_io_context(struct io_context *ioc,
				       struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;

	if (cfqd) {
		struct request_queue *q = cfqd->queue;
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		__cfq_exit_single_io_context(cfqd, cic);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * The process that ioc belongs to has exited, we need to clean up
 * and put the internal structures we have that belong to that process.
 */
static void cfq_exit_io_context(struct io_context *ioc)
{
	call_for_each_cic(ioc, cfq_exit_single_io_context);
}

static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct cfq_io_context *cic;

	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
							cfqd->queue->node);
	if (cic) {
		cic->last_end_request = jiffies;
		INIT_LIST_HEAD(&cic->queue_list);
		INIT_HLIST_NODE(&cic->cic_list);
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
		elv_ioc_count_inc(ioc_count);
	}

	return cic;
}

static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, inherit CPU scheduling settings
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = task_nice_ioclass(tsk);
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class = cfqq->ioprio_class;
	cfq_clear_cfqq_prio_changed(cfqq);
}

static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;
	struct cfq_queue *cfqq;
	unsigned long flags;

	if (unlikely(!cfqd))
		return;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cic->cfqq[ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;
		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
	}

	cfqq = cic->cfqq[SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_ioc_set_ioprio(struct io_context *ioc)
{
	call_for_each_cic(ioc, changed_ioprio);
	ioc->ioprio_changed = 0;
}

static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
		     struct io_context *ioc, gfp_t gfp_mask)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	struct cfq_io_context *cic;

retry:
	cic = cfq_cic_lookup(cfqd, ioc);
	/* cic always exists here */
	cfqq = cic_to_cfqq(cic, is_sync);

	if (!cfqq) {
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			/*
			 * Inform the allocator of the fact that we will
			 * just repeat this allocation if it fails, to allow
			 * the allocator to do whatever it needs to attempt to
			 * free memory.
			 */
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
					cfqd->queue->node);
			spin_lock_irq(cfqd->queue->queue_lock);
			goto retry;
		} else {
			cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_ZERO,
					cfqd->queue->node);
			if (!cfqq)
				goto out;
		}

		RB_CLEAR_NODE(&cfqq->rb_node);
		INIT_LIST_HEAD(&cfqq->fifo);

		atomic_set(&cfqq->ref, 0);
		cfqq->cfqd = cfqd;

		cfq_mark_cfqq_prio_changed(cfqq);
		cfq_mark_cfqq_queue_new(cfqq);

		cfq_init_prio_data(cfqq, ioc);

		if (is_sync) {
			if (!cfq_class_idle(cfqq))
				cfq_mark_cfqq_idle_window(cfqq);
			cfq_mark_cfqq_sync(cfqq);
		}
		cfqq->pid = current->pid;
		cfq_log_cfqq(cfqd, cfqq, "alloced");
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

out:
	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
	return cfqq;
}

static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &cfqd->async_cfqq[0][ioprio];
	case IOPRIO_CLASS_BE:
		return &cfqd->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqd->async_idle_cfqq;
	default:
		BUG();
	}
}

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
	      gfp_t gfp_mask)
{
	const int ioprio = task_ioprio(ioc);
	const int ioprio_class = task_ioprio_class(ioc);
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue *cfqq = NULL;

	if (!is_sync) {
		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
		cfqq = *async_cfqq;
	}

	if (!cfqq) {
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
		if (!cfqq)
			return NULL;
	}

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
	if (!is_sync && !(*async_cfqq)) {
		atomic_inc(&cfqq->ref);
		*async_cfqq = cfqq;
	}

	atomic_inc(&cfqq->ref);
	return cfqq;
}

/*
 * We drop cfq io contexts lazily, so we may find a dead one.
 */
static void
cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
		  struct cfq_io_context *cic)
{
	unsigned long flags;

	WARN_ON(!list_empty(&cic->queue_list));

	spin_lock_irqsave(&ioc->lock, flags);

	BUG_ON(ioc->ioc_data == cic);

	radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
	hlist_del_rcu(&cic->cic_list);
	spin_unlock_irqrestore(&ioc->lock, flags);

	cfq_cic_free(cic);
}

static struct cfq_io_context *
cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
{
	struct cfq_io_context *cic;
	unsigned long flags;
	void *k;

	if (unlikely(!ioc))
		return NULL;

	rcu_read_lock();

	/*
	 * we maintain a last-hit cache, to avoid browsing over the tree
	 */
	cic = rcu_dereference(ioc->ioc_data);
	if (cic && cic->key == cfqd) {
		rcu_read_unlock();
		return cic;
	}

	do {
		cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
		rcu_read_unlock();
		if (!cic)
			break;
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = cic->key;
		if (unlikely(!k)) {
			cfq_drop_dead_cic(cfqd, ioc, cic);
			rcu_read_lock();
			continue;
		}

		spin_lock_irqsave(&ioc->lock, flags);
		rcu_assign_pointer(ioc->ioc_data, cic);
		spin_unlock_irqrestore(&ioc->lock, flags);
		break;
	} while (1);

	return cic;
}

/*
 * Add cic into ioc, using cfqd as the search key. This enables us to lookup
 * the process specific cfq io context when entered from the block layer.
 * Also adds the cic to a per-cfqd list, used when this queue is removed.
 */
static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
			struct cfq_io_context *cic, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

	ret = radix_tree_preload(gfp_mask);
	if (!ret) {
		cic->ioc = ioc;
		cic->key = cfqd;

		spin_lock_irqsave(&ioc->lock, flags);
		ret = radix_tree_insert(&ioc->radix_root,
						(unsigned long) cfqd, cic);
		if (!ret)
			hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
		spin_unlock_irqrestore(&ioc->lock, flags);

		radix_tree_preload_end();

		if (!ret) {
			spin_lock_irqsave(cfqd->queue->queue_lock, flags);
			list_add(&cic->queue_list, &cfqd->cic_list);
			spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
		}
	}

	if (ret)
		printk(KERN_ERR "cfq: cic link failed!\n");

	return ret;
}

/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq.
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct io_context *ioc = NULL;
	struct cfq_io_context *cic;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	ioc = get_io_context(gfp_mask, cfqd->queue->node);
	if (!ioc)
		return NULL;

	cic = cfq_cic_lookup(cfqd, ioc);
	if (cic)
		goto out;

	cic = cfq_alloc_io_context(cfqd, gfp_mask);
	if (cic == NULL)
		goto err;

	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
		goto err_free;

out:
	smp_read_barrier_depends();
	if (unlikely(ioc->ioprio_changed))
		cfq_ioc_set_ioprio(ioc);

	return cic;
err_free:
	cfq_cic_free(cic);
err:
	put_io_context(ioc);
	return NULL;
}

static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
	unsigned long elapsed = jiffies - cic->last_end_request;
	unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);

	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}
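
/*
 * These are exponentially weighted moving averages in fixed point: each
 * update keeps 7/8 of the old value, samples and totals are scaled by 256,
 * and the +128 rounds the final division when computing the mean.
 */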

static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
		       struct request *rq)
{
	sector_t sdist;
	u64 total;

	if (cic->last_request_pos < rq->sector)
		sdist = rq->sector - cic->last_request_pos;
	else
		sdist = cic->last_request_pos - rq->sector;

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (cic->seek_samples <= 60) /* second&third seek */
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (cic->seek_mean * 4)	+ 2*1024*64);

	cic->seek_samples = (7*cic->seek_samples + 256) / 8;
	cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
	total = cic->seek_total + (cic->seek_samples/2);
	do_div(total, cic->seek_samples);
	cic->seek_mean = (sector_t)total;
}

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_context *cic)
{
	int old_idle, enable_idle;

	/*
	 * Don't idle for async or idle io prio class
	 */
	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
		return;

	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);

	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
	    (cfqd->hw_tag && CIC_SEEKY(cic)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
		if (cic->ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (old_idle != enable_idle) {
		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
		if (enable_idle)
			cfq_mark_cfqq_idle_window(cfqq);
		else
			cfq_clear_cfqq_idle_window(cfqq);
	}
}

/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no or if we aren't sure, a 1 will cause a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return 0;

	if (cfq_slice_used(cfqq))
		return 1;

	if (cfq_class_idle(new_cfqq))
		return 0;

	if (cfq_class_idle(cfqq))
		return 1;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return 1;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if (rq_is_meta(rq) && !cfqq->meta_pending)
		return 1;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return 0;

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, rq))
		return 1;

	return 0;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "preempt");
	cfq_slice_expired(cfqd, 1);

	/*
	 * Put the new queue at the front of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_context *cic = RQ_CIC(rq);

	cfqd->rq_queued++;
	if (rq_is_meta(rq))
		cfqq->meta_pending++;

	cfq_update_io_thinktime(cfqd, cic);
	cfq_update_io_seektime(cfqd, cic, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cic->last_request_pos = rq->sector + rq->nr_sectors;

	if (cfqq == cfqd->active_queue) {
		/*
		 * if we are waiting for a request for this queue, let it rip
		 * immediately and flag that we must not expire this queue
		 * just now
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			del_timer(&cfqd->idle_slice_timer);
			blk_start_queueing(cfqd->queue);
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired its mean thinktime or this new queue
		 * has some old slice time left and is of higher priority
		 */
		cfq_preempt_queue(cfqd, cfqq);
		cfq_mark_cfqq_must_dispatch(cfqq);
		blk_start_queueing(cfqd->queue);
	}
}

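/*
 * Elevator add_req hook: sort the request into the cfqq's rbtree, put it
 * on the fifo for expiry tracking, then let cfq_rq_enqueued() decide
 * whether to dispatch immediately or preempt the active queue.
 */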
static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "insert_request");
	cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);

	cfq_add_rq_rb(rq);

	list_add_tail(&rq->queuelist, &cfqq->fifo);

	cfq_rq_enqueued(cfqd, cfqq, rq);
}

/*
 * Update hw_tag based on peak queue depth over 50 samples under
 * sufficient load.
 */
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
	if (cfqd->rq_in_driver > cfqd->rq_in_driver_peak)
		cfqd->rq_in_driver_peak = cfqd->rq_in_driver;

	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
		return;

	if (cfqd->hw_tag_samples++ < 50)
		return;

	if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
		cfqd->hw_tag = 1;
	else
		cfqd->hw_tag = 0;

	cfqd->hw_tag_samples = 0;
	cfqd->rq_in_driver_peak = 0;
}

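/*
 * Elevator completion hook: drop the in-driver accounting for rq and, if
 * its queue is the active one, set up a fresh slice, expire a used-up
 * slice, or arm the idle timer to wait for more sync IO.
 */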
static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;
	cfq_log_cfqq(cfqd, cfqq, "complete");

	cfq_update_hw_tag(cfqd);

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight--;

	if (!cfq_class_idle(cfqq))
		cfqd->last_end_request = now;

	if (sync)
		RQ_CIC(rq)->last_end_request = now;

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}
		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
			cfq_arm_slice_timer(cfqd);
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}

/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
	if (has_fs_excl()) {
		/*
		 * boost idle prio on transactions that would lock out other
		 * users of the filesystem
		 */
		if (cfq_class_idle(cfqq))
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
		if (cfqq->ioprio > IOPRIO_NORM)
			cfqq->ioprio = IOPRIO_NORM;
	} else {
		/*
		 * check if we need to unboost the queue
		 */
		if (cfqq->ioprio_class != cfqq->org_ioprio_class)
			cfqq->ioprio_class = cfqq->org_ioprio_class;
		if (cfqq->ioprio != cfqq->org_ioprio)
			cfqq->ioprio = cfqq->org_ioprio;
	}
}

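/*
 * Force the allocation (ELV_MQUEUE_MUST) the first time in a slice that a
 * waiting queue asks for a request; later callers are merely allowed.
 */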
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
	    !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

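/*
 * Elevator may_queue hook: look up (but never create) the cfqq this task
 * would use for the given data direction and ask it whether a new request
 * may, or must, be allocated.
 */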
static int cfq_may_queue(struct request_queue *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
	if (cfqq) {
		cfq_init_prio_data(cfqq, cic->ioc);
		cfq_prio_boost(cfqq);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		put_io_context(RQ_CIC(rq)->ioc);

		rq->elevator_private = NULL;
		rq->elevator_private2 = NULL;

		cfq_put_queue(cfqq);
	}
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	const int is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;
	unsigned long flags;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	cic = cfq_get_io_context(cfqd, gfp_mask);

	spin_lock_irqsave(q->queue_lock, flags);

	if (!cic)
		goto queue_fail;

	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);

		if (!cfqq)
			goto queue_fail;

		cic_set_cfqq(cic, cfqq, is_sync);
	}

	cfqq->allocated[rw]++;
	cfq_clear_cfqq_must_alloc(cfqq);
	atomic_inc(&cfqq->ref);

	spin_unlock_irqrestore(q->queue_lock, flags);

	rq->elevator_private = cic;
	rq->elevator_private2 = cfqq;
	return 0;

queue_fail:
	if (cic)
		put_io_context(cic->ioc);

	cfq_schedule_dispatch(cfqd);
	spin_unlock_irqrestore(q->queue_lock, flags);
	cfq_log(cfqd, "set_request fail");
	return 1;
}

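/*
 * Work handler for cfqd->unplug_work: restarts the request queue from
 * process context.
 */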
static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke the request handler if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			goto out_kick;
		}
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

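/*
 * Make sure neither the idle slice timer nor the unplug work can fire
 * once teardown has started.
 */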
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	kblockd_flush_work(&cfqd->unplug_work);
}

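/*
 * Drop the scheduler's references on the shared per-priority async
 * queues, plus the idle class queue if it was ever created.
 */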
static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}

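/*
 * Elevator exit hook: expire the active queue, unlink all io contexts,
 * release the async queues and free the per-device cfq_data.
 */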
static void cfq_exit_queue(elevator_t *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	while (!list_empty(&cfqd->cic_list)) {
		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
							struct cfq_io_context,
							queue_list);

		__cfq_exit_single_io_context(cfqd, cic);
	}

	cfq_put_async_queues(cfqd);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

	kfree(cfqd);
}

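/*
 * Elevator init hook: allocate the per-device cfq_data and seed the
 * service tree, timers and every tunable with the defaults from the top
 * of this file.
 */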
static void *cfq_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return NULL;

	cfqd->service_tree = CFQ_RB_ROOT;
	INIT_LIST_HEAD(&cfqd->cic_list);

	cfqd->queue = q;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->last_end_request = jiffies;
	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->hw_tag = 1;

	return cfqd;
}

static void cfq_slab_kill(void)
{
	/*
	 * Caller already ensured that pending RCU callbacks are completed,
	 * so we should have no busy allocations at this point.
	 */
	if (cfq_pool)
		kmem_cache_destroy(cfq_pool);
	if (cfq_ioc_pool)
		kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto fail;

	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
	if (!cfq_ioc_pool)
		goto fail;

	return 0;
fail:
	cfq_slab_kill();
	return -ENOMEM;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

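/*
 * The show/store helpers below take a __CONV flag: tunables kept in
 * jiffies are exported in milliseconds. With HZ=100, for example, the
 * default slice_sync of HZ/10 is stored as 10 jiffies but reads back as
 * 100 (msec) through sysfs.
 */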
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(elevator_t *e, char *page)			\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
#undef STORE_FUNCTION

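/*
 * Each CFQ_ATTR(name) entry shows up as a read-write file named <name>
 * under /sys/block/<dev>/queue/iosched/, wired to the show/store pair
 * generated above.
 */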
#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	__ATTR_NULL
};

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn = 		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_queue_empty_fn =	cfq_queue_empty,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
		.trim =				cfq_free_io_context,
	},
	.elevator_attrs =	cfq_attrs,
	.elevator_name =	"cfq",
	.elevator_owner =	THIS_MODULE,
};

static int __init cfq_init(void)
{
	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

	if (cfq_slab_setup())
		return -ENOMEM;

	elv_register(&iosched_cfq);

	return 0;
}

static void __exit cfq_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);
	elv_unregister(&iosched_cfq);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();

	/*
	 * this also protects us from entering cfq_slab_kill() with
	 * pending RCU callbacks
	 */
	if (elv_ioc_count_read(ioc_count))
		wait_for_completion(&all_gone);
	cfq_slab_kill();
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");