cfq-iosched.c 108.0 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6
/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
7
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
L
Linus Torvalds 已提交
8 9
 */
#include <linux/module.h>
10
#include <linux/slab.h>
A
Al Viro 已提交
11 12
#include <linux/blkdev.h>
#include <linux/elevator.h>
R
Randy Dunlap 已提交
13
#include <linux/jiffies.h>
L
Linus Torvalds 已提交
14
#include <linux/rbtree.h>
15
#include <linux/ioprio.h>
16
#include <linux/blktrace_api.h>
17
#include "blk.h"
18
#include "blk-cgroup.h"
L
Linus Torvalds 已提交
19 20 21 22

/*
 * tunables
 */
23
/* max queue in one round of service */
S
Shaohua Li 已提交
24
static const int cfq_quantum = 8;
25
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
26 27 28 29
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
30
static const int cfq_slice_sync = HZ / 10;
J
Jens Axboe 已提交
31
static int cfq_slice_async = HZ / 25;
32
static const int cfq_slice_async_rq = 2;
33
static int cfq_slice_idle = HZ / 125;
34
static int cfq_group_idle = HZ / 125;
35 36
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;
37

38
/*
39
 * offset from end of service tree
40
 */
41
#define CFQ_IDLE_DELAY		(HZ / 5)
42 43 44 45 46 47

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

48
#define CFQ_SLICE_SCALE		(5)
49
#define CFQ_HW_QUEUE_MIN	(5)
50
#define CFQ_SERVICE_SHIFT       12
51

52
#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
53
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
54
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
55
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
56

57 58 59
#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])
L
Linus Torvalds 已提交
60

61
static struct kmem_cache *cfq_pool;
L
Linus Torvalds 已提交
62

63 64 65 66
#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

67
#define sample_valid(samples)	((samples) > 80)
68
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)
69

70 71 72 73 74 75 76 77
struct cfq_ttime {
	unsigned long last_end_request;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
};

78 79 80 81 82 83 84 85 86
/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
87
	unsigned count;
88
	unsigned total_weight;
89
	u64 min_vdisktime;
90
	struct cfq_ttime ttime;
91
};
92 93
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
			.ttime = {.last_end_request = jiffies,},}
94

95 96 97 98 99
/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
100
	int ref;
101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

124 125
	/* time when queue got scheduled in to dispatch first request. */
	unsigned long dispatch_start;
126
	unsigned int allocated_slice;
127
	unsigned int slice_dispatch;
128 129
	/* time when first request from queue completed and slice started. */
	unsigned long slice_start;
130 131 132
	unsigned long slice_end;
	long slice_resid;

133 134
	/* pending priority requests */
	int prio_pending;
135 136 137 138 139
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
140
	unsigned short ioprio_class;
141

142 143
	pid_t pid;

144
	u32 seek_history;
145 146
	sector_t last_request_pos;

147
	struct cfq_rb_root *service_tree;
J
Jeff Moyer 已提交
148
	struct cfq_queue *new_cfqq;
149
	struct cfq_group *cfqg;
150 151
	/* Number of sectors dispatched from queue in single dispatch round */
	unsigned long nr_sectors;
152 153
};

154
/*
155
 * First index in the service_trees.
156 157
 * IDLE is handled separately, so it has negative index
 */
158
enum wl_class_t {
159
	BE_WORKLOAD = 0,
160 161
	RT_WORKLOAD = 1,
	IDLE_WORKLOAD = 2,
162
	CFQ_PRIO_NR,
163 164
};

165 166 167 168 169 170 171 172 173
/*
 * Second index in the service_trees.
 */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};

174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202
struct cfqg_stats {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total sectors transferred */
	struct blkg_stat		sectors;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat		time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat		unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat		avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat		avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat		dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat		group_wait_time;
T
Tejun Heo 已提交
203
	/* time spent idling for this blkcg_gq */
204 205 206 207 208 209 210 211 212 213 214 215
	struct blkg_stat		idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat		empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t			start_group_wait_time;
	uint64_t			start_idle_time;
	uint64_t			start_empty_time;
	uint16_t			flags;
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
};

216 217
/* This is per cgroup per device grouping structure */
struct cfq_group {
218 219 220
	/* must be the first member */
	struct blkg_policy_data pd;

221 222 223 224 225
	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;
226
	unsigned int weight;
227
	unsigned int new_weight;
228
	unsigned int dev_weight;
229 230 231 232

	/* number of cfqq currently on this group */
	int nr_cfqq;

233
	/*
234
	 * Per group busy queues average. Useful for workload slice calc. We
235 236 237 238 239 240 241 242 243 244 245
	 * create the array for each prio class but at run time it is used
	 * only for RT and BE class and slot for IDLE class remains unused.
	 * This is primarily done to avoid confusion and a gcc warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided in subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
	 * class there is no subclassification and all the cfq queues go on
	 * a single tree service_tree_idle.
246 247 248 249
	 * Counts are embedded in the cfq_rb_root
	 */
	struct cfq_rb_root service_trees[2][3];
	struct cfq_rb_root service_tree_idle;
250

251 252 253
	unsigned long saved_wl_slice;
	enum wl_type_t saved_wl_type;
	enum wl_class_t saved_wl_class;
254

255 256
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;
S
Shaohua Li 已提交
257
	struct cfq_ttime ttime;
258
	struct cfqg_stats stats;
259
};
260

261 262 263 264
struct cfq_io_cq {
	struct io_cq		icq;		/* must be the first member */
	struct cfq_queue	*cfqq[2];
	struct cfq_ttime	ttime;
T
Tejun Heo 已提交
265 266 267 268
	int			ioprio;		/* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	uint64_t		blkcg_id;	/* the current blkcg ID */
#endif
269 270
};

271 272 273
/*
 * Per block device queue structure
 */
L
Linus Torvalds 已提交
274
struct cfq_data {
275
	struct request_queue *queue;
276 277
	/* Root service tree for cfq_groups */
	struct cfq_rb_root grp_service_tree;
278
	struct cfq_group *root_group;
279

280 281
	/*
	 * The priority currently being served
282
	 */
283 284
	enum wl_class_t serving_wl_class;
	enum wl_type_t serving_wl_type;
285
	unsigned long workload_expires;
286
	struct cfq_group *serving_group;
287 288 289 290 291 292 293 294

	/*
	 * Each priority tree is sorted by next_request position.  These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

295
	unsigned int busy_queues;
296
	unsigned int busy_sync_queues;
297

298 299
	int rq_in_driver;
	int rq_in_flight[2];
300 301 302 303 304

	/*
	 * queue-depth detection
	 */
	int rq_queued;
305
	int hw_tag;
306 307 308 309 310 311 312 313
	/*
	 * hw_tag can be
	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
	 *  0 => no NCQ
	 */
	int hw_tag_est_depth;
	unsigned int hw_tag_samples;
L
Linus Torvalds 已提交
314

315 316 317 318
	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
319
	struct work_struct unplug_work;
L
Linus Torvalds 已提交
320

321
	struct cfq_queue *active_queue;
322
	struct cfq_io_cq *active_cic;
323

324 325 326 327 328
	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;
329

J
Jens Axboe 已提交
330
	sector_t last_position;
L
Linus Torvalds 已提交
331 332 333 334 335

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
336
	unsigned int cfq_fifo_expire[2];
L
Linus Torvalds 已提交
337 338
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
339 340 341
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;
342
	unsigned int cfq_group_idle;
343
	unsigned int cfq_latency;
344
	unsigned int cfq_target_latency;
345

346 347 348 349
	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;
350

351
	unsigned long last_delayed_sync;
L
Linus Torvalds 已提交
352 353
};

354 355
static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);

356
static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
357
					    enum wl_class_t class,
358
					    enum wl_type_t type)
359
{
360 361 362
	if (!cfqg)
		return NULL;

363
	if (class == IDLE_WORKLOAD)
364
		return &cfqg->service_tree_idle;
365

366
	return &cfqg->service_trees[class][type];
367 368
}

J
Jens Axboe 已提交
369
enum cfqq_state_flags {
370 371
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
372
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
373 374 375 376
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
377
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
378
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
379
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
380
	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be splitted */
381
	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
382
	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
J
Jens Axboe 已提交
383 384 385 386 387
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
388
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
J
Jens Axboe 已提交
389 390 391
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
392
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
J
Jens Axboe 已提交
393 394 395
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
396
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
J
Jens Axboe 已提交
397 398 399 400
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
401
CFQ_CFQQ_FNS(must_dispatch);
J
Jens Axboe 已提交
402 403 404 405
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
406
CFQ_CFQQ_FNS(slice_new);
407
CFQ_CFQQ_FNS(sync);
408
CFQ_CFQQ_FNS(coop);
409
CFQ_CFQQ_FNS(split_coop);
410
CFQ_CFQQ_FNS(deep);
411
CFQ_CFQQ_FNS(wait_busy);
J
Jens Axboe 已提交
412 413
#undef CFQ_CFQQ_FNS

414 415 416 417 418 419 420 421 422 423
static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
}

static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
{
	return pd_to_blkg(&cfqg->pd);
}

424
#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
425

426 427 428 429 430
/* cfqg stats flags */
enum cfqg_stats_flags {
	CFQG_stats_waiting = 0,
	CFQG_stats_idling,
	CFQG_stats_empty,
431 432
};

433 434
#define CFQG_FLAG_FNS(name)						\
static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
435
{									\
436
	stats->flags |= (1 << CFQG_stats_##name);			\
437
}									\
438
static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
439
{									\
440
	stats->flags &= ~(1 << CFQG_stats_##name);			\
441
}									\
442
static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
443
{									\
444
	return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
445 446
}									\

447 448 449 450
CFQG_FLAG_FNS(waiting)
CFQG_FLAG_FNS(idling)
CFQG_FLAG_FNS(empty)
#undef CFQG_FLAG_FNS
451 452

/* This should be called with the queue_lock held. */
453
static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
454 455 456
{
	unsigned long long now;

457
	if (!cfqg_stats_waiting(stats))
458 459 460 461 462 463
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
464
	cfqg_stats_clear_waiting(stats);
465 466 467
}

/* This should be called with the queue_lock held. */
468 469
static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
						 struct cfq_group *curr_cfqg)
470
{
471
	struct cfqg_stats *stats = &cfqg->stats;
472

473
	if (cfqg_stats_waiting(stats))
474
		return;
475
	if (cfqg == curr_cfqg)
476
		return;
477 478
	stats->start_group_wait_time = sched_clock();
	cfqg_stats_mark_waiting(stats);
479 480 481
}

/* This should be called with the queue_lock held. */
482
static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
483 484 485
{
	unsigned long long now;

486
	if (!cfqg_stats_empty(stats))
487 488 489 490 491 492
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
493
	cfqg_stats_clear_empty(stats);
494 495
}

496
static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
497
{
498
	blkg_stat_add(&cfqg->stats.dequeue, 1);
499 500
}

501
static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
502
{
503
	struct cfqg_stats *stats = &cfqg->stats;
504 505 506 507 508 509 510 511 512

	if (blkg_rwstat_sum(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
513
	if (cfqg_stats_empty(stats))
514 515 516
		return;

	stats->start_empty_time = sched_clock();
517
	cfqg_stats_mark_empty(stats);
518 519
}

520
static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
521
{
522
	struct cfqg_stats *stats = &cfqg->stats;
523

524
	if (cfqg_stats_idling(stats)) {
525 526 527 528 529
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
530
		cfqg_stats_clear_idling(stats);
531 532 533
	}
}

534
static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
535
{
536
	struct cfqg_stats *stats = &cfqg->stats;
537

538
	BUG_ON(cfqg_stats_idling(stats));
539 540

	stats->start_idle_time = sched_clock();
541
	cfqg_stats_mark_idling(stats);
542 543
}

544
static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
545
{
546
	struct cfqg_stats *stats = &cfqg->stats;
547 548 549 550

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_sum(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
551
	cfqg_stats_update_group_wait_time(stats);
552 553 554 555
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

T
Tejun Heo 已提交
556 557 558 559 560 561 562
static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
563 564 565 566

#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_CFQ_GROUP_IOSCHED
567

568 569 570 571 572 573 574
static struct blkcg_policy blkcg_policy_cfq;

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}

575 576 577 578 579 580 581 582 583 584
static inline void cfqg_get(struct cfq_group *cfqg)
{
	return blkg_get(cfqg_to_blkg(cfqg));
}

static inline void cfqg_put(struct cfq_group *cfqg)
{
	return blkg_put(cfqg_to_blkg(cfqg));
}

T
Tejun Heo 已提交
585 586 587 588
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));	\
V
Vivek Goyal 已提交
589
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
T
Tejun Heo 已提交
590 591 592 593 594 595 596 597 598 599
			  cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			  __pbuf, ##args);				\
} while (0)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);	\
} while (0)
V
Vivek Goyal 已提交
600

601 602
static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
					    struct cfq_group *curr_cfqg, int rw)
603
{
604 605 606
	blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
	cfqg_stats_end_empty_time(&cfqg->stats);
	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
607 608
}

609 610
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time)
611
{
612
	blkg_stat_add(&cfqg->stats.time, time);
613
#ifdef CONFIG_DEBUG_BLK_CGROUP
614
	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
615
#endif
616 617
}

618
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
619
{
620
	blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
621 622
}

623
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
624
{
625
	blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
626 627
}

628 629
static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
					      uint64_t bytes, int rw)
630
{
631 632 633
	blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
	blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
	blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
634 635
}

636 637
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw)
638
{
639
	struct cfqg_stats *stats = &cfqg->stats;
640 641 642 643 644 645 646
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
647 648
}

T
Tejun Heo 已提交
649
static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671
{
	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
	struct cfqg_stats *stats = &cfqg->stats;

	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->service_bytes);
	blkg_rwstat_reset(&stats->serviced);
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_reset(&stats->unaccounted_time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}

672 673 674 675 676
#else	/* CONFIG_CFQ_GROUP_IOSCHED */

static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }

677 678
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
679
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
680

681 682 683 684 685 686 687 688 689 690
static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
			struct cfq_group *curr_cfqg, int rw) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time) { }
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
					      uint64_t bytes, int rw) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw) { }
691

692 693
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */

694 695 696
#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

697 698 699 700 701 702 703 704 705 706
/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
	for (i = 0; i <= IDLE_WORKLOAD; i++) \
		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
			: &cfqg->service_tree_idle; \
			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			(i == IDLE_WORKLOAD && j == 0); \
			j++, st = i < IDLE_WORKLOAD ? \
			&cfqg->service_trees[i][j]: NULL) \

707 708 709 710 711 712 713 714 715 716 717 718
static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
	struct cfq_ttime *ttime, bool group_idle)
{
	unsigned long slice;
	if (!sample_valid(ttime->ttime_samples))
		return false;
	if (group_idle)
		slice = cfqd->cfq_group_idle;
	else
		slice = cfqd->cfq_slice_idle;
	return ttime->ttime_mean > slice;
}
719

720 721 722 723 724 725 726 727 728 729 730 731 732 733 734
static inline bool iops_mode(struct cfq_data *cfqd)
{
	/*
	 * If we are not idling on queues and it is a NCQ drive, parallel
	 * execution of requests is on and measuring time is not possible
	 * in most of the cases until and unless we drive shallower queue
	 * depths and that becomes a performance bottleneck. In such cases
	 * switch to start providing fairness in terms of number of IOs.
	 */
	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
		return true;
	else
		return false;
}

735
static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
736 737 738 739 740 741 742 743
{
	if (cfq_class_idle(cfqq))
		return IDLE_WORKLOAD;
	if (cfq_class_rt(cfqq))
		return RT_WORKLOAD;
	return BE_WORKLOAD;
}

744 745 746 747 748 749 750 751 752 753

static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
	if (!cfq_cfqq_sync(cfqq))
		return ASYNC_WORKLOAD;
	if (!cfq_cfqq_idle_window(cfqq))
		return SYNC_NOIDLE_WORKLOAD;
	return SYNC_WORKLOAD;
}

754
static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
755 756
					struct cfq_data *cfqd,
					struct cfq_group *cfqg)
757
{
758
	if (wl_class == IDLE_WORKLOAD)
759
		return cfqg->service_tree_idle.count;
760

761 762 763
	return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
		cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
		cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
764 765
}

766 767 768
static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg)
{
769 770
	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
		cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
771 772
}

773
static void cfq_dispatch_insert(struct request_queue *, struct request *);
774
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
775
				       struct cfq_io_cq *cic, struct bio *bio,
776
				       gfp_t gfp_mask);
777

778 779 780 781 782 783
static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
	/* cic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct cfq_io_cq, icq);
}

784 785 786 787 788 789 790 791
static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
					       struct io_context *ioc)
{
	if (ioc)
		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
	return NULL;
}

792
static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
793
{
794
	return cic->cfqq[is_sync];
795 796
}

797 798
static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
				bool is_sync)
799
{
800
	cic->cfqq[is_sync] = cfqq;
801 802
}

803
static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
804
{
805
	return cic->icq.q->elevator->elevator_data;
806 807
}

808 809 810 811
/*
 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 * set (in which case it could also be direct WRITE).
 */
812
static inline bool cfq_bio_sync(struct bio *bio)
813
{
814
	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
815
}
L
Linus Torvalds 已提交
816

A
Andrew Morton 已提交
817 818 819 820
/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
821
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
A
Andrew Morton 已提交
822
{
823 824
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
825
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
826
	}
A
Andrew Morton 已提交
827 828
}

829 830 831 832 833
/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
834
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
835
				 unsigned short prio)
836
{
837
	const int base_slice = cfqd->cfq_slice[sync];
838

839 840 841 842
	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
843

844 845 846 847
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
848 849
}

850 851 852 853
static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
{
	u64 d = delta << CFQ_SERVICE_SHIFT;

854
	d = d * CFQ_WEIGHT_DEFAULT;
855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882
	do_div(d, cfqg->weight);
	return d;
}

static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static void update_min_vdisktime(struct cfq_rb_root *st)
{
	struct cfq_group *cfqg;

	if (st->left) {
		cfqg = rb_entry_cfqg(st->left);
883 884
		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
						  cfqg->vdisktime);
885 886 887
	}
}

888 889 890 891 892 893
/*
 * get averaged number of queues of RT/BE priority.
 * average is updated, with a formula that gives more weight to higher numbers,
 * to quickly follows sudden increases and decrease slowly
 */

894 895
static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg, bool rt)
896
{
897 898 899
	unsigned min_q, max_q;
	unsigned mult  = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
900
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
901

902 903 904
	min_q = min(cfqg->busy_queues_avg[rt], busy);
	max_q = max(cfqg->busy_queues_avg[rt], busy);
	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
905
		cfq_hist_divisor;
906 907 908 909 910 911 912 913
	return cfqg->busy_queues_avg[rt];
}

static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

914
	return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
915 916
}

917
static inline unsigned
918
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
919
{
920 921
	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
	if (cfqd->cfq_latency) {
922 923 924 925 926 927
		/*
		 * interested queues (we consider only the ones with the same
		 * priority class in the cfq group)
		 */
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						cfq_class_rt(cfqq));
928 929
		unsigned sync_slice = cfqd->cfq_slice[1];
		unsigned expect_latency = sync_slice * iq;
930 931 932
		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
933 934 935 936 937 938 939
			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
			/* scale low_slice according to IO priority
			 * and sync vs async */
			unsigned low_slice =
				min(slice, base_low_slice * slice / sync_slice);
			/* the adapted slice value is scaled to fit all iqs
			 * into the target latency */
940
			slice = max(slice * group_slice / expect_latency,
941 942 943
				    low_slice);
		}
	}
944 945 946 947 948 949
	return slice;
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
950
	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
951

952
	cfqq->slice_start = jiffies;
953
	cfqq->slice_end = jiffies + slice;
954
	cfqq->allocated_slice = slice;
955
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
956 957 958 959 960 961 962
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
963
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
964 965
{
	if (cfq_cfqq_slice_new(cfqq))
S
Shaohua Li 已提交
966
		return false;
967
	if (time_before(jiffies, cfqq->slice_end))
S
Shaohua Li 已提交
968
		return false;
969

S
Shaohua Li 已提交
970
	return true;
971 972
}

L
Linus Torvalds 已提交
973
/*
J
Jens Axboe 已提交
974
 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
L
Linus Torvalds 已提交
975
 * We choose the request that is closest to the head right now. Distance
976
 * behind the head is penalized and only allowed to a certain extent.
L
Linus Torvalds 已提交
977
 */
J
Jens Axboe 已提交
978
static struct request *
979
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
L
Linus Torvalds 已提交
980
{
981
	sector_t s1, s2, d1 = 0, d2 = 0;
L
Linus Torvalds 已提交
982
	unsigned long back_max;
983 984 985
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */
L
Linus Torvalds 已提交
986

J
Jens Axboe 已提交
987 988 989 990
	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;
J
Jens Axboe 已提交
991

992 993 994
	if (rq_is_sync(rq1) != rq_is_sync(rq2))
		return rq_is_sync(rq1) ? rq1 : rq2;

995 996
	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
997

998 999
	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);
L
Linus Torvalds 已提交
1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
1016
		wrap |= CFQ_RQ1_WRAP;
L
Linus Torvalds 已提交
1017 1018 1019 1020 1021 1022

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
1023
		wrap |= CFQ_RQ2_WRAP;
L
Linus Torvalds 已提交
1024 1025

	/* Found required data */
1026 1027 1028 1029 1030 1031

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
J
Jens Axboe 已提交
1032
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
1033
		if (d1 < d2)
J
Jens Axboe 已提交
1034
			return rq1;
1035
		else if (d2 < d1)
J
Jens Axboe 已提交
1036
			return rq2;
1037 1038
		else {
			if (s1 >= s2)
J
Jens Axboe 已提交
1039
				return rq1;
1040
			else
J
Jens Axboe 已提交
1041
				return rq2;
1042
		}
L
Linus Torvalds 已提交
1043

1044
	case CFQ_RQ2_WRAP:
J
Jens Axboe 已提交
1045
		return rq1;
1046
	case CFQ_RQ1_WRAP:
J
Jens Axboe 已提交
1047 1048
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
1049 1050 1051 1052 1053 1054 1055 1056
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
J
Jens Axboe 已提交
1057
			return rq1;
L
Linus Torvalds 已提交
1058
		else
J
Jens Axboe 已提交
1059
			return rq2;
L
Linus Torvalds 已提交
1060 1061 1062
	}
}

1063 1064 1065
/*
 * The below is leftmost cache rbtree addon
 */
1066
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
1067
{
1068 1069 1070 1071
	/* Service tree is empty */
	if (!root->count)
		return NULL;

1072 1073 1074
	if (!root->left)
		root->left = rb_first(&root->rb);

1075 1076 1077 1078
	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
1079 1080
}

1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091
static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_cfqg(root->left);

	return NULL;
}

1092 1093 1094 1095 1096 1097
static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

1098 1099 1100 1101
static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
1102
	rb_erase_init(n, &root->rb);
1103
	--root->count;
1104 1105
}

L
Linus Torvalds 已提交
1106 1107 1108
/*
 * would be nice to take fifo expire time into account as well
 */
J
Jens Axboe 已提交
1109 1110 1111
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
L
Linus Torvalds 已提交
1112
{
1113 1114
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
J
Jens Axboe 已提交
1115
	struct request *next = NULL, *prev = NULL;
L
Linus Torvalds 已提交
1116

1117
	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
L
Linus Torvalds 已提交
1118 1119

	if (rbprev)
J
Jens Axboe 已提交
1120
		prev = rb_entry_rq(rbprev);
L
Linus Torvalds 已提交
1121

1122
	if (rbnext)
J
Jens Axboe 已提交
1123
		next = rb_entry_rq(rbnext);
1124 1125 1126
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
J
Jens Axboe 已提交
1127
			next = rb_entry_rq(rbnext);
1128
	}
L
Linus Torvalds 已提交
1129

1130
	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
L
Linus Torvalds 已提交
1131 1132
}

1133 1134
static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
L
Linus Torvalds 已提交
1135
{
1136 1137 1138
	/*
	 * just an approximation, should be ok.
	 */
1139
	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
1140
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
1141 1142
}

1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177
static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	return cfqg->vdisktime - st->min_vdisktime;
}

static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_group *__cfqg;
	s64 key = cfqg_key(st, cfqg);
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__cfqg = rb_entry_cfqg(parent);

		if (key < cfqg_key(st, __cfqg))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &cfqg->rb_node;

	rb_link_node(&cfqg->rb_node, parent, node);
	rb_insert_color(&cfqg->rb_node, &st->rb);
}

static void
1178 1179 1180
cfq_update_group_weight(struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1181
	if (cfqg->new_weight) {
1182
		cfqg->weight = cfqg->new_weight;
1183
		cfqg->new_weight = 0;
1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198
	}
}

static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	cfq_update_group_weight(cfqg);
	__cfq_group_service_tree_add(st, cfqg);
	st->total_weight += cfqg->weight;
}

static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1199 1200 1201 1202 1203 1204
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *__cfqg;
	struct rb_node *n;

	cfqg->nr_cfqq++;
G
Gui Jianfeng 已提交
1205
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
1206 1207 1208 1209 1210
		return;

	/*
	 * Currently put the group at the end. Later implement something
	 * so that groups get lesser vtime based on their weights, so that
L
Lucas De Marchi 已提交
1211
	 * if group does not loose all if it was not continuously backlogged.
1212 1213 1214 1215 1216 1217 1218
	 */
	n = rb_last(&st->rb);
	if (n) {
		__cfqg = rb_entry_cfqg(n);
		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
	} else
		cfqg->vdisktime = st->min_vdisktime;
1219 1220
	cfq_group_service_tree_add(st, cfqg);
}
1221

1222 1223 1224 1225 1226 1227
static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	st->total_weight -= cfqg->weight;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		cfq_rb_erase(&cfqg->rb_node, st);
1228 1229 1230
}

static void
1231
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
1232 1233 1234 1235 1236
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	BUG_ON(cfqg->nr_cfqq < 1);
	cfqg->nr_cfqq--;
1237

1238 1239 1240 1241
	/* If there are other cfq queues under this group, don't delete it */
	if (cfqg->nr_cfqq)
		return;

V
Vivek Goyal 已提交
1242
	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
1243
	cfq_group_service_tree_del(st, cfqg);
1244
	cfqg->saved_wl_slice = 0;
1245
	cfqg_stats_update_dequeue(cfqg);
1246 1247
}

1248 1249
static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
						unsigned int *unaccounted_time)
1250
{
1251
	unsigned int slice_used;
1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267

	/*
	 * Queue got expired before even a single request completed or
	 * got expired immediately after first request completion.
	 */
	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
		/*
		 * Also charge the seek time incurred to the group, otherwise
		 * if there are mutiple queues in the group, each can dispatch
		 * a single request on seeky media and cause lots of seek time
		 * and group will never know it.
		 */
		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
					1);
	} else {
		slice_used = jiffies - cfqq->slice_start;
1268 1269
		if (slice_used > cfqq->allocated_slice) {
			*unaccounted_time = slice_used - cfqq->allocated_slice;
1270
			slice_used = cfqq->allocated_slice;
1271 1272 1273 1274
		}
		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
			*unaccounted_time += cfqq->slice_start -
					cfqq->dispatch_start;
1275 1276 1277 1278 1279 1280
	}

	return slice_used;
}

static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1281
				struct cfq_queue *cfqq)
1282 1283
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
1284
	unsigned int used_sl, charge, unaccounted_sl = 0;
1285 1286 1287 1288
	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
			- cfqg->service_tree_idle.count;

	BUG_ON(nr_sync < 0);
1289
	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
1290

1291 1292 1293 1294
	if (iops_mode(cfqd))
		charge = cfqq->slice_dispatch;
	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
		charge = cfqq->allocated_slice;
1295 1296

	/* Can't update vdisktime while group is on service tree */
1297
	cfq_group_service_tree_del(st, cfqg);
1298
	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
1299 1300
	/* If a new weight was requested, update now, off tree */
	cfq_group_service_tree_add(st, cfqg);
1301 1302 1303

	/* This group is being expired. Save the context */
	if (time_after(cfqd->workload_expires, jiffies)) {
1304
		cfqg->saved_wl_slice = cfqd->workload_expires
1305
						- jiffies;
1306 1307
		cfqg->saved_wl_type = cfqd->serving_wl_type;
		cfqg->saved_wl_class = cfqd->serving_wl_class;
1308
	} else
1309
		cfqg->saved_wl_slice = 0;
V
Vivek Goyal 已提交
1310 1311 1312

	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
					st->min_vdisktime);
1313 1314 1315 1316
	cfq_log_cfqq(cfqq->cfqd, cfqq,
		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
		     used_sl, cfqq->slice_dispatch, charge,
		     iops_mode(cfqd), cfqq->nr_sectors);
1317 1318
	cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
	cfqg_stats_set_start_empty_time(cfqg);
1319 1320
}

1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339
/**
 * cfq_init_cfqg_base - initialize base part of a cfq_group
 * @cfqg: cfq_group to initialize
 *
 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
 * is enabled or not.
 */
static void cfq_init_cfqg_base(struct cfq_group *cfqg)
{
	struct cfq_rb_root *st;
	int i, j;

	for_each_cfqg_st(cfqg, i, j, st)
		*st = CFQ_RB_ROOT;
	RB_CLEAR_NODE(&cfqg->rb_node);

	cfqg->ttime.last_end_request = jiffies;
}

1340
#ifdef CONFIG_CFQ_GROUP_IOSCHED
T
Tejun Heo 已提交
1341
static void cfq_pd_init(struct blkcg_gq *blkg)
1342
{
1343
	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1344

1345
	cfq_init_cfqg_base(cfqg);
1346
	cfqg->weight = blkg->blkcg->cfq_weight;
1347 1348 1349
}

/*
1350 1351
 * Search for the cfq group current task belongs to. request_queue lock must
 * be held.
1352
 */
1353
static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
T
Tejun Heo 已提交
1354
						struct blkcg *blkcg)
1355
{
1356
	struct request_queue *q = cfqd->queue;
1357
	struct cfq_group *cfqg = NULL;
1358

T
Tejun Heo 已提交
1359 1360
	/* avoid lookup for the common case where there's no blkcg */
	if (blkcg == &blkcg_root) {
1361 1362
		cfqg = cfqd->root_group;
	} else {
T
Tejun Heo 已提交
1363
		struct blkcg_gq *blkg;
1364

1365
		blkg = blkg_lookup_create(blkcg, q);
1366
		if (!IS_ERR(blkg))
1367
			cfqg = blkg_to_cfqg(blkg);
1368
	}
1369

1370 1371 1372 1373 1374 1375 1376
	return cfqg;
}

static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
	/* Currently, all async queues are mapped to root group */
	if (!cfq_cfqq_sync(cfqq))
1377
		cfqg = cfqq->cfqd->root_group;
1378 1379

	cfqq->cfqg = cfqg;
1380
	/* cfqq reference on cfqg */
1381
	cfqg_get(cfqg);
1382 1383
}

1384 1385
static u64 cfqg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
1386
{
1387
	struct cfq_group *cfqg = pd_to_cfqg(pd);
1388 1389

	if (!cfqg->dev_weight)
1390
		return 0;
1391
	return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
1392 1393
}

1394 1395
static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
				    struct seq_file *sf)
1396
{
T
Tejun Heo 已提交
1397 1398
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
			  cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
1399 1400 1401 1402
			  false);
	return 0;
}

1403 1404
static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
			    struct seq_file *sf)
1405
{
T
Tejun Heo 已提交
1406
	seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
1407 1408 1409
	return 0;
}

1410 1411
static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
				  const char *buf)
1412
{
T
Tejun Heo 已提交
1413
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1414
	struct blkg_conf_ctx ctx;
1415
	struct cfq_group *cfqg;
1416 1417
	int ret;

T
Tejun Heo 已提交
1418
	ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
1419 1420 1421 1422
	if (ret)
		return ret;

	ret = -EINVAL;
1423
	cfqg = blkg_to_cfqg(ctx.blkg);
1424
	if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
1425 1426
		cfqg->dev_weight = ctx.v;
		cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
1427 1428 1429 1430 1431 1432 1433
		ret = 0;
	}

	blkg_conf_finish(&ctx);
	return ret;
}

1434
static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1435
{
T
Tejun Heo 已提交
1436 1437
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
	struct blkcg_gq *blkg;
1438 1439
	struct hlist_node *n;

1440
	if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
1441 1442 1443
		return -EINVAL;

	spin_lock_irq(&blkcg->lock);
1444
	blkcg->cfq_weight = (unsigned int)val;
1445 1446

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
1447
		struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1448

1449 1450
		if (cfqg && !cfqg->dev_weight)
			cfqg->new_weight = blkcg->cfq_weight;
1451 1452 1453 1454 1455 1456
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

1457 1458 1459
static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
			   struct seq_file *sf)
{
T
Tejun Heo 已提交
1460
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1461

T
Tejun Heo 已提交
1462
	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
1463 1464 1465 1466 1467 1468 1469
			  cft->private, false);
	return 0;
}

static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *sf)
{
T
Tejun Heo 已提交
1470
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1471

T
Tejun Heo 已提交
1472
	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
1473 1474 1475 1476
			  cft->private, true);
	return 0;
}

1477
#ifdef CONFIG_DEBUG_BLK_CGROUP
1478 1479
static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
1480
{
1481
	struct cfq_group *cfqg = pd_to_cfqg(pd);
1482
	u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
1483 1484 1485
	u64 v = 0;

	if (samples) {
1486
		v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
1487 1488
		do_div(v, samples);
	}
1489
	__blkg_prfill_u64(sf, pd, v);
1490 1491 1492 1493
	return 0;
}

/* print avg_queue_size */
1494 1495
static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
				     struct seq_file *sf)
1496
{
T
Tejun Heo 已提交
1497
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1498

1499
	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
T
Tejun Heo 已提交
1500
			  &blkcg_policy_cfq, 0, false);
1501 1502 1503 1504 1505 1506 1507
	return 0;
}
#endif	/* CONFIG_DEBUG_BLK_CGROUP */

static struct cftype cfq_blkcg_files[] = {
	{
		.name = "weight_device",
1508 1509
		.read_seq_string = cfqg_print_weight_device,
		.write_string = cfqg_set_weight_device,
1510 1511 1512 1513
		.max_write_len = 256,
	},
	{
		.name = "weight",
1514 1515
		.read_seq_string = cfq_print_weight,
		.write_u64 = cfq_set_weight,
1516 1517 1518
	},
	{
		.name = "time",
1519 1520
		.private = offsetof(struct cfq_group, stats.time),
		.read_seq_string = cfqg_print_stat,
1521 1522 1523
	},
	{
		.name = "sectors",
1524 1525
		.private = offsetof(struct cfq_group, stats.sectors),
		.read_seq_string = cfqg_print_stat,
1526 1527 1528
	},
	{
		.name = "io_service_bytes",
1529 1530
		.private = offsetof(struct cfq_group, stats.service_bytes),
		.read_seq_string = cfqg_print_rwstat,
1531 1532 1533
	},
	{
		.name = "io_serviced",
1534 1535
		.private = offsetof(struct cfq_group, stats.serviced),
		.read_seq_string = cfqg_print_rwstat,
1536 1537 1538
	},
	{
		.name = "io_service_time",
1539 1540
		.private = offsetof(struct cfq_group, stats.service_time),
		.read_seq_string = cfqg_print_rwstat,
1541 1542 1543
	},
	{
		.name = "io_wait_time",
1544 1545
		.private = offsetof(struct cfq_group, stats.wait_time),
		.read_seq_string = cfqg_print_rwstat,
1546 1547 1548
	},
	{
		.name = "io_merged",
1549 1550
		.private = offsetof(struct cfq_group, stats.merged),
		.read_seq_string = cfqg_print_rwstat,
1551 1552 1553
	},
	{
		.name = "io_queued",
1554 1555
		.private = offsetof(struct cfq_group, stats.queued),
		.read_seq_string = cfqg_print_rwstat,
1556 1557 1558 1559
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
1560
		.read_seq_string = cfqg_print_avg_queue_size,
1561 1562 1563
	},
	{
		.name = "group_wait_time",
1564 1565
		.private = offsetof(struct cfq_group, stats.group_wait_time),
		.read_seq_string = cfqg_print_stat,
1566 1567 1568
	},
	{
		.name = "idle_time",
1569 1570
		.private = offsetof(struct cfq_group, stats.idle_time),
		.read_seq_string = cfqg_print_stat,
1571 1572 1573
	},
	{
		.name = "empty_time",
1574 1575
		.private = offsetof(struct cfq_group, stats.empty_time),
		.read_seq_string = cfqg_print_stat,
1576 1577 1578
	},
	{
		.name = "dequeue",
1579 1580
		.private = offsetof(struct cfq_group, stats.dequeue),
		.read_seq_string = cfqg_print_stat,
1581 1582 1583
	},
	{
		.name = "unaccounted_time",
1584 1585
		.private = offsetof(struct cfq_group, stats.unaccounted_time),
		.read_seq_string = cfqg_print_stat,
1586 1587 1588 1589
	},
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
	{ }	/* terminate */
};
1590
#else /* GROUP_IOSCHED */
1591
static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
T
Tejun Heo 已提交
1592
						struct blkcg *blkcg)
1593
{
1594
	return cfqd->root_group;
1595
}
1596

1597 1598 1599 1600 1601 1602 1603
static inline void
cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
	cfqq->cfqg = cfqg;
}

#endif /* GROUP_IOSCHED */

1604
/*
1605
 * The cfqd->service_trees holds all pending cfq_queue's that have
1606 1607 1608
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
1609
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1610
				 bool add_front)
1611
{
1612 1613
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
1614
	unsigned long rb_key;
1615
	struct cfq_rb_root *st;
1616
	int left;
1617
	int new_cfqq = 1;
1618

1619
	st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
1620 1621
	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
1622
		parent = rb_last(&st->rb);
1623 1624 1625 1626 1627 1628
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
1629 1630 1631 1632 1633 1634
		/*
		 * Get our rb key offset. Subtract any residual slice
		 * value carried from last service. A negative resid
		 * count indicates slice overrun, and this should position
		 * the next service time further away in the tree.
		 */
1635
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1636
		rb_key -= cfqq->slice_resid;
1637
		cfqq->slice_resid = 0;
1638 1639
	} else {
		rb_key = -HZ;
1640
		__cfqq = cfq_rb_first(st);
1641 1642
		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
	}
L
Linus Torvalds 已提交
1643

1644
	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1645
		new_cfqq = 0;
1646
		/*
1647
		 * same position, nothing more to do
1648
		 */
1649
		if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
1650
			return;
L
Linus Torvalds 已提交
1651

1652 1653
		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
L
Linus Torvalds 已提交
1654
	}
1655

1656
	left = 1;
1657
	parent = NULL;
1658 1659
	cfqq->service_tree = st;
	p = &st->rb.rb_node;
1660
	while (*p) {
1661
		struct rb_node **n;
1662

1663 1664 1665
		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

1666
		/*
1667
		 * sort by key, that represents service time.
1668
		 */
1669
		if (time_before(rb_key, __cfqq->rb_key))
1670
			n = &(*p)->rb_left;
1671
		else {
1672
			n = &(*p)->rb_right;
1673
			left = 0;
1674
		}
1675 1676

		p = n;
1677 1678
	}

1679
	if (left)
1680
		st->left = &cfqq->rb_node;
1681

1682 1683
	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
1684 1685
	rb_insert_color(&cfqq->rb_node, &st->rb);
	st->count++;
1686
	if (add_front || !new_cfqq)
1687
		return;
1688
	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
L
Linus Torvalds 已提交
1689 1690
}

1691
static struct cfq_queue *
1692 1693 1694
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
		     sector_t sector, struct rb_node **ret_parent,
		     struct rb_node ***rb_link)
1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710
{
	struct rb_node **p, *parent;
	struct cfq_queue *cfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		cfqq = rb_entry(parent, struct cfq_queue, p_node);

		/*
		 * Sort strictly based on sector.  Smallest to the left,
		 * largest to the right.
		 */
1711
		if (sector > blk_rq_pos(cfqq->next_rq))
1712
			n = &(*p)->rb_right;
1713
		else if (sector < blk_rq_pos(cfqq->next_rq))
1714 1715 1716 1717
			n = &(*p)->rb_left;
		else
			break;
		p = n;
1718
		cfqq = NULL;
1719 1720 1721 1722 1723
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;
1724
	return cfqq;
1725 1726 1727 1728 1729 1730 1731
}

static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;

1732 1733 1734 1735
	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}
1736 1737 1738 1739 1740 1741

	if (cfq_class_idle(cfqq))
		return;
	if (!cfqq->next_rq)
		return;

1742
	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1743 1744
	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
				      blk_rq_pos(cfqq->next_rq), &parent, &p);
1745 1746
	if (!__cfqq) {
		rb_link_node(&cfqq->p_node, parent, p);
1747 1748 1749
		rb_insert_color(&cfqq->p_node, cfqq->p_root);
	} else
		cfqq->p_root = NULL;
1750 1751
}

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq)) {
		cfq_service_tree_add(cfqd, cfqq, 0);
		cfq_prio_tree_add(cfqd, cfqq);
	}
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}
	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
		/*
		 * Queue will be deleted from service tree when we actually
		 * expire it later. Right now just remove it from prio tree
		 * as it is empty.
		 */
		if (cfqq->p_root) {
			rb_erase(&cfqq->p_node, cfqq->p_root);
			cfqq->p_root = NULL;
		}
	}
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *prev;

	cfqq->queued[rq_is_sync(rq)]++;

	elv_rb_add(&cfqq->sort_list, rq);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	prev = cfqq->next_rq;
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);

	/*
	 * adjust priority tree position, if ->next_rq changes
	 */
	if (prev != cfqq->next_rq)
		cfq_prio_tree_add(cfqd, cfqq);

	BUG_ON(!cfqq->next_rq);
}

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
	cfq_add_rq_rb(rq);
	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
				 rq->cmd_flags);
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}
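
/*
 * The lookup key is the bio's end sector, so a request that starts
 * exactly where the bio ends is found in the queue's sorted request tree
 * and reported back to cfq_merge() as a front-merge candidate.
 */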

1893
static void cfq_activate_request(struct request_queue *q, struct request *rq)
L
Linus Torvalds 已提交
1894
{
1895
	struct cfq_data *cfqd = q->elevator->elevator_data;
J
Jens Axboe 已提交
1896

1897
	cfqd->rq_in_driver++;
1898
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1899
						cfqd->rq_in_driver);
1900

1901
	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
L
Linus Torvalds 已提交
1902 1903
}

1904
static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
L
Linus Torvalds 已提交
1905
{
1906 1907
	struct cfq_data *cfqd = q->elevator->elevator_data;

1908 1909
	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
1910
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1911
						cfqd->rq_in_driver);
L
Linus Torvalds 已提交
1912 1913
}

1914
static void cfq_remove_request(struct request *rq)
L
Linus Torvalds 已提交
1915
{
J
Jens Axboe 已提交
1916
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1917

J
Jens Axboe 已提交
1918 1919
	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
L
Linus Torvalds 已提交
1920

1921
	list_del_init(&rq->queuelist);
J
Jens Axboe 已提交
1922
	cfq_del_rq_rb(rq);
1923

1924
	cfqq->cfqd->rq_queued--;
1925
	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
1926 1927 1928
	if (rq->cmd_flags & REQ_PRIO) {
		WARN_ON(!cfqq->prio_pending);
		cfqq->prio_pending--;
1929
	}
L
Linus Torvalds 已提交
1930 1931
}

1932 1933
static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
L
Linus Torvalds 已提交
1934 1935 1936 1937
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

1938
	__rq = cfq_find_rq_fmerge(cfqd, bio);
1939
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
1940 1941
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
L
Linus Torvalds 已提交
1942 1943 1944 1945 1946
	}

	return ELEVATOR_NO_MERGE;
}

1947
static void cfq_merged_request(struct request_queue *q, struct request *req,
1948
			       int type)
L
Linus Torvalds 已提交
1949
{
1950
	if (type == ELEVATOR_FRONT_MERGE) {
J
Jens Axboe 已提交
1951
		struct cfq_queue *cfqq = RQ_CFQQ(req);
L
Linus Torvalds 已提交
1952

J
Jens Axboe 已提交
1953
		cfq_reposition_rq_rb(cfqq, req);
L
Linus Torvalds 已提交
1954 1955 1956
	}
}

static void cfq_bio_merged(struct request_queue *q, struct request *req,
				struct bio *bio)
{
	cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
}

L
Linus Torvalds 已提交
1963
static void
1964
cfq_merged_requests(struct request_queue *q, struct request *rq,
L
Linus Torvalds 已提交
1965 1966
		    struct request *next)
{
1967
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1968 1969
	struct cfq_data *cfqd = q->elevator->elevator_data;

1970 1971 1972 1973
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1974 1975
	    time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
	    cfqq == RQ_CFQQ(next)) {
1976
		list_move(&rq->queuelist, &next->queuelist);
1977 1978
		rq_set_fifo_time(rq, rq_fifo_time(next));
	}
1979

1980 1981
	if (cfqq->next_rq == next)
		cfqq->next_rq = rq;
1982
	cfq_remove_request(next);
1983
	cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
1984 1985 1986 1987 1988 1989 1990 1991 1992 1993

	cfqq = RQ_CFQQ(next);
	/*
	 * all requests of this queue are merged to other queues, delete it
	 * from the service tree. If it's the active_queue,
	 * cfq_dispatch_requests() will choose to expire it or do idle
	 */
	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
	    cfqq != cfqd->active_queue)
		cfq_del_cfqq_rr(cfqd, cfqq);
1994 1995
}
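
/*
 * Note on the merge above: if the absorbed request (next) was due to
 * expire earlier, the surviving request inherits its FIFO position and
 * expiry, so merging never postpones an older deadline.  And once every
 * request of the victim's queue has been merged away, that queue is
 * dropped from the service tree unless it is the active queue.
 */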

1996
static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1997 1998 1999
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
2000
	struct cfq_io_cq *cic;
2001 2002 2003
	struct cfq_queue *cfqq;

	/*
2004
	 * Disallow merge of a sync bio into an async request.
2005
	 */
2006
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
2007
		return false;
2008 2009

	/*
	 * Lookup the cfqq that this bio will be queued with and allow
	 * merge only if rq is queued there.
	 */
2013 2014 2015
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return false;
2016

2017
	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2018
	return cfqq == RQ_CFQQ(rq);
2019 2020
}

2021 2022 2023
static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	del_timer(&cfqd->idle_slice_timer);
2024
	cfqg_stats_update_idle_time(cfqq->cfqg);
2025 2026
}

J
Jens Axboe 已提交
2027 2028
static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
2029 2030
{
	if (cfqq) {
2031
		cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
2032
				cfqd->serving_wl_class, cfqd->serving_wl_type);
2033
		cfqg_stats_update_avg_queue_size(cfqq->cfqg);
2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047
		cfqq->slice_start = 0;
		cfqq->dispatch_start = jiffies;
		cfqq->allocated_slice = 0;
		cfqq->slice_end = 0;
		cfqq->slice_dispatch = 0;
		cfqq->nr_sectors = 0;

		cfq_clear_cfqq_wait_request(cfqq);
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);

		cfq_del_timer(cfqd, cfqq);
2048 2049 2050 2051 2052
	}

	cfqd->active_queue = cfqq;
}

2053 2054 2055 2056 2057
/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2058
		    bool timed_out)
2059
{
2060 2061
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

2062
	if (cfq_cfqq_wait_request(cfqq))
2063
		cfq_del_timer(cfqd, cfqq);
2064 2065

	cfq_clear_cfqq_wait_request(cfqq);
2066
	cfq_clear_cfqq_wait_busy(cfqq);
2067

2068 2069 2070 2071 2072 2073 2074 2075 2076
	/*
	 * If this cfqq is shared between multiple processes, check to
	 * make sure that those processes are still issuing I/Os within
	 * the mean seek distance.  If not, it may be time to break the
	 * queues apart again.
	 */
	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
		cfq_mark_cfqq_split_coop(cfqq);

2077
	/*
2078
	 * store what was left of this slice, if the queue idled/timed out
2079
	 */
2080 2081
	if (timed_out) {
		if (cfq_cfqq_slice_new(cfqq))
2082
			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
2083 2084
		else
			cfqq->slice_resid = cfqq->slice_end - jiffies;
2085 2086
		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
	}
2087

2088
	cfq_group_served(cfqd, cfqq->cfqg, cfqq);
2089

2090 2091 2092
	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);

2093
	cfq_resort_rr_list(cfqd, cfqq);
2094 2095 2096 2097 2098

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
2099
		put_io_context(cfqd->active_cic->icq.ioc);
2100 2101 2102 2103
		cfqd->active_cic = NULL;
	}
}

2104
static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
2105 2106 2107 2108
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
2109
		__cfq_slice_expired(cfqd, cfqq, timed_out);
2110 2111
}

2112 2113 2114 2115
/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
J
Jens Axboe 已提交
2116
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
2117
{
2118 2119
	struct cfq_rb_root *st = st_for(cfqd->serving_group,
			cfqd->serving_wl_class, cfqd->serving_wl_type);
2120

2121 2122 2123
	if (!cfqd->rq_queued)
		return NULL;

2124
	/* There is nothing to dispatch */
2125
	if (!st)
2126
		return NULL;
2127
	if (RB_EMPTY_ROOT(&st->rb))
2128
		return NULL;
2129
	return cfq_rb_first(st);
J
Jens Axboe 已提交
2130 2131
}

2132 2133
static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
{
2134
	struct cfq_group *cfqg;
2135 2136 2137 2138 2139 2140 2141
	struct cfq_queue *cfqq;
	int i, j;
	struct cfq_rb_root *st;

	if (!cfqd->rq_queued)
		return NULL;

2142 2143 2144 2145
	cfqg = cfq_get_next_cfqg(cfqd);
	if (!cfqg)
		return NULL;

2146 2147 2148 2149 2150 2151
	for_each_cfqg_st(cfqg, i, j, st)
		if ((cfqq = cfq_rb_first(st)) != NULL)
			return cfqq;
	return NULL;
}

2152 2153 2154
/*
 * Get and set a new active queue for service.
 */
2155 2156
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
					      struct cfq_queue *cfqq)
J
Jens Axboe 已提交
2157
{
2158
	if (!cfqq)
2159
		cfqq = cfq_get_next_queue(cfqd);
J
Jens Axboe 已提交
2160

2161
	__cfq_set_active_queue(cfqd, cfqq);
J
Jens Axboe 已提交
2162
	return cfqq;
2163 2164
}

2165 2166 2167
static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
2168 2169
	if (blk_rq_pos(rq) >= cfqd->last_position)
		return blk_rq_pos(rq) - cfqd->last_position;
2170
	else
2171
		return cfqd->last_position - blk_rq_pos(rq);
2172 2173
}

2174
static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2175
			       struct request *rq)
J
Jens Axboe 已提交
2176
{
2177
	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
J
Jens Axboe 已提交
2178 2179
}

2180 2181 2182
static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
				    struct cfq_queue *cur_cfqq)
{
2183
	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194
	struct rb_node *parent, *node;
	struct cfq_queue *__cfqq;
	sector_t sector = cfqd->last_position;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	/*
	 * First, if we find a request starting at the end of the last
	 * request, choose it.
	 */
2195
	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
2196 2197 2198 2199 2200 2201 2202 2203
	if (__cfqq)
		return __cfqq;

	/*
	 * If the exact sector wasn't found, the parent of the NULL leaf
	 * will contain the closest sector.
	 */
	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
2204
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2205 2206
		return __cfqq;

2207
	if (blk_rq_pos(__cfqq->next_rq) < sector)
2208 2209 2210 2211 2212 2213 2214
		node = rb_next(&__cfqq->p_node);
	else
		node = rb_prev(&__cfqq->p_node);
	if (!node)
		return NULL;

	__cfqq = rb_entry(node, struct cfq_queue, p_node);
2215
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231
		return __cfqq;

	return NULL;
}

/*
 * cfqd - obvious
 * cur_cfqq - passed in so that we don't decide that the current queue is
 * 	      closely cooperating with itself.
 *
 * So, basically we're assuming that cur_cfqq has dispatched at least
 * one request, and that cfqd->last_position reflects a position on the disk
 * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
 * assumption.
 */
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
2232
					      struct cfq_queue *cur_cfqq)
J
Jens Axboe 已提交
2233
{
2234 2235
	struct cfq_queue *cfqq;

2236 2237
	if (cfq_class_idle(cur_cfqq))
		return NULL;
2238 2239 2240 2241 2242
	if (!cfq_cfqq_sync(cur_cfqq))
		return NULL;
	if (CFQQ_SEEKY(cur_cfqq))
		return NULL;

2243 2244 2245 2246 2247 2248
	/*
	 * Don't search priority tree if it's the only queue in the group.
	 */
	if (cur_cfqq->cfqg->nr_cfqq == 1)
		return NULL;

J
Jens Axboe 已提交
2249
	/*
2250 2251 2252
	 * We should notice if some of the queues are cooperating, e.g.
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and not waste time idling.
J
Jens Axboe 已提交
2253
	 */
2254 2255 2256 2257
	cfqq = cfqq_close(cfqd, cur_cfqq);
	if (!cfqq)
		return NULL;

2258 2259 2260 2261
	/* If new queue belongs to different cfq_group, don't choose it */
	if (cur_cfqq->cfqg != cfqq->cfqg)
		return NULL;

J
Jeff Moyer 已提交
2262 2263 2264 2265 2266
	/*
	 * It only makes sense to merge sync queues.
	 */
	if (!cfq_cfqq_sync(cfqq))
		return NULL;
2267 2268
	if (CFQQ_SEEKY(cfqq))
		return NULL;
J
Jeff Moyer 已提交
2269

2270 2271 2272 2273 2274 2275
	/*
	 * Do not merge queues of different priority classes
	 */
	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
		return NULL;

2276
	return cfqq;
J
Jens Axboe 已提交
2277 2278
}

2279 2280 2281 2282 2283 2284
/*
 * Determine whether we should enforce idle window for this queue.
 */
static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_class_t wl_class = cfqq_class(cfqq);
	struct cfq_rb_root *st = cfqq->service_tree;

	BUG_ON(!st);
	BUG_ON(!st->count);

	if (!cfqd->cfq_slice_idle)
		return false;

	/* We never do for idle class queues. */
	if (wl_class == IDLE_WORKLOAD)
		return false;

	/* We do for queues that were marked with idle window flag. */
	if (cfq_cfqq_idle_window(cfqq) &&
	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
		return true;

	/*
	 * Otherwise, we do only if they are the last ones
	 * in their service tree.
	 */
	if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
	   !cfq_io_thinktime_big(cfqd, &st->ttime, false))
		return true;
	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
	return false;
}

J
Jens Axboe 已提交
2314
static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2315
{
2316
	struct cfq_queue *cfqq = cfqd->active_queue;
2317
	struct cfq_io_cq *cic;
2318
	unsigned long sl, group_idle = 0;
2319

2320
	/*
J
Jens Axboe 已提交
2321 2322 2323
	 * SSD device without seek penalty, disable idling. But only do so
	 * for devices that support queuing, otherwise we still have a problem
	 * with sync vs async workloads.
2324
	 */
J
Jens Axboe 已提交
2325
	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
2326 2327
		return;

2328
	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
J
Jens Axboe 已提交
2329
	WARN_ON(cfq_cfqq_slice_new(cfqq));
2330 2331 2332 2333

	/*
	 * idle is disabled, either manually or by past process history
	 */
2334 2335 2336 2337 2338 2339 2340
	if (!cfq_should_idle(cfqd, cfqq)) {
		/* no queue idling. Check for group idling */
		if (cfqd->cfq_group_idle)
			group_idle = cfqd->cfq_group_idle;
		else
			return;
	}
J
Jens Axboe 已提交
2341

2342
	/*
2343
	 * still active requests from this queue, don't idle
2344
	 */
2345
	if (cfqq->dispatched)
2346 2347
		return;

2348 2349 2350
	/*
	 * task has exited, don't wait
	 */
2351
	cic = cfqd->active_cic;
T
Tejun Heo 已提交
2352
	if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
J
Jens Axboe 已提交
2353 2354
		return;

2355 2356 2357 2358 2359
	/*
	 * If our average think time is larger than the remaining time
	 * slice, then don't idle. This avoids overrunning the allotted
	 * time slice.
	 */
2360 2361
	if (sample_valid(cic->ttime.ttime_samples) &&
	    (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
2362
		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
2363
			     cic->ttime.ttime_mean);
2364
		return;
2365
	}
2366

2367 2368 2369 2370
	/* There are other queues in the group, don't do group idle */
	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
		return;

J
Jens Axboe 已提交
2371
	cfq_mark_cfqq_wait_request(cfqq);
2372

2373 2374 2375 2376
	if (group_idle)
		sl = cfqd->cfq_group_idle;
	else
		sl = cfqd->cfq_slice_idle;
2377

2378
	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
2379
	cfqg_stats_set_start_idle_time(cfqq->cfqg);
2380 2381
	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
			group_idle ? 1 : 0);
L
Linus Torvalds 已提交
2382 2383
}
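
/*
 * Rough sketch of the effect (the idle_slice_timer handler itself lives
 * elsewhere in this file): the queue is flagged wait_request and the
 * timer is armed for slice_idle (or group_idle) jiffies.  If a new
 * request from the same queue arrives first, cfq_rq_enqueued() normally
 * cancels the timer and dispatch continues; if the timer fires instead,
 * the slice is expired and another queue is selected.
 */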

2384 2385 2386
/*
 * Move request from internal lists to the request queue dispatch list.
 */
2387
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
L
Linus Torvalds 已提交
2388
{
2389
	struct cfq_data *cfqd = q->elevator->elevator_data;
J
Jens Axboe 已提交
2390
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
2391

2392 2393
	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");

2394
	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2395
	cfq_remove_request(rq);
J
Jens Axboe 已提交
2396
	cfqq->dispatched++;
2397
	(RQ_CFQG(rq))->dispatched++;
2398
	elv_dispatch_sort(q, rq);
2399

2400
	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
2401
	cfqq->nr_sectors += blk_rq_sectors(rq);
2402
	cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
L
Linus Torvalds 已提交
2403 2404 2405 2406 2407
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
J
Jens Axboe 已提交
2408
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
L
Linus Torvalds 已提交
2409
{
2410
	struct request *rq = NULL;
L
Linus Torvalds 已提交
2411

J
Jens Axboe 已提交
2412
	if (cfq_cfqq_fifo_expire(cfqq))
L
Linus Torvalds 已提交
2413
		return NULL;
2414 2415 2416

	cfq_mark_cfqq_fifo_expire(cfqq);

2417 2418
	if (list_empty(&cfqq->fifo))
		return NULL;
L
Linus Torvalds 已提交
2419

2420
	rq = rq_entry_fifo(cfqq->fifo.next);
2421
	if (time_before(jiffies, rq_fifo_time(rq)))
2422
		rq = NULL;
L
Linus Torvalds 已提交
2423

2424
	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
J
Jens Axboe 已提交
2425
	return rq;
L
Linus Torvalds 已提交
2426 2427
}
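
/*
 * Each request gets its FIFO expiry stamped at insert time (see
 * cfq_insert_request() below).  This helper hands out at most one
 * expired request per slice: the fifo_expire flag set here is only
 * cleared again when the queue is activated for a new slice.
 */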

2428 2429 2430 2431
static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;
L
Linus Torvalds 已提交
2432

2433
	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
L
Linus Torvalds 已提交
2434

2435
	return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
L
Linus Torvalds 已提交
2436 2437
}

J
Jeff Moyer 已提交
2438 2439 2440 2441 2442 2443 2444 2445
/*
 * Must be called with the queue_lock held.
 */
static int cfqq_process_refs(struct cfq_queue *cfqq)
{
	int process_refs, io_refs;

	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2446
	process_refs = cfqq->ref - io_refs;
J
Jeff Moyer 已提交
2447 2448 2449 2450 2451 2452
	BUG_ON(process_refs < 0);
	return process_refs;
}

static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
{
2453
	int process_refs, new_process_refs;
J
Jeff Moyer 已提交
2454 2455
	struct cfq_queue *__cfqq;

2456 2457 2458 2459 2460 2461 2462 2463 2464
	/*
	 * If there are no process references on the new_cfqq, then it is
	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
	 * chain may have dropped their last reference (not just their
	 * last process reference).
	 */
	if (!cfqq_process_refs(new_cfqq))
		return;

J
Jeff Moyer 已提交
2465 2466 2467 2468 2469 2470 2471 2472
	/* Avoid a circular list and skip interim queue merges */
	while ((__cfqq = new_cfqq->new_cfqq)) {
		if (__cfqq == cfqq)
			return;
		new_cfqq = __cfqq;
	}

	process_refs = cfqq_process_refs(cfqq);
2473
	new_process_refs = cfqq_process_refs(new_cfqq);
J
Jeff Moyer 已提交
2474 2475 2476 2477
	/*
	 * If the process for the cfqq has gone away, there is no
	 * sense in merging the queues.
	 */
2478
	if (process_refs == 0 || new_process_refs == 0)
J
Jeff Moyer 已提交
2479 2480
		return;

2481 2482 2483 2484 2485
	/*
	 * Merge in the direction of the lesser amount of work.
	 */
	if (new_process_refs >= process_refs) {
		cfqq->new_cfqq = new_cfqq;
2486
		new_cfqq->ref += process_refs;
2487 2488
	} else {
		new_cfqq->new_cfqq = cfqq;
2489
		cfqq->ref += new_process_refs;
2490
	}
J
Jeff Moyer 已提交
2491 2492
}
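
/*
 * Example of the direction choice above (hypothetical counts): with 3
 * process references on cfqq and 1 on new_cfqq, new_cfqq is the one
 * redirected (new_cfqq->new_cfqq = cfqq) and cfqq->ref grows by 1, so
 * the merge target stays pinned for as long as any redirected process
 * can still reach it.
 */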

2493
static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
2494
			struct cfq_group *cfqg, enum wl_class_t wl_class)
2495 2496 2497 2498 2499 2500 2501
{
	struct cfq_queue *queue;
	int i;
	bool key_valid = false;
	unsigned long lowest_key = 0;
	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;

2502 2503
	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
		/* select the one with lowest rb_key */
2504
		queue = cfq_rb_first(st_for(cfqg, wl_class, i));
2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515
		if (queue &&
		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
			lowest_key = queue->rb_key;
			cur_best = i;
			key_valid = true;
		}
	}

	return cur_best;
}

2516 2517
static void
choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
2518 2519 2520
{
	unsigned slice;
	unsigned count;
2521
	struct cfq_rb_root *st;
2522
	unsigned group_slice;
2523
	enum wl_class_t original_class = cfqd->serving_wl_class;
2524

2525
	/* Choose next priority. RT > BE > IDLE */
2526
	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2527
		cfqd->serving_wl_class = RT_WORKLOAD;
2528
	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2529
		cfqd->serving_wl_class = BE_WORKLOAD;
2530
	else {
2531
		cfqd->serving_wl_class = IDLE_WORKLOAD;
2532 2533 2534 2535
		cfqd->workload_expires = jiffies + 1;
		return;
	}

2536
	if (original_class != cfqd->serving_wl_class)
2537 2538
		goto new_workload;

2539 2540 2541 2542 2543
	/*
	 * For RT and BE, we have to choose also the type
	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
	 * expiration time
	 */
2544
	st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
2545
	count = st->count;
2546 2547

	/*
2548
	 * check workload expiration, and that we still have other queues ready
2549
	 */
2550
	if (count && !time_after(jiffies, cfqd->workload_expires))
2551 2552
		return;

2553
new_workload:
2554
	/* otherwise select new workload type */
2555
	cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
2556
					cfqd->serving_wl_class);
2557
	st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
2558
	count = st->count;
2559 2560 2561 2562 2563 2564

	/*
	 * the workload slice is computed as a fraction of target latency
	 * proportional to the number of queues in that workload, over
	 * all the queues in the same priority class
	 */
2565 2566 2567
	group_slice = cfq_group_slice(cfqd, cfqg);

	slice = group_slice * count /
2568 2569
		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
		      cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
2570
					cfqg));
2571

2572
	if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
2573 2574 2575 2576 2577 2578 2579 2580 2581
		unsigned int tmp;

		/*
		 * Async queues are currently system wide. Just taking
		 * proportion of queues with-in same group will lead to higher
		 * async ratio system wide as generally root group is going
		 * to have higher weight. A more accurate thing would be to
		 * calculate system wide asnc/sync ratio.
		 */
2582 2583
		tmp = cfqd->cfq_target_latency *
			cfqg_busy_async_queues(cfqd, cfqg);
2584 2585 2586
		tmp = tmp/cfqd->busy_queues;
		slice = min_t(unsigned, slice, tmp);

2587 2588 2589
		/* async workload slice is scaled down according to
		 * the sync/async slice ratio. */
		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2590
	} else
2591 2592 2593 2594
		/* sync workload slice is at least 2 * cfq_slice_idle */
		slice = max(slice, 2 * cfqd->cfq_slice_idle);

	slice = max_t(unsigned, slice, CFQ_MIN_TT);
2595
	cfq_log(cfqd, "workload slice:%d", slice);
2596 2597 2598
	cfqd->workload_expires = jiffies + slice;
}
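
/*
 * Worked example with made-up numbers: if group_slice comes to 120
 * jiffies, the chosen sync workload tree holds 2 queues and the class
 * has 6 busy queues in this group, the base slice is 120 * 2 / 6 = 40
 * jiffies; it is then scaled by the sync/async slice ratio for the
 * async workload (or raised to at least 2 * cfq_slice_idle for sync)
 * and floored at CFQ_MIN_TT.
 */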

2599 2600 2601
static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
2602
	struct cfq_group *cfqg;
2603 2604 2605

	if (RB_EMPTY_ROOT(&st->rb))
		return NULL;
2606 2607 2608
	cfqg = cfq_rb_first_group(st);
	update_min_vdisktime(st);
	return cfqg;
2609 2610
}

2611 2612
static void cfq_choose_cfqg(struct cfq_data *cfqd)
{
2613 2614 2615
	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);

	cfqd->serving_group = cfqg;
2616 2617

	/* Restore the workload type data */
2618 2619 2620 2621
	if (cfqg->saved_wl_slice) {
		cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
		cfqd->serving_wl_type = cfqg->saved_wl_type;
		cfqd->serving_wl_class = cfqg->saved_wl_class;
2622 2623 2624
	} else
		cfqd->workload_expires = jiffies - 1;

2625
	choose_wl_class_and_type(cfqd, cfqg);
2626 2627
}

2628
/*
2629 2630
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
2631
 */
2632
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
L
Linus Torvalds 已提交
2633
{
2634
	struct cfq_queue *cfqq, *new_cfqq = NULL;
L
Linus Torvalds 已提交
2635

2636 2637 2638
	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;
L
Linus Torvalds 已提交
2639

2640 2641
	if (!cfqd->rq_queued)
		return NULL;
2642 2643 2644 2645 2646 2647 2648

	/*
	 * We were waiting for group to get backlogged. Expire the queue
	 */
	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
		goto expire;

2649
	/*
J
Jens Axboe 已提交
2650
	 * The active queue has run out of time, expire it and select new.
2651
	 */
2652 2653 2654 2655 2656 2657 2658 2659 2660 2661
	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
		/*
		 * If slice had not expired at the completion of last request
		 * we might not have turned on wait_busy flag. Don't expire
		 * the queue yet. Allow the group to get backlogged.
		 *
		 * The very fact that we have used the slice, that means we
		 * have been idling all along on this queue and it should be
		 * ok to wait for this request to complete.
		 */
2662 2663 2664
		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
			cfqq = NULL;
2665
			goto keep_queue;
2666
		} else
2667
			goto check_group_idle;
2668
	}
L
Linus Torvalds 已提交
2669

2670
	/*
J
Jens Axboe 已提交
2671 2672
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
2673
	 */
2674
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2675
		goto keep_queue;
J
Jens Axboe 已提交
2676

2677 2678 2679 2680
	/*
	 * If another queue has a request waiting within our mean seek
	 * distance, let it run.  The expire code will check for close
	 * cooperators and put the close queue at the front of the service
J
Jeff Moyer 已提交
2681
	 * tree.  If possible, merge the expiring queue with the new cfqq.
2682
	 */
2683
	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
J
Jeff Moyer 已提交
2684 2685 2686
	if (new_cfqq) {
		if (!cfqq->new_cfqq)
			cfq_setup_merge(cfqq, new_cfqq);
2687
		goto expire;
J
Jeff Moyer 已提交
2688
	}
2689

J
Jens Axboe 已提交
2690 2691 2692 2693 2694
	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
2695 2696 2697 2698 2699
	if (timer_pending(&cfqd->idle_slice_timer)) {
		cfqq = NULL;
		goto keep_queue;
	}

2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710
	/*
	 * This is a deep seek queue, but the device is much faster than
	 * the queue can deliver, don't idle
	 */
	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
	    (cfq_cfqq_slice_new(cfqq) ||
	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
		cfq_clear_cfqq_deep(cfqq);
		cfq_clear_cfqq_idle_window(cfqq);
	}

2711 2712 2713 2714 2715 2716 2717 2718 2719 2720
	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * If group idle is enabled and there are requests dispatched from
	 * this group, wait for requests to complete.
	 */
check_group_idle:
S
Shaohua Li 已提交
2721 2722 2723
	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
	    cfqq->cfqg->dispatched &&
	    !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
2724 2725
		cfqq = NULL;
		goto keep_queue;
2726 2727
	}

J
Jens Axboe 已提交
2728
expire:
2729
	cfq_slice_expired(cfqd, 0);
J
Jens Axboe 已提交
2730
new_queue:
2731 2732 2733 2734 2735
	/*
	 * Current queue expired. Check if we have to switch to a new
	 * service tree
	 */
	if (!new_cfqq)
2736
		cfq_choose_cfqg(cfqd);
2737

2738
	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2739
keep_queue:
J
Jens Axboe 已提交
2740
	return cfqq;
2741 2742
}

J
Jens Axboe 已提交
2743
static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2744 2745 2746 2747 2748 2749 2750 2751 2752
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));
2753 2754

	/* By default cfqq is not expired if it is empty. Do it explicitly */
2755
	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2756 2757 2758
	return dispatched;
}

2759 2760 2761 2762
/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
2763
static int cfq_forced_dispatch(struct cfq_data *cfqd)
2764
{
2765
	struct cfq_queue *cfqq;
2766
	int dispatched = 0;
2767

2768
	/* Expire the timeslice of the current active queue first */
2769
	cfq_slice_expired(cfqd, 0);
2770 2771
	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
		__cfq_set_active_queue(cfqd, cfqq);
2772
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2773
	}
2774 2775 2776

	BUG_ON(cfqd->busy_queues);

2777
	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2778 2779 2780
	return dispatched;
}

S
Shaohua Li 已提交
2781 2782 2783 2784 2785
static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
	struct cfq_queue *cfqq)
{
	/* the queue hasn't finished any request, can't estimate */
	if (cfq_cfqq_slice_new(cfqq))
S
Shaohua Li 已提交
2786
		return true;
S
Shaohua Li 已提交
2787 2788
	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
		cfqq->slice_end))
S
Shaohua Li 已提交
2789
		return true;
S
Shaohua Li 已提交
2790

S
Shaohua Li 已提交
2791
	return false;
S
Shaohua Li 已提交
2792 2793
}

2794
static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2795 2796
{
	unsigned int max_dispatch;
2797

2798 2799 2800
	/*
	 * Drain async requests before we start sync IO
	 */
2801
	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
2802
		return false;
2803

2804 2805 2806
	/*
	 * If this is an async queue and we have sync IO in flight, let it wait
	 */
2807
	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
2808
		return false;
2809

S
Shaohua Li 已提交
2810
	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2811 2812
	if (cfq_class_idle(cfqq))
		max_dispatch = 1;
2813

2814 2815 2816 2817
	/*
	 * Does this cfqq already have too much IO in flight?
	 */
	if (cfqq->dispatched >= max_dispatch) {
2818
		bool promote_sync = false;
2819 2820 2821
		/*
		 * idle queue must always only have a single IO in flight
		 */
2822
		if (cfq_class_idle(cfqq))
2823
			return false;
2824

2825
		/*
2826 2827
		 * If there is only one sync queue
		 * we can ignore async queue here and give the sync
2828 2829 2830 2831
		 * queue no dispatch limit. The reason is a sync queue can
		 * preempt async queue, limiting the sync queue doesn't make
		 * sense. This is useful for aiostress test.
		 */
2832 2833
		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
			promote_sync = true;
2834

2835 2836 2837
		/*
		 * We have other queues, don't allow more IO from this one
		 */
2838 2839
		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
				!promote_sync)
2840
			return false;
2841

2842
		/*
2843
		 * Sole queue user, no limit
2844
		 */
2845
		if (cfqd->busy_queues == 1 || promote_sync)
S
Shaohua Li 已提交
2846 2847 2848 2849 2850 2851 2852 2853 2854
			max_dispatch = -1;
		else
			/*
			 * Normally we start throttling cfqq when cfq_quantum/2
			 * requests have been dispatched. But we can drive
			 * deeper queue depths at the beginning of slice
			 * subjected to upper limit of cfq_quantum.
			 * */
			max_dispatch = cfqd->cfq_quantum;
2855 2856 2857 2858 2859 2860 2861
	}

	/*
	 * Async queues must wait a bit before being allowed dispatch.
	 * We also ramp up the dispatch depth gradually for async IO,
	 * based on the last sync IO we serviced
	 */
2862
	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2863
		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2864
		unsigned int depth;
2865

2866
		depth = last_sync / cfqd->cfq_slice[1];
2867 2868
		if (!depth && !cfqq->dispatched)
			depth = 1;
2869 2870
		if (depth < max_dispatch)
			max_dispatch = depth;
2871
	}
2872

2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904
	/*
	 * If we're below the current max, allow a dispatch
	 */
	return cfqq->dispatched < max_dispatch;
}

/*
 * Dispatch a request from cfqq, moving them to the request queue
 * dispatch list.
 */
static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct request *rq;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	if (!cfq_may_dispatch(cfqd, cfqq))
		return false;

	/*
	 * follow expired path, else get first next available
	 */
	rq = cfq_check_fifo(cfqq);
	if (!rq)
		rq = cfqq->next_rq;

	/*
	 * insert request into driver dispatch list
	 */
	cfq_dispatch_insert(cfqd->queue, rq);

	if (!cfqd->active_cic) {
2905
		struct cfq_io_cq *cic = RQ_CIC(rq);
2906

2907
		atomic_long_inc(&cic->icq.ioc->refcount);
2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930
		cfqd->active_cic = cic;
	}

	return true;
}

/*
 * Find the cfqq that we need to service and move a request from that to the
 * dispatch list
 */
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	cfqq = cfq_select_queue(cfqd);
	if (!cfqq)
2931 2932
		return 0;

2933
	/*
2934
	 * Dispatch a request from this cfqq, if it is allowed
2935
	 */
2936 2937 2938
	if (!cfq_dispatch_request(cfqd, cfqq))
		return 0;

2939
	cfqq->slice_dispatch++;
2940
	cfq_clear_cfqq_must_dispatch(cfqq);
2941

2942 2943 2944 2945 2946 2947 2948 2949
	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queue always expire after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
2950
		cfq_slice_expired(cfqd, 0);
L
Linus Torvalds 已提交
2951 2952
	}

2953
	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2954
	return 1;
L
Linus Torvalds 已提交
2955 2956 2957
}

/*
J
Jens Axboe 已提交
2958 2959
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
L
Linus Torvalds 已提交
2960
 *
2961
 * Each cfq queue took a reference on the parent group. Drop it now.
L
Linus Torvalds 已提交
2962 2963 2964 2965
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
2966
	struct cfq_data *cfqd = cfqq->cfqd;
2967
	struct cfq_group *cfqg;
2968

2969
	BUG_ON(cfqq->ref <= 0);
L
Linus Torvalds 已提交
2970

2971 2972
	cfqq->ref--;
	if (cfqq->ref)
L
Linus Torvalds 已提交
2973 2974
		return;

2975
	cfq_log_cfqq(cfqd, cfqq, "put_queue");
L
Linus Torvalds 已提交
2976
	BUG_ON(rb_first(&cfqq->sort_list));
2977
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2978
	cfqg = cfqq->cfqg;
L
Linus Torvalds 已提交
2979

2980
	if (unlikely(cfqd->active_queue == cfqq)) {
2981
		__cfq_slice_expired(cfqd, cfqq, 0);
2982
		cfq_schedule_dispatch(cfqd);
2983
	}
2984

2985
	BUG_ON(cfq_cfqq_on_rr(cfqq));
L
Linus Torvalds 已提交
2986
	kmem_cache_free(cfq_pool, cfqq);
2987
	cfqg_put(cfqg);
L
Linus Torvalds 已提交
2988 2989
}

2990
static void cfq_put_cooperator(struct cfq_queue *cfqq)
L
Linus Torvalds 已提交
2991
{
J
Jeff Moyer 已提交
2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008
	struct cfq_queue *__cfqq, *next;

	/*
	 * If this queue was scheduled to merge with another queue, be
	 * sure to drop the reference taken on that queue (and others in
	 * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
	 */
	__cfqq = cfqq->new_cfqq;
	while (__cfqq) {
		if (__cfqq == cfqq) {
			WARN(1, "cfqq->new_cfqq loop detected\n");
			break;
		}
		next = __cfqq->new_cfqq;
		cfq_put_queue(__cfqq);
		__cfqq = next;
	}
3009 3010 3011 3012 3013 3014 3015 3016 3017 3018
}

static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_cooperator(cfqq);
J
Jeff Moyer 已提交
3019

3020 3021
	cfq_put_queue(cfqq);
}
3022

3023 3024 3025 3026 3027 3028 3029
static void cfq_init_icq(struct io_cq *icq)
{
	struct cfq_io_cq *cic = icq_to_cic(icq);

	cic->ttime.last_end_request = jiffies;
}

3030
static void cfq_exit_icq(struct io_cq *icq)
3031
{
3032
	struct cfq_io_cq *cic = icq_to_cic(icq);
3033
	struct cfq_data *cfqd = cic_to_cfqd(cic);
3034

3035 3036 3037
	if (cic->cfqq[BLK_RW_ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
		cic->cfqq[BLK_RW_ASYNC] = NULL;
3038 3039
	}

3040 3041 3042
	if (cic->cfqq[BLK_RW_SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
		cic->cfqq[BLK_RW_SYNC] = NULL;
3043
	}
3044 3045
}

3046
static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
3047 3048 3049 3050
{
	struct task_struct *tsk = current;
	int ioprio_class;

J
Jens Axboe 已提交
3051
	if (!cfq_cfqq_prio_changed(cfqq))
3052 3053
		return;

T
Tejun Heo 已提交
3054
	ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3055
	switch (ioprio_class) {
3056 3057 3058 3059
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
	case IOPRIO_CLASS_NONE:
		/*
3060
		 * no prio set, inherit CPU scheduling settings
3061 3062
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
3063
		cfqq->ioprio_class = task_nice_ioclass(tsk);
3064 3065
		break;
	case IOPRIO_CLASS_RT:
T
Tejun Heo 已提交
3066
		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3067 3068 3069
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
T
Tejun Heo 已提交
3070
		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3071 3072 3073 3074 3075 3076 3077
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
3078 3079 3080 3081 3082 3083 3084
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
J
Jens Axboe 已提交
3085
	cfq_clear_cfqq_prio_changed(cfqq);
3086 3087
}

T
Tejun Heo 已提交
3088
static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
3089
{
T
Tejun Heo 已提交
3090
	int ioprio = cic->icq.ioc->ioprio;
3091
	struct cfq_data *cfqd = cic_to_cfqd(cic);
3092
	struct cfq_queue *cfqq;
3093

T
Tejun Heo 已提交
3094 3095 3096 3097 3098
	/*
	 * Check whether ioprio has changed.  The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
3099 3100
		return;

3101
	cfqq = cic->cfqq[BLK_RW_ASYNC];
3102 3103
	if (cfqq) {
		struct cfq_queue *new_cfqq;
3104 3105
		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
					 GFP_ATOMIC);
3106
		if (new_cfqq) {
3107
			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
3108 3109
			cfq_put_queue(cfqq);
		}
3110
	}
3111

3112
	cfqq = cic->cfqq[BLK_RW_SYNC];
3113 3114
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);
T
Tejun Heo 已提交
3115 3116

	cic->ioprio = ioprio;
3117 3118
}
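
/*
 * On an ioprio change only the async queue is swapped immediately (a
 * queue at the new priority is looked up or created and the old one
 * dropped); the sync queue is merely flagged prio_changed, so its
 * priority data is refreshed on the next request via
 * cfq_init_prio_data() from cfq_insert_request().
 */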

3119
static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3120
			  pid_t pid, bool is_sync)
3121 3122 3123 3124 3125
{
	RB_CLEAR_NODE(&cfqq->rb_node);
	RB_CLEAR_NODE(&cfqq->p_node);
	INIT_LIST_HEAD(&cfqq->fifo);

3126
	cfqq->ref = 0;
3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138
	cfqq->cfqd = cfqd;

	cfq_mark_cfqq_prio_changed(cfqq);

	if (is_sync) {
		if (!cfq_class_idle(cfqq))
			cfq_mark_cfqq_idle_window(cfqq);
		cfq_mark_cfqq_sync(cfqq);
	}
	cfqq->pid = pid;
}

3139
#ifdef CONFIG_CFQ_GROUP_IOSCHED
T
Tejun Heo 已提交
3140
static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3141
{
3142
	struct cfq_data *cfqd = cic_to_cfqd(cic);
T
Tejun Heo 已提交
3143 3144
	struct cfq_queue *sync_cfqq;
	uint64_t id;
3145

T
Tejun Heo 已提交
3146
	rcu_read_lock();
T
Tejun Heo 已提交
3147
	id = bio_blkcg(bio)->id;
T
Tejun Heo 已提交
3148
	rcu_read_unlock();
3149

T
Tejun Heo 已提交
3150 3151 3152 3153 3154 3155
	/*
	 * Check whether blkcg has changed.  The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
		return;
3156

T
Tejun Heo 已提交
3157
	sync_cfqq = cic_to_cfqq(cic, 1);
3158 3159 3160 3161 3162 3163 3164 3165 3166
	if (sync_cfqq) {
		/*
		 * Drop reference to sync queue. A new sync queue will be
		 * assigned in new group upon arrival of a fresh request.
		 */
		cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
		cic_set_cfqq(cic, NULL, 1);
		cfq_put_queue(sync_cfqq);
	}
T
Tejun Heo 已提交
3167 3168

	cic->blkcg_id = id;
3169
}
T
Tejun Heo 已提交
3170 3171
#else
static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
3172 3173
#endif  /* CONFIG_CFQ_GROUP_IOSCHED */

3174
static struct cfq_queue *
3175 3176
cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
		     struct bio *bio, gfp_t gfp_mask)
3177
{
T
Tejun Heo 已提交
3178
	struct blkcg *blkcg;
3179
	struct cfq_queue *cfqq, *new_cfqq = NULL;
3180
	struct cfq_group *cfqg;
3181 3182

retry:
3183 3184
	rcu_read_lock();

T
Tejun Heo 已提交
3185
	blkcg = bio_blkcg(bio);
3186
	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
3187
	cfqq = cic_to_cfqq(cic, is_sync);
3188

3189 3190 3191 3192 3193 3194
	/*
	 * Always try a new alloc if we fell back to the OOM cfqq
	 * originally, since it should just be a temporary situation.
	 */
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = NULL;
3195 3196 3197 3198
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
3199
			rcu_read_unlock();
3200
			spin_unlock_irq(cfqd->queue->queue_lock);
3201
			new_cfqq = kmem_cache_alloc_node(cfq_pool,
3202
					gfp_mask | __GFP_ZERO,
3203
					cfqd->queue->node);
3204
			spin_lock_irq(cfqd->queue->queue_lock);
3205 3206
			if (new_cfqq)
				goto retry;
3207
		} else {
3208 3209 3210
			cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_ZERO,
					cfqd->queue->node);
3211 3212
		}

3213 3214
		if (cfqq) {
			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3215
			cfq_init_prio_data(cfqq, cic);
3216
			cfq_link_cfqq_cfqg(cfqq, cfqg);
3217 3218 3219
			cfq_log_cfqq(cfqd, cfqq, "alloced");
		} else
			cfqq = &cfqd->oom_cfqq;
3220 3221 3222 3223 3224
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

3225
	rcu_read_unlock();
3226 3227 3228
	return cfqq;
}

3229 3230 3231
static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
3232
	switch (ioprio_class) {
3233 3234
	case IOPRIO_CLASS_RT:
		return &cfqd->async_cfqq[0][ioprio];
T
Tejun Heo 已提交
3235 3236 3237
	case IOPRIO_CLASS_NONE:
		ioprio = IOPRIO_NORM;
		/* fall through */
3238 3239 3240 3241 3242 3243 3244 3245 3246
	case IOPRIO_CLASS_BE:
		return &cfqd->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqd->async_idle_cfqq;
	default:
		BUG();
	}
}

3247
static struct cfq_queue *
3248
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3249
	      struct bio *bio, gfp_t gfp_mask)
3250
{
T
Tejun Heo 已提交
3251 3252
	const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
	const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3253
	struct cfq_queue **async_cfqq = NULL;
3254 3255
	struct cfq_queue *cfqq = NULL;

3256 3257 3258 3259 3260
	if (!is_sync) {
		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
		cfqq = *async_cfqq;
	}

3261
	if (!cfqq)
3262
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
3263 3264 3265 3266

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
3267
	if (!is_sync && !(*async_cfqq)) {
3268
		cfqq->ref++;
3269
		*async_cfqq = cfqq;
3270 3271
	}

3272
	cfqq->ref++;
3273 3274 3275
	return cfqq;
}

static void
__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
{
	unsigned long elapsed = jiffies - ttime->last_end_request;
	elapsed = min(elapsed, 2UL * slice_idle);

	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
	ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
	ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
}
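
/*
 * The averages are kept as 8-bit fixed point with a 7/8 decay.  Worked
 * example: with a steady think time of 8 jiffies, ttime_samples
 * converges to 256, ttime_total to 256 * 8 = 2048, and ttime_mean to
 * (2048 + 128) / 256 = 8, i.e. the mean tracks the recent per-request
 * gap, with each sample capped at 2 * slice_idle.
 */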

static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3289
			struct cfq_io_cq *cic)
3290
{
3291
	if (cfq_cfqq_sync(cfqq)) {
3292
		__cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
3293 3294 3295
		__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
			cfqd->cfq_slice_idle);
	}
S
Shaohua Li 已提交
3296 3297 3298
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
#endif
3299
}
L
Linus Torvalds 已提交
3300

static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct request *rq)
{
	sector_t sdist = 0;
	sector_t n_sec = blk_rq_sectors(rq);
	if (cfqq->last_request_pos) {
		if (cfqq->last_request_pos < blk_rq_pos(rq))
			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
		else
			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
	}

	cfqq->seek_history <<= 1;
	if (blk_queue_nonrot(cfqd->queue))
		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
	else
		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
}
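
/*
 * seek_history is a 32-bit shift register: each new request shifts in
 * one bit, set when the request looked "seeky" (large sector distance on
 * rotational media, or a small request on non-rotational media).  A
 * queue whose recent history has more than a handful of such bits set is
 * treated as seeky and loses its idle window below.
 */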
L
Linus Torvalds 已提交
3320

3321 3322 3323 3324 3325 3326
/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3327
		       struct cfq_io_cq *cic)
3328
{
3329
	int old_idle, enable_idle;
3330

3331 3332 3333 3334
	/*
	 * Don't idle for async or idle io prio class
	 */
	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3335 3336
		return;

3337
	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
L
Linus Torvalds 已提交
3338

3339 3340 3341
	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
		cfq_mark_cfqq_deep(cfqq);

3342 3343
	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
		enable_idle = 0;
T
Tejun Heo 已提交
3344
	else if (!atomic_read(&cic->icq.ioc->active_ref) ||
3345 3346
		 !cfqd->cfq_slice_idle ||
		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3347
		enable_idle = 0;
3348 3349
	else if (sample_valid(cic->ttime.ttime_samples)) {
		if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
3350 3351 3352
			enable_idle = 0;
		else
			enable_idle = 1;
L
Linus Torvalds 已提交
3353 3354
	}

3355 3356 3357 3358 3359 3360 3361
	if (old_idle != enable_idle) {
		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
		if (enable_idle)
			cfq_mark_cfqq_idle_window(cfqq);
		else
			cfq_clear_cfqq_idle_window(cfqq);
	}
3362
}
L
Linus Torvalds 已提交
3363

3364 3365 3366 3367
/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no or if we aren't sure, a 1 will cause a preempt.
 */
3368
static bool
3369
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
J
Jens Axboe 已提交
3370
		   struct request *rq)
3371
{
J
Jens Axboe 已提交
3372
	struct cfq_queue *cfqq;
3373

J
Jens Axboe 已提交
3374 3375
	cfqq = cfqd->active_queue;
	if (!cfqq)
3376
		return false;
3377

J
Jens Axboe 已提交
3378
	if (cfq_class_idle(new_cfqq))
3379
		return false;
3380 3381

	if (cfq_class_idle(cfqq))
3382
		return true;
3383

3384 3385 3386 3387 3388 3389
	/*
	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
	 */
	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
		return false;

3390 3391 3392 3393
	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
J
Jens Axboe 已提交
3394
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3395
		return true;
3396

3397 3398 3399 3400 3401 3402 3403
	if (new_cfqq->cfqg != cfqq->cfqg)
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* Allow preemption only if we are idling on sync-noidle tree */
3404
	if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
3405 3406 3407 3408 3409
	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
	    new_cfqq->service_tree->count == 2 &&
	    RB_EMPTY_ROOT(&cfqq->sort_list))
		return true;

3410 3411 3412 3413
	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
3414
	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3415 3416
		return true;

3417 3418 3419 3420
	/*
	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
	 */
	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3421
		return true;
3422

3423 3424 3425 3426
	/* An idle queue should not be idle now for some reason */
	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
		return true;

3427
	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3428
		return false;
3429 3430 3431 3432 3433

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
3434
	if (cfq_rq_close(cfqd, cfqq, rq))
3435
		return true;
3436

3437
	return false;
3438 3439 3440 3441 3442 3443 3444 3445
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
S
Shaohua Li 已提交
3446 3447
	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);

3448
	cfq_log_cfqq(cfqd, cfqq, "preempt");
S
Shaohua Li 已提交
3449
	cfq_slice_expired(cfqd, 1);
3450

3451 3452 3453 3454
	/*
	 * workload type is changed, don't save slice, otherwise preempt
	 * doesn't happen
	 */
S
Shaohua Li 已提交
3455
	if (old_type != cfqq_type(cfqq))
3456
		cfqq->cfqg->saved_wl_slice = 0;
3457

3458 3459 3460 3461 3462
	/*
	 * Put the new queue at the front of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
3463 3464

	cfq_service_tree_add(cfqd, cfqq, 1);
3465

3466 3467
	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
3468 3469 3470
}

/*
J
Jens Axboe 已提交
3471
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
3472 3473 3474
 * something we should do about it
 */
static void
J
Jens Axboe 已提交
3475 3476
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
3477
{
3478
	struct cfq_io_cq *cic = RQ_CIC(rq);
3479

3480
	cfqd->rq_queued++;
3481 3482
	if (rq->cmd_flags & REQ_PRIO)
		cfqq->prio_pending++;
3483

3484
	cfq_update_io_thinktime(cfqd, cfqq, cic);
3485
	cfq_update_io_seektime(cfqd, cfqq, rq);
J
Jens Axboe 已提交
3486 3487
	cfq_update_idle_window(cfqd, cfqq, cic);

3488
	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3489 3490 3491

	if (cfqq == cfqd->active_queue) {
		/*
3492 3493 3494
		 * Remember that we saw a request from this process, but
		 * don't start queuing just yet. Otherwise we risk seeing lots
		 * of tiny requests, because we disrupt the normal plugging
3495 3496
		 * and merging. If the request is already larger than a single
		 * page, let it rip immediately. For that case we assume that
3497 3498 3499
		 * merging is already done. Ditto for a busy system that
		 * has other work pending, don't risk delaying until the
		 * idle timer unplug to continue working.
3500
		 */
3501
		if (cfq_cfqq_wait_request(cfqq)) {
3502 3503
			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
			    cfqd->busy_queues > 1) {
3504
				cfq_del_timer(cfqd, cfqq);
3505
				cfq_clear_cfqq_wait_request(cfqq);
3506
				__blk_run_queue(cfqd->queue);
3507
			} else {
3508
				cfqg_stats_update_idle_time(cfqq->cfqg);
3509
				cfq_mark_cfqq_must_dispatch(cfqq);
3510
			}
3511
		}
J
Jens Axboe 已提交
3512
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3513 3514 3515
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired it's mean thinktime or this new queue
3516 3517
		 * has some old slice time left and is of higher priority or
		 * this new queue is RT and the current one is BE
3518 3519
		 */
		cfq_preempt_queue(cfqd, cfqq);
3520
		__blk_run_queue(cfqd->queue);
3521
	}
L
Linus Torvalds 已提交
3522 3523
}

static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "insert_request");
	cfq_init_prio_data(cfqq, RQ_CIC(rq));

	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
	list_add_tail(&rq->queuelist, &cfqq->fifo);
	cfq_add_rq_rb(rq);
	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
				 rq->cmd_flags);
	cfq_rq_enqueued(cfqd, cfqq, rq);
}

/*
 * Update hw_tag based on peak queue depth over 50 samples under
 * sufficient load.
 */
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;

	if (cfqd->hw_tag == 1)
		return;

	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
		return;

	/*
	 * If the active queue doesn't have enough requests and can idle,
	 * cfq might not dispatch sufficient requests to hardware. Don't
	 * zero hw_tag in this case.
	 */
	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
		return;

	if (cfqd->hw_tag_samples++ < 50)
		return;

	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
		cfqd->hw_tag = 1;
	else
		cfqd->hw_tag = 0;
}

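/*
 * Heuristic: decide whether the active queue, now out of requests, should
 * keep its slice a little longer in the hope that its task submits another
 * request before the slice is handed to someone else (wait_busy).
 */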
static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_io_cq *cic = cfqd->active_cic;

	/* If the queue already has requests, don't wait */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		return false;

	/* If there are other queues in the group, don't wait */
	if (cfqq->cfqg->nr_cfqq > 1)
		return false;

	/* the only queue in the group, but think time is big */
	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* if slice left is less than think time, wait busy */
	if (cic && sample_valid(cic->ttime.ttime_samples)
	    && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
		return true;

	/*
	 * If think time is less than a jiffy, then ttime_mean=0 and the
	 * check above will not be true. It might happen that slice has not
	 * expired yet but will expire soon (4-5 ns) during select_queue().
	 * To cover the case where think time is less than a jiffy, mark the
	 * queue wait busy if only 1 jiffy is left in the slice.
	 */
	if (cfqq->slice_end - jiffies == 1)
		return true;

	return false;
}

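/*
 * Completion path: update driver/queue accounting and think-time statistics,
 * then decide whether the active queue should be expired, put into
 * wait_busy, or left to idle for its next request.
 */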
static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;
	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
		     !!(rq->cmd_flags & REQ_NOIDLE));

	cfq_update_hw_tag(cfqd);

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;
	(RQ_CFQG(rq))->dispatched--;
	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
				     rq_io_start_time_ns(rq), rq->cmd_flags);

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;

	if (sync) {
		struct cfq_rb_root *st;

		RQ_CIC(rq)->ttime.last_end_request = now;

		if (cfq_cfqq_on_rr(cfqq))
			st = cfqq->service_tree;
		else
			st = st_for(cfqq->cfqg, cfqq_class(cfqq),
					cfqq_type(cfqq));

		st->ttime.last_end_request = now;
		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
			cfqd->last_delayed_sync = now;
	}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	cfqq->cfqg->ttime.last_end_request = now;
#endif

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);

		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}

		/*
		 * Should we wait for the next request to come in before we
		 * expire the queue?
		 */
		if (cfq_should_wait_busy(cfqd, cfqq)) {
			unsigned long extend_sl = cfqd->cfq_slice_idle;
			if (!cfqd->cfq_slice_idle)
				extend_sl = cfqd->cfq_group_idle;
			cfqq->slice_end = jiffies + extend_sl;
			cfq_mark_cfqq_wait_busy(cfqq);
			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
		}

		/*
		 * Idling is not enabled on:
		 * - expired queues
		 * - idle-priority queues
		 * - async queues
		 * - queues with still some requests queued
		 * - when there is a close cooperator
		 */
		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && cfqq_empty &&
			 !cfq_close_cooperator(cfqd, cfqq)) {
			cfq_arm_slice_timer(cfqd);
		}
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}

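/*
 * may_queue helper: a queue that is idling while waiting for a request
 * gets ELV_MQUEUE_MUST so the waiting task can still allocate one.
 */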
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(struct request_queue *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
	if (cfqq) {
		cfq_init_prio_data(cfqq, cic);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		/* Put down rq reference on cfqg */
		cfqg_put(RQ_CFQG(rq));
		rq->elv.priv[0] = NULL;
		rq->elv.priv[1] = NULL;

		cfq_put_queue(cfqq);
	}
}

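/*
 * Switch the cic over to the queue this one is scheduled to merge with,
 * drop the old queue's reference, and return the queue to use instead.
 */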
static struct cfq_queue *
cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
		struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
	cfq_mark_cfqq_coop(cfqq->new_cfqq);
	cfq_put_queue(cfqq);
	return cic_to_cfqq(cic, 1);
}

/*
 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
 * was the last process referring to said cfqq.
 */
static struct cfq_queue *
split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
{
	if (cfqq_process_refs(cfqq) == 1) {
		cfqq->pid = current->pid;
		cfq_clear_cfqq_coop(cfqq);
		cfq_clear_cfqq_split_coop(cfqq);
		return cfqq;
	}

	cic_set_cfqq(cic, NULL, 1);

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
	return NULL;
}
/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
		gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
	const int rw = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	spin_lock_irq(q->queue_lock);

	check_ioprio_changed(cic, bio);
	check_blkcg_changed(cic, bio);
new_queue:
	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
		cic_set_cfqq(cic, cfqq, is_sync);
	} else {
		/*
		 * If the queue was seeky for too long, break it apart.
		 */
		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
			cfqq = split_cfqq(cic, cfqq);
			if (!cfqq)
				goto new_queue;
		}

		/*
		 * Check to see if this queue is scheduled to merge with
		 * another, closely cooperating queue.  The merging of
		 * queues happens here as it must be done in process context.
		 * The reference on new_cfqq was taken in merge_cfqqs.
		 */
		if (cfqq->new_cfqq)
			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
	}

	cfqq->allocated[rw]++;

	cfqq->ref++;
	cfqg_get(cfqq->cfqg);
	rq->elv.priv[0] = cfqq;
	rq->elv.priv[1] = cfqq->cfqg;
	spin_unlock_irq(q->queue_lock);
	return 0;
}

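/*
 * unplug_work handler: take the queue lock and re-run the request queue so
 * any pending requests get dispatched.
 */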
static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(cfqd->queue);
	spin_unlock_irq(q->queue_lock);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * We saw a request before the queue expired, let it through
		 */
		if (cfq_cfqq_must_dispatch(cfqq))
			goto out_kick;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
			goto out_kick;

		/*
		 * Queue depth flag is reset only when the idle didn't succeed
		 */
		cfq_clear_cfqq_deep(cfqq);
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	cancel_work_sync(&cfqd->unplug_work);
}

static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}

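/*
 * Elevator teardown: stop the idle timer and unplug work, expire any active
 * queue, drop the cached async queues and free the scheduler data.
 */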
static void cfq_exit_queue(struct elevator_queue *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	cfq_put_async_queues(cfqd);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
#else
	kfree(cfqd->root_group);
#endif
	kfree(cfqd);
}

static int cfq_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;
	struct blkcg_gq *blkg __maybe_unused;
	int i, ret;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return -ENOMEM;

	cfqd->queue = q;
	q->elevator->elevator_data = cfqd;

	/* Init root service tree */
	cfqd->grp_service_tree = CFQ_RB_ROOT;

	/* Init root group and prefer root group over other groups by default */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
	if (ret)
		goto out_free;

	cfqd->root_group = blkg_to_cfqg(q->root_blkg);
#else
	ret = -ENOMEM;
	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
					GFP_KERNEL, cfqd->queue->node);
	if (!cfqd->root_group)
		goto out_free;

	cfq_init_cfqg_base(cfqd->root_group);
#endif
	cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;

	/*
	 * Not strictly needed (since RB_ROOT just clears the node and we
	 * zeroed cfqd on alloc), but better be safe in case someone decides
	 * to add magic to the rb code
	 */
	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		cfqd->prio_trees[i] = RB_ROOT;

	/*
	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
	 * Grab a permanent reference to it, so that the normal code flow
	 * will not attempt to free it.  oom_cfqq is linked to root_group
	 * but shouldn't hold a reference as it'll never be unlinked.  Lose
	 * the reference from linking right away.
	 */
	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
	cfqd->oom_cfqq.ref++;

	spin_lock_irq(q->queue_lock);
	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
	cfqg_put(cfqd->root_group);
	spin_unlock_irq(q->queue_lock);

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_target_latency = cfq_target_latency;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->cfq_group_idle = cfq_group_idle;
	cfqd->cfq_latency = 1;
	cfqd->hw_tag = -1;
	/*
	 * we optimistically start assuming sync ops weren't delayed in last
	 * second, in order to have larger depth for async operations.
	 */
	cfqd->last_delayed_sync = jiffies - HZ;
	return 0;

out_free:
	kfree(cfqd);
	return ret;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	CFQ_ATTR(group_idle),
	CFQ_ATTR(low_latency),
	CFQ_ATTR(target_latency),
	__ATTR_NULL
};
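/*
 * Each CFQ_ATTR(name) entry above is exposed as a per-device sysfs file
 * (typically /sys/block/<dev>/queue/iosched/<name>), wired to the matching
 * cfq_<name>_show/store pair generated by the macros above.
 */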

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn = 		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_bio_merged_fn =	cfq_bio_merged,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_init_icq_fn =		cfq_init_icq,
		.elevator_exit_icq_fn =		cfq_exit_icq,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
	},
	.icq_size	=	sizeof(struct cfq_io_cq),
	.icq_align	=	__alignof__(struct cfq_io_cq),
	.elevator_attrs =	cfq_attrs,
	.elevator_name	=	"cfq",
	.elevator_owner =	THIS_MODULE,
};

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkcg_policy blkcg_policy_cfq = {
	.pd_size		= sizeof(struct cfq_group),
	.cftypes		= cfq_blkcg_files,

	.pd_init_fn		= cfq_pd_init,
	.pd_reset_stats_fn	= cfq_pd_reset_stats,
};
#endif

static int __init cfq_init(void)
{
	int ret;

	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (!cfq_group_idle)
		cfq_group_idle = 1;

	ret = blkcg_policy_register(&blkcg_policy_cfq);
	if (ret)
		return ret;
#else
	cfq_group_idle = 0;
#endif

	ret = -ENOMEM;
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto err_pol_unreg;

	ret = elv_register(&iosched_cfq);
	if (ret)
		goto err_free_pool;

	return 0;

err_free_pool:
	kmem_cache_destroy(cfq_pool);
err_pol_unreg:
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	return ret;
}

static void __exit cfq_exit(void)
{
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	elv_unregister(&iosched_cfq);
	kmem_cache_destroy(cfq_pool);
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");