/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * a scaling operation multiplication may overflow on 32 bit platforms.
 * In that case, #define RESOLUTION as ULL to get 64 bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
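
/*
 * Quick sanity check on the defaults above (worked example):
 * (MAX_INTERESTING - 1) * RESOLUTION = 49999 * 1024 = 51198976,
 * which is far below UINT_MAX (4294967295), so the plain int
 * RESOLUTION does not overflow here.
 */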


/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the  C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * that is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor; however, it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual ratio is whether there is (disk) IO outstanding.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * a value of 10 is added for each point of "per cpu load average" we have.
 * a value of 5 points is added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 *
 */
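
/*
 * To make the correction-factor example above concrete: factors are
 * stored in fixed point, scaled by RESOLUTION * DECAY (8192), so a
 * unity factor is stored as 8192 and the 0.5 factor from the example
 * above would be stored as 4096.
 */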

struct menu_device {
	int		last_state_idx;
	int             needs_update;

	unsigned int	next_timer_us;
	unsigned int	predicted_us;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};


#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static inline int get_loadavg(unsigned long load)
{
	return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
}
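
/*
 * Example: a per-cpu load average of 1.50 has LOAD_INT() = 1 and
 * LOAD_FRAC() = 50, so get_loadavg() returns 1 * 10 + 50 / 10 = 15.
 */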

static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with
	 * IO pending, one without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowaiters)
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}
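
/*
 * Example: an expected duration of 250 us maps to bucket 2 when no
 * IO is pending, and to bucket BUCKETS/2 + 2 = 8 when it is.
 */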

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */

	mult += 2 * get_loadavg(load);

	/* for IO wait tasks (per cpu!) we add 5x each */
	mult += 10 * nr_iowaiters;

	return mult;
}
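
/*
 * Example: a load average of 1.00 (get_loadavg() = 10) and two tasks
 * waiting for IO give mult = 1 + 2 * 10 + 10 * 2 = 41, i.e. a C state
 * is only a candidate if the predicted idle time is at least 41 times
 * its exit latency.
 */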

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
static u64 div_round64(u64 dividend, u32 divisor)
{
	return div_u64(dividend + (divisor / 2), divisor);
}
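
/*
 * Example: div_round64(10, 4) = (10 + 2) / 4 = 3, matching what
 * DIV_ROUND_CLOSEST(10, 4) would return.
 */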

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static void get_typical_interval(struct menu_device *data)
{
	int i, divisor;
	unsigned int max, thresh;
	uint64_t avg, stddev;

	thresh = UINT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	max = 0;
	avg = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			avg += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	if (divisor == INTERVALS)
		avg >>= INTERVAL_SHIFT;
	else
		do_div(avg, divisor);

	/* Then try to determine standard deviation */
	stddev = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = value - avg;
			stddev += diff * diff;
		}
	}
	if (divisor == INTERVALS)
		stddev >>= INTERVAL_SHIFT;
	else
		do_div(stddev, divisor);

	/*
	 * The typical interval is obtained when standard deviation is small
	 * or standard deviation is small compared to the average interval.
	 *
	 * int_sqrt() formal parameter type is unsigned long. When the
	 * greatest difference to an outlier exceeds ~65 ms * sqrt(divisor)
	 * the resulting squared standard deviation exceeds the input domain
	 * of int_sqrt on platforms where unsigned long is 32 bits in size.
	 * In such a case reject the candidate average.
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(stddev <= ULONG_MAX)) {
		stddev = int_sqrt(stddev);
		if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
							|| stddev <= 20) {
			if (data->next_timer_us > avg)
				data->predicted_us = avg;
			return;
		}
	}

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if ((divisor * 4) <= INTERVALS * 3)
		return;

	thresh = max - 1;
	goto again;
}
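
/*
 * Worked example: intervals of {100, 103, 97, 101, 99, 98, 102, 100}
 * average to 100 with a squared-deviation sum of 28, so stddev =
 * int_sqrt(28 >> INTERVAL_SHIFT) = 1, well under the cut-offs above;
 * 100 us is then used as the prediction if no timer fires sooner.
 */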

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	unsigned int interactivity_req;
	unsigned long nr_iowaiters, cpu_load;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;

	/* Special case when the user has set a very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length());

	get_iowait_load(&nr_iowaiters, &cpu_load);
	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
	data->predicted_us = div_round64((uint64_t)data->next_timer_us *
					 data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);
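
	/*
	 * Example: a 1000 us timer distance with a correction factor of
	 * 4096 (i.e. 0.5) gives predicted_us = div_round64(1000 * 4096,
	 * 8192) = 500.
	 */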

	get_typical_interval(data);

	/*
	 * Performance multiplier defines a minimum predicted idle
	 * duration / latency ratio. Adjust the latency limit if
	 * necessary.
	 */
	interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
	if (latency_req > interactivity_req)
		latency_req = interactivity_req;
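
	/*
	 * Example: predicted_us = 500 and a multiplier of 41 cap the
	 * acceptable exit latency at 500 / 41 = 12 us.
	 */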

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->next_timer_us > 5 &&
	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
		dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;

		data->last_state_idx = i;
	}

	return data->last_state_idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 *       the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	data->last_state_idx = index;
	if (index >= 0)
		data->needs_update = 1;
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = data->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we are basically lost in the dark about how much time passed.
	 * As a compromise, assume we slept for the whole expected time.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup began, not when it
	 * was completed, we must subtract the exit latency. However, if
405 406
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */
	if (unlikely(target->flags & CPUIDLE_FLAG_TIME_INVALID)) {
		/* Use timer value as is */
		measured_us = data->next_timer_us;

	} else {
		/* Use measured value */
		measured_us = cpuidle_get_last_residency(dev);

		/* Deduct exit latency */
		if (measured_us > target->exit_latency)
			measured_us -= target->exit_latency;

		/* Make sure our coefficients do not exceed unity */
		if (measured_us > data->next_timer_us)
			measured_us = data->next_timer_us;
	}

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->next_timer_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;
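
	/*
	 * Worked example: from a unity factor (8192), a sleep measured at
	 * half the predicted timer distance gives
	 * new_factor = 8192 - 8192/8 + 1024/2 = 7680; repeated such
	 * updates decay the factor towards 0.5 * RESOLUTION * DECAY = 4096.
	 */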

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = measured_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * If the correction factor is 0 (e.g. first time init or cpu hotplug
	 * etc.), we actually want to start out with a unity factor.
	 */
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
	.owner =	THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);