/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * the scaling multiplication may overflow on 32 bit platforms.
 * In that case, define RESOLUTION with a ULL suffix to get a 64 bit
 * result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVALS 8
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400
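
/*
 * Sanity check of the defaults (a worked example, not functional code):
 * (MAX_INTERESTING - 1) * RESOLUTION = 49999 * 1024 = 51,198,976, well
 * below UINT_MAX, so the default values indeed do not overflow.
 */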

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the  C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * that is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, however it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual factor is whether there is (disk) IO outstanding.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
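 * A worked example of applying the factor (illustrative numbers): with
 * next_timer_us = 1000 and a learned ratio of 0.5, stored in fixed point
 * as 0.5 * RESOLUTION * DECAY = 4096, the prediction becomes
 * 1000 * 4096 / (RESOLUTION * DECAY) = 500 microseconds.
 *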
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
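 *
 * For instance (illustrative): eight samples near 4000us with a standard
 * deviation of 15us pass the stddev <= 20 acceptance test, so ~4000us is
 * used as the prediction whenever the next timer is further away.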
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * roughly 20 points are added for each unit of "per cpu load average" we
 * have, and 10 points are added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
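 *
 * As a worked example (illustrative numbers): a per-cpu load average of
 * 1.0 contributes 2 * 10 = 20 and two iowait tasks contribute 10 * 2 = 20,
 * giving mult = 1 + 20 + 20 = 41, so a C state is only chosen when the
 * predicted idle period exceeds 41 times its exit latency.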
 *
 */

struct menu_device {
	int		last_state_idx;
	int             needs_update;

	unsigned int	next_timer_us;
	unsigned int	predicted_us;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};


#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static int get_loadavg(void)
{
	unsigned long this = this_cpu_load();

	return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
}
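
/*
 * Worked example (illustrative): for a fixed-point load average of 2.30,
 * LOAD_INT() gives 2 and LOAD_FRAC() gives 30, so get_loadavg() returns
 * 2 * 10 + 30 / 10 = 23, roughly ten "points" per unit of load.
 */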

static inline int which_bucket(unsigned int duration)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with no
	 * IO pending, one with IO pending.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowait_cpu(smp_processor_id()))
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}
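
/*
 * Example (illustrative): a 3000us expected sleep maps to bucket 3 with no
 * IO pending, or bucket BUCKETS/2 + 3 = 9 when IO is outstanding.
 */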

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(void)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */

	mult += 2 * get_loadavg();

	/* for IO wait tasks (per cpu!) we add 10x each */
	mult += 10 * nr_iowait_cpu(smp_processor_id());

	return mult;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
static u64 div_round64(u64 dividend, u32 divisor)
{
	return div_u64(dividend + (divisor / 2), divisor);
}
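
/*
 * Example (illustrative): div_round64(10, 4) evaluates (10 + 2) / 4 = 3,
 * i.e. 2.5 rounded to nearest, using only a 64-by-32-bit division.
 */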

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static void get_typical_interval(struct menu_device *data)
{
	int i, divisor;
	unsigned int max, thresh;
	uint64_t avg, stddev;

	thresh = UINT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	max = 0;
	avg = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			avg += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	do_div(avg, divisor);

	/* Then try to determine standard deviation */
	stddev = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = value - avg;
			stddev += diff * diff;
		}
	}
	do_div(stddev, divisor);
	/*
	 * The typical interval is obtained when standard deviation is small
	 * or standard deviation is small compared to the average interval.
	 *
	 * int_sqrt() formal parameter type is unsigned long. When the
	 * greatest difference to an outlier exceeds ~65 ms * sqrt(divisor)
	 * the resulting squared standard deviation exceeds the input domain
	 * of int_sqrt on platforms where unsigned long is 32 bits in size.
	 * In such a case reject the candidate average.
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(stddev <= ULONG_MAX)) {
		stddev = int_sqrt(stddev);
		if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
							|| stddev <= 20) {
			if (data->next_timer_us > avg)
				data->predicted_us = avg;
			return;
		}
	}

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
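	/*
	 * Illustrative walk-through: for intervals {500, 505, 495, 510, 490,
	 * 500, 10000, 498} the first pass fails the deviation test, the
	 * 10000us outlier is excluded by thresh = max - 1, and the second
	 * pass keeps 7 of 8 samples (7 * 4 >= 8 * 3), now with a small
	 * stddev, so roughly 500us becomes the typical interval.
	 */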
	if ((divisor * 4) <= INTERVALS * 3)
		return;

	thresh = max - 1;
	goto again;
}

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	unsigned int interactivity_req;
	struct timespec t;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->next_timer_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

	data->bucket = which_bucket(data->next_timer_us);

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
	data->predicted_us = div_round64((uint64_t)data->next_timer_us *
					 data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	get_typical_interval(data);

	/*
	 * Performance multiplier defines a minimum predicted idle
	 * duration / latency ratio. Adjust the latency limit if
	 * necessary.
	 */
	interactivity_req = data->predicted_us / performance_multiplier();
	if (latency_req > interactivity_req)
		latency_req = interactivity_req;

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->next_timer_us > 5 &&
	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
	    dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;

		data->last_state_idx = i;
	}

	return data->last_state_idx;
}
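
/*
 * Illustrative selection pass: with predicted_us = 300 and a performance
 * multiplier of 41, latency_req is capped at 300 / 41 = 7us, so the loop
 * above settles on the deepest state whose target_residency is at most
 * 300us and whose exit_latency is at most 7us.
 */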

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 *       the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);

	data->last_state_idx = index;
	if (index >= 0)
		data->needs_update = 1;
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int last_idx = data->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we are basically lost in the dark how much time passed.
	 * As a compromise, assume we slept for the whole expected time.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup began, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */
	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID))) {
		/* Use timer value as is */
		measured_us = data->next_timer_us;

	} else {
		/* Use measured value */
		measured_us = cpuidle_get_last_residency(dev);

		/* Deduct exit latency */
		if (measured_us > target->exit_latency)
			measured_us -= target->exit_latency;

		/* Make sure our coefficients do not exceed unity */
		if (measured_us > data->next_timer_us)
			measured_us = data->next_timer_us;
	}

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->next_timer_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = measured_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}
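
/*
 * Worked example of the update above (illustrative numbers): starting from
 * unity (RESOLUTION * DECAY = 8192) with measured_us = 500 and
 * next_timer_us = 1000, one pass gives 8192 - 8192/8 + 1024 * 500 / 1000
 * = 7680, i.e. the factor moves 1/8 of the way toward the observed
 * 0.5 ratio on each update.
 */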

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * If the correction factor is 0 (e.g. first time init or cpu
	 * hotplug), we actually want to start out with a unity factor.
	 */
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
	.owner =	THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);