/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * dbs is used in this file as a short form for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the
 * transition latency of the processor. The governor will work on any
 * processor with transition latency <= 10 ms, using an appropriate
 * sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in us (microseconds).
 */
#define MIN_SAMPLING_RATE_RATIO			(2)

static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER			(1000)
#define MIN_LATENCY_MULTIPLIER			(100)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
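/*
 * Note: TRANSITION_LATENCY_LIMIT is in ns; via .max_transition_latency
 * below, the cpufreq core refuses this governor for drivers whose
 * transition latency exceeds 10 ms.
 */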

static void do_dbs_timer(struct work_struct *work);
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
       .name                   = "ondemand",
       .governor               = cpufreq_governor_dbs,
       .max_transition_latency = TRANSITION_LATENCY_LIMIT,
       .owner                  = THIS_MODULE,
};

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_iowait;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
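	/*
	 * Sampling-rate multiplier: set to sampling_down_factor when the
	 * CPU is fully busy at max speed and reset to 1 once load drops,
	 * so fully loaded CPUs are re-evaluated less often.
	 */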
	unsigned int rate_mult;
	int cpu;
	unsigned int sample_type:1;
	/*
	 * percpu mutex that serializes governor limit change with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * when the user is changing the governor or limits.
	 */
	struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
 * different CPUs. It protects dbs_enable in governor start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct	*kondemand_wq;

static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int ignore_nice;
	unsigned int sampling_down_factor;
	unsigned int powersave_bias;
	unsigned int io_is_busy;
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.ignore_nice = 0,
	.powersave_bias = 0,
};

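/*
 * Jiffy-granularity fallback: idle = wall - (user + system + irq +
 * softirq + steal + nice), with both values converted to us.
 */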
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
							cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);
	if (wall)
		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);

	return (cputime64_t)jiffies_to_usecs(idle_time);
}

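/*
 * Prefer the NO_HZ micro-accounted idle time; get_cpu_idle_time_us()
 * returns -1ULL when that accounting is unavailable, in which case we
 * fall back to the jiffy-based statistics above.
 */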
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);

	return idle_time;
}

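/*
 * get_cpu_iowait_time_us() likewise returns -1ULL without NO_HZ
 * accounting; report zero iowait then, so the io_is_busy adjustment
 * in dbs_check_cpu() becomes a no-op.
 */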
static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
{
	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

	if (iowait_time == -1ULL)
		return 0;

	return iowait_time;
}

/*
 * Find the right frequency to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and sets freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in the percpu area so that the two
 * frequencies average out to the requested target.
 */
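/*
 * Example (illustrative numbers): with powersave_bias = 100 (10%) and
 * freq_req = 2000 MHz, freq_avg = 1800 MHz. If the table brackets that
 * with 1600 and 2000 MHz, jiffies_hi = (1800 - 1600) * jiffies_total /
 * (2000 - 1600): half the period runs at 2000 MHz and half at 1600 MHz,
 * averaging 1800 MHz.
 */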
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}

static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}

/************************** sysfs interface ************************/

static ssize_t show_sampling_rate_max(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
	       "sysfs file is deprecated - used by: %s\n", current->comm);
	return sprintf(buf, "%u\n", -1U);
}

static ssize_t show_sampling_rate_min(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", min_sampling_rate);
}

define_one_global_ro(sampling_rate_max);
define_one_global_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct kobject *kobj, struct attribute *attr, char *buf)              \
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);

/*** delete after deprecation time ***/

#define DEPRECATION_MSG(file_name)					\
	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
		    "interface is deprecated - " #file_name "\n");

#define show_one_old(file_name)						\
static ssize_t show_##file_name##_old					\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
		    "interface is deprecated - " #file_name "\n");	\
	return show_##file_name(NULL, NULL, buf);			\
}
show_one_old(sampling_rate);
show_one_old(up_threshold);
show_one_old(ignore_nice_load);
show_one_old(powersave_bias);
show_one_old(sampling_rate_min);
show_one_old(sampling_rate_max);

cpufreq_freq_attr_ro_old(sampling_rate_min);
cpufreq_freq_attr_ro_old(sampling_rate_max);

/*** delete after deprecation time ***/

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.io_is_busy = !!input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_sampling_down_factor(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
{
	unsigned int input, j;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->rate_mult = 1;
	}
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;

	}
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
				    const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.powersave_bias = input;
	ondemand_powersave_bias_init();
	mutex_unlock(&dbs_mutex);

	return count;
}

define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

/*** delete after deprecation time ***/

#define write_one_old(file_name)					\
static ssize_t store_##file_name##_old					\
(struct cpufreq_policy *unused, const char *buf, size_t count)		\
{									\
       printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
		   "interface is deprecated - " #file_name "\n");	\
       return store_##file_name(NULL, NULL, buf, count);		\
}
write_one_old(sampling_rate);
write_one_old(up_threshold);
write_one_old(ignore_nice_load);
write_one_old(powersave_bias);

cpufreq_freq_attr_rw_old(sampling_rate);
cpufreq_freq_attr_rw_old(up_threshold);
cpufreq_freq_attr_rw_old(ignore_nice_load);
cpufreq_freq_attr_rw_old(powersave_bias);

static struct attribute *dbs_attributes_old[] = {
       &sampling_rate_max_old.attr,
       &sampling_rate_min_old.attr,
       &sampling_rate_old.attr,
       &up_threshold_old.attr,
       &ignore_nice_load_old.attr,
       &powersave_bias_old.attr,
       NULL
};

static struct attribute_group dbs_attr_group_old = {
       .attrs = dbs_attributes_old,
       .name = "ondemand",
};

/*** delete after deprecation time ***/

/************************** sysfs end ************************/

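/*
 * Raise the frequency: with powersave_bias the request is first blended
 * down by powersave_bias_target(); otherwise jump straight to the
 * requested frequency, returning early if already at policy->max.
 */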
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	if (dbs_tuners_ins.powersave_bias)
		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	__cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int max_load_freq;

	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate we check whether the current idle time is
	 * less than 20% (default). If it is, we try to increase the
	 * frequency. Every sampling_rate we also look for the lowest
	 * frequency that can sustain the load while keeping idle time
	 * over 30%. If such a frequency exists, we try to decrease to it.
	 *
	 * Any frequency increase takes the CPU to the maximum frequency;
	 * frequency reduction targets the lowest frequency that keeps a
	 * down_differential (10 points by default) margin under
	 * up_threshold.
	 */
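
	/*
	 * Illustration (assumed numbers): with up_threshold = 80 and the
	 * CPU at 1000 MHz, a weighted load above 80% (max_load_freq >
	 * 80 * 1000) jumps straight to policy->max, while a 50% load
	 * targets freq_next = 50 * 1000 / (80 - 10) ~= 714 MHz, rounded
	 * via the CPUFREQ_RELATION_L table lookup.
	 */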

	/* Get Absolute Load - in terms of freq */
	max_load_freq = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
		unsigned int idle_time, wall_time, iowait_time;
		unsigned int load, load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);

		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
				j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
				j_dbs_info->prev_cpu_iowait);
		j_dbs_info->prev_cpu_iowait = cur_iowait_time;

		if (dbs_tuners_ins.ignore_nice) {
			cputime64_t cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
					 j_dbs_info->prev_cpu_nice);
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		/*
		 * For the purpose of ondemand, waiting for disk IO is an
		 * indication that you're performance critical, and not that
		 * the system is actually idle. So subtract the iowait time
		 * from the cpu idle time.
		 */

		if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
			idle_time -= iowait_time;

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;

		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}

	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			this_dbs_info->rate_mult =
				dbs_tuners_ins.sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the lowest frequency that can support
	 * the current CPU usage without triggering the up policy. To be
	 * safe, we stay down_differential points under the threshold.
	 */
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	     policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		/* No longer fully busy, reset rate_mult */
		this_dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
	}
}

static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	/* We want all CPUs to do sampling nearly on the same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
		* dbs_info->rate_mult);

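	/*
	 * e.g. delay = 10 jiffies and jiffies % 10 == 3: shorten this
	 * period to 7 jiffies so every CPU's timer expires on a multiple
	 * of 10.
	 */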
	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	mutex_lock(&dbs_info->timer_mutex);

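	/*
	 * With powersave_bias, each period is split in two: a NORMAL
	 * sample evaluates load and, if powersave_bias_target() armed
	 * freq_lo, schedules a SUB sample after freq_hi_jiffies; the SUB
	 * sample then drops to freq_lo until the next NORMAL sample.
	 */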
	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
	}
	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on the same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
		delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	cancel_delayed_work_sync(&dbs_info->work);
}

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default,
 * and leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		mutex_lock(&dbs_mutex);

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
		if (rc) {
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		dbs_enable++;
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice) {
				j_dbs_info->prev_cpu_nice =
						kstat_cpu(j).cpustat.nice;
			}
		}
		this_dbs_info->cpu = cpu;
		this_dbs_info->rate_mult = 1;
		ondemand_powersave_bias_init_cpu(cpu);
		/*
		 * Start the timer-scheduled work when this governor
		 * is used for the first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;

			rc = sysfs_create_group(cpufreq_global_kobject,
						&dbs_attr_group);
			if (rc) {
				mutex_unlock(&dbs_mutex);
				return rc;
			}

			/* policy latency is in ns; convert it to us first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;
			/* Bring kernel and HW constraints together */
			min_sampling_rate = max(min_sampling_rate,
					MIN_LATENCY_MULTIPLIER * latency);
			dbs_tuners_ins.sampling_rate =
				max(min_sampling_rate,
				    latency * LATENCY_MULTIPLIER);
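			/*
			 * e.g. transition_latency = 100 us gives a default
			 * sampling_rate of 100 * LATENCY_MULTIPLIER =
			 * 100000 us (100 ms), floored at min_sampling_rate.
			 */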
			dbs_tuners_ins.io_is_busy = should_io_be_busy();
		}
		mutex_unlock(&dbs_mutex);

		mutex_init(&this_dbs_info->timer_mutex);
		dbs_timer_init(this_dbs_info);
		break;

	case CPUFREQ_GOV_STOP:
		dbs_timer_exit(this_dbs_info);

		mutex_lock(&dbs_mutex);
		sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
		mutex_destroy(&this_dbs_info->timer_mutex);
		dbs_enable--;
		mutex_unlock(&dbs_mutex);
		if (!dbs_enable)
			sysfs_remove_group(cpufreq_global_kobject,
					   &dbs_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&this_dbs_info->timer_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&this_dbs_info->timer_mutex);
		break;
	}
	return 0;
}

static int __init cpufreq_gov_dbs_init(void)
{
	int err;
	cputime64_t wall;
	u64 idle_time;
	int cpu = get_cpu();

	idle_time = get_cpu_idle_time_us(cpu, &wall);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_tuners_ins.down_differential =
					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
		/*
		 * In the no_hz/micro accounting case we set the minimum
		 * sampling rate not depending on HZ, but fixed (very low).
		 * The deferred timer might skip some samples if
		 * idle/sleeping, as needed.
		 */
		min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		/* For correct statistics, we need 10 ticks for each measure */
		min_sampling_rate =
			MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
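		/* e.g. HZ=1000: 2 * jiffies_to_usecs(10) = 20000 us (20 ms) */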
	}

	kondemand_wq = create_workqueue("kondemand");
	if (!kondemand_wq) {
		printk(KERN_ERR "Creation of kondemand failed\n");
		return -EFAULT;
	}
	err = cpufreq_register_governor(&cpufreq_gov_ondemand);
	if (err)
		destroy_workqueue(kondemand_wq);

	return err;
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
	destroy_workqueue(kondemand_wq);
}


MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

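/*
 * When ondemand is the default governor it is registered early via
 * fs_initcall() so it is already available when cpufreq drivers
 * initialize; otherwise normal module initialization suffices.
 */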
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);