/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10ms, using an appropriate sampling
 * rate.
 * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work.
 * All times here are in us.
 */
#define MIN_SAMPLING_RATE_RATIO			(2)

static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER			(1000)
#define MIN_LATENCY_MULTIPLIER			(100)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)

static void do_dbs_timer(struct work_struct *work);
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
       .name                   = "ondemand",
       .governor               = cpufreq_governor_dbs,
       .max_transition_latency = TRANSITION_LATENCY_LIMIT,
       .owner                  = THIS_MODULE,
};

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_iowait;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	unsigned int rate_mult;
	int cpu;
	unsigned int sample_type:1;
	/*
	 * percpu mutex that serializes governor limit change with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * when user is changing the governor or limits.
	 */
	struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * dbs_mutex protects dbs_enable in governor start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int ignore_nice;
	unsigned int sampling_down_factor;
	unsigned int powersave_bias;
	unsigned int io_is_busy;
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.ignore_nice = 0,
	.powersave_bias = 0,
};

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
}
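
/*
 * Note: get_cpu_idle_time_us() returns -1ULL when NO_HZ idle-time
 * accounting is not available for a CPU; that is what triggers the
 * jiffy-granularity fallback above. Otherwise the iowait time is folded
 * into the idle time handed back to the sampling code below.
 */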

static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
{
	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

	if (iowait_time == -1ULL)
		return 0;

	return iowait_time;
}

/*
 * Find the right frequency to be set now with powersave_bias enabled.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}
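
/*
 * Worked example for powersave_bias_target() (illustrative numbers,
 * assuming a frequency table with 1.6 GHz and 2.0 GHz steps): with
 * powersave_bias = 100 (i.e. 10%) and a requested target of 2.0 GHz,
 * freq_avg = 2.0 GHz - 200 MHz = 1.8 GHz. That value is bracketed by
 * freq_lo = 1.6 GHz and freq_hi = 2.0 GHz, so the sampling window is split
 * (freq_avg - freq_lo) / (freq_hi - freq_lo) = 1/2 at freq_hi and the rest
 * at freq_lo, averaging out to roughly the requested 1.8 GHz.
 */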

static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}

/************************** sysfs interface ************************/

static ssize_t show_sampling_rate_min(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", min_sampling_rate);
}

define_one_global_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct kobject *kobj, struct attribute *attr, char *buf)              \
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);

/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old, simply updating
 * dbs_tuners_ins.sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the user, needing a quicker
 * reaction from the ondemand governor, requests a new rate of 10 ms, the
 * governor might not notice the change until the old 1 second interval
 * expires, i.e. up to 1 second too late. Thus, if we are reducing the
 * sampling rate, we need to make the new value effective immediately.
 */
static void update_sampling_rate(unsigned int new_rate)
{
	int cpu;

	dbs_tuners_ins.sampling_rate = new_rate
				     = max(new_rate, min_sampling_rate);

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->timer_mutex);

		if (!delayed_work_pending(&dbs_info->work)) {
			mutex_unlock(&dbs_info->timer_mutex);
			continue;
		}

		next_sampling  = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->work.timer.expires;


		if (time_before(next_sampling, appointed_at)) {

			mutex_unlock(&dbs_info->timer_mutex);
			cancel_delayed_work_sync(&dbs_info->work);
			mutex_lock(&dbs_info->timer_mutex);

			schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work,
						 usecs_to_jiffies(new_rate));

		}
		mutex_unlock(&dbs_info->timer_mutex);
	}
}

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	update_sampling_rate(input);
	return count;
}

static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.io_is_busy = !!input;
	return count;
}

static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.up_threshold = input;
	return count;
}

static ssize_t store_sampling_down_factor(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
{
	unsigned int input, j;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	dbs_tuners_ins.sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->rate_mult = 1;
	}
	return count;
}
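
/*
 * Note on this tunable: sampling_down_factor multiplies the sampling
 * interval once the CPU has been pushed to its maximum frequency (see
 * rate_mult in do_dbs_timer()), so a fully loaded CPU is re-evaluated
 * less often before it is allowed to step back down. rate_mult is reset
 * to 1 as soon as the load falls below the down threshold.
 */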

static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

	}
	return count;
}

static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
				    const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	dbs_tuners_ins.powersave_bias = input;
	ondemand_powersave_bias_init();
	return count;
}

define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};
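
/*
 * Example usage from userspace (a sketch; with this global attribute group
 * the tunables are expected under /sys/devices/system/cpu/cpufreq/ondemand/,
 * though the exact location can vary with kernel configuration):
 *
 *   # cat /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate_min
 *   # echo 95 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
 *   # echo 1 > /sys/devices/system/cpu/cpufreq/ondemand/io_is_busy
 */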

/************************** sysfs end ************************/

static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	if (dbs_tuners_ins.powersave_bias)
		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	__cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int max_load_freq;

	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate, we check whether the current idle time is less
	 * than 20% (default). If it is, we try to increase the frequency.
	 * Every sampling_rate, we also look for the lowest
	 * frequency which can sustain the load while keeping idle time over
	 * 30%. If such a frequency exists, we try to decrease to this frequency.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of the current frequency.
	 */

	/* Get Absolute Load - in terms of freq */
	max_load_freq = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
		unsigned int idle_time, wall_time, iowait_time;
		unsigned int load, load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);

		wall_time = (unsigned int)
			(cur_wall_time - j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int)
			(cur_idle_time - j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		iowait_time = (unsigned int)
			(cur_iowait_time - j_dbs_info->prev_cpu_iowait);
		j_dbs_info->prev_cpu_iowait = cur_iowait_time;

		if (dbs_tuners_ins.ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					 j_dbs_info->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		/*
		 * For the purpose of ondemand, waiting for disk IO is an
		 * indication that you're performance critical, and not that
		 * the system is actually idle. So subtract the iowait time
		 * from the cpu idle time.
		 */

		if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
			idle_time -= iowait_time;

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;

		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}

	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			this_dbs_info->rate_mult =
				dbs_tuners_ins.sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the frequency that is the lowest that
	 * can support the current CPU usage without triggering the up
	 * policy. To be safe, we focus 10 points under the threshold.
	 */
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	     policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		/* No longer fully busy, reset rate_mult */
		this_dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
	}
}
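
/*
 * Worked example for the thresholds above (illustrative numbers): with
 * up_threshold = 80 and policy->cur = 2000000 kHz, the "increase" test
 * fires once max_load_freq = load * freq_avg exceeds 80 * 2000000, e.g. a
 * 90% load measured at an average of 1800000 kHz gives
 * 162000000 > 160000000, so we jump straight to policy->max. The
 * "decrease" path picks
 * freq_next = max_load_freq / (up_threshold - down_differential), i.e. the
 * lowest frequency that should still keep the load comfortably below the
 * up threshold on the next sample.
 */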

static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	int delay;

	mutex_lock(&dbs_info->timer_mutex);

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		} else {
			/* We want all CPUs to do sampling nearly on
			 * same jiffy
			 */
			delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
				* dbs_info->rate_mult);

			if (num_online_cpus() > 1)
				delay -= jiffies % delay;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
		delay = dbs_info->freq_lo_jiffies;
	}
	schedule_delayed_work_on(cpu, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	cancel_delayed_work_sync(&dbs_info->work);
}

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		mutex_lock(&dbs_mutex);

		dbs_enable++;
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice)
				j_dbs_info->prev_cpu_nice =
						kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
		this_dbs_info->cpu = cpu;
		this_dbs_info->rate_mult = 1;
		ondemand_powersave_bias_init_cpu(cpu);
		/*
		 * Start the timer schedule work when this governor
		 * is used for the first time.
		 */
		if (dbs_enable == 1) {
			unsigned int latency;

			rc = sysfs_create_group(cpufreq_global_kobject,
						&dbs_attr_group);
			if (rc) {
				mutex_unlock(&dbs_mutex);
				return rc;
			}

			/* policy latency is in ns. Convert it to us first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;
			/* Bring kernel and HW constraints together */
			min_sampling_rate = max(min_sampling_rate,
					MIN_LATENCY_MULTIPLIER * latency);
			dbs_tuners_ins.sampling_rate =
				max(min_sampling_rate,
				    latency * LATENCY_MULTIPLIER);
			dbs_tuners_ins.io_is_busy = should_io_be_busy();
		}
		mutex_unlock(&dbs_mutex);

		mutex_init(&this_dbs_info->timer_mutex);
		dbs_timer_init(this_dbs_info);
		break;

	case CPUFREQ_GOV_STOP:
		dbs_timer_exit(this_dbs_info);

		mutex_lock(&dbs_mutex);
		mutex_destroy(&this_dbs_info->timer_mutex);
		dbs_enable--;
		mutex_unlock(&dbs_mutex);
		if (!dbs_enable)
			sysfs_remove_group(cpufreq_global_kobject,
					   &dbs_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&this_dbs_info->timer_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&this_dbs_info->timer_mutex);
		break;
	}
	return 0;
}

static int __init cpufreq_gov_dbs_init(void)
{
	u64 idle_time;
	int cpu = get_cpu();

	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_tuners_ins.down_differential =
					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		/* For correct statistics, we need 10 ticks for each measure */
		min_sampling_rate =
			MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
	}

	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}


MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);