/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/tick.h>
#include <linux/types.h>
#include <linux/cpu.h>

#include "cpufreq_governor.h"

/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);

static struct od_ops od_ops;

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static struct cpufreq_governor cpufreq_gov_ondemand;
#endif

static unsigned int default_powersave_bias;

/* Reset powersave-bias state for @cpu: refresh its cached frequency table
 * pointer and clear any pending low-frequency phase. */
static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}
/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	int efficient_idle = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			     boot_cpu_data.x86 == 6 &&
			     boot_cpu_data.x86_model >= 15;

	if (efficient_idle)
		return 1;
#endif
	return 0;
}

/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	/* No table yet: disable the low phase and use the caller's target. */
	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	/* powersave_bias is in permille (0..1000) of the requested freq. */
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
	/* Split the period so the time-weighted average equals freq_avg. */
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);	/* round to nearest */
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}

/* Re-initialize powersave-bias state on every online CPU. */
static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}

/*
 * Raise policy @p to @freq. With powersave_bias set, the target is first
 * biased downward (the shortfall is made up by the SUB_SAMPLE low phase);
 * otherwise, if the policy is already at its maximum, do nothing.
 */
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	struct dbs_data *dbs_data = p->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(p, freq,
				CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	__cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

/*
 * Every sampling_rate, we check, if current idle time is less than 20%
 * (default), then we try to increase frequency. Else, we adjust the frequency
 * proportional to load.
 */
static void od_check_cpu(int cpu, unsigned int load)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	/* Invalidate any pending powersave-bias low phase. */
	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > od_tuners->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult =
				od_tuners->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next;
		freq_next = load * policy->cpuinfo.max_freq / 100;

		/* No longer fully busy, reset rate_mult */
		dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!od_tuners->powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			return;
		}

		freq_next = od_ops.powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
	}
}

/*
 * Deferred-work handler run every sampling period. Either performs a normal
 * load evaluation (OD_NORMAL_SAMPLE) or, when powersave_bias split the period
 * in two, drops to freq_lo for the remaining OD_SUB_SAMPLE phase. Re-queues
 * itself with the appropriate delay before returning.
 */
static void od_dbs_timer(struct work_struct *work)
{
	struct od_cpu_dbs_info_s *dbs_info =
		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
			cpu);
	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int delay = 0, sample_type = core_dbs_info->sample_type;
	bool modify_all = true;

	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
	/* Too early to sample again: just re-arm with the full delay. */
	if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
		modify_all = false;
		goto max_delay;
	}

	/* Common NORMAL_SAMPLE setup */
	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
	if (sample_type == OD_SUB_SAMPLE) {
		delay = core_dbs_info->freq_lo_jiffies;
		__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
				core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
	} else {
		dbs_check_cpu(dbs_data, cpu);
		if (core_dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			core_dbs_info->sample_type = OD_SUB_SAMPLE;
			delay = core_dbs_info->freq_hi_jiffies;
		}
	}

max_delay:
	if (!delay)
		delay = delay_for_sampling_rate(od_tuners->sampling_rate
				* core_dbs_info->rate_mult);

	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}

/************************** sysfs interface ************************/
static struct common_dbs_data od_dbs_cdata;

/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If new rate is smaller than the old, simply updating
 * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is 10
 * ms because the user needs immediate reaction from ondemand governor, but not
 * sure if higher frequency will be required or not, then, the governor may
 * change the sampling rate too late; up to 1 second later. Thus, if we are
 * reducing the sampling rate, we need to make the new value effective
 * immediately.
 */
static void update_sampling_rate(struct dbs_data *dbs_data,
		unsigned int new_rate)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int cpu;

	od_tuners->sampling_rate = new_rate = max(new_rate,
			dbs_data->min_sampling_rate);

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct od_cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		/* Only policies currently run by ondemand are affected. */
		if (policy->governor != &cpufreq_gov_ondemand) {
			cpufreq_cpu_put(policy);
			continue;
		}
		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->cdbs.timer_mutex);

		if (!delayed_work_pending(&dbs_info->cdbs.work)) {
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->cdbs.work.timer.expires;

		if (time_before(next_sampling, appointed_at)) {
			/*
			 * Drop the mutex while cancelling: the work handler
			 * takes timer_mutex itself, so holding it across
			 * cancel_delayed_work_sync() would deadlock.
			 */
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			cancel_delayed_work_sync(&dbs_info->cdbs.work);
			mutex_lock(&dbs_info->cdbs.timer_mutex);

			gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
					usecs_to_jiffies(new_rate), true);

		}
		mutex_unlock(&dbs_info->cdbs.timer_mutex);
	}
}

311 312
static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
		size_t count)
L
Linus Torvalds 已提交
313 314 315
{
	unsigned int input;
	int ret;
316
	ret = sscanf(buf, "%u", &input);
317 318
	if (ret != 1)
		return -EINVAL;
319 320

	update_sampling_rate(dbs_data, input);
L
Linus Torvalds 已提交
321 322 323
	return count;
}

/* sysfs store: toggle whether iowait time counts as busy, then refresh the
 * per-CPU idle baselines so the next sample uses the new accounting. */
static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	od_tuners->io_is_busy = !!input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
									j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
	}
	return count;
}

347 348
static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
L
Linus Torvalds 已提交
349
{
350
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
L
Linus Torvalds 已提交
351 352
	unsigned int input;
	int ret;
353
	ret = sscanf(buf, "%u", &input);
L
Linus Torvalds 已提交
354

355
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
356
			input < MIN_FREQUENCY_UP_THRESHOLD) {
L
Linus Torvalds 已提交
357 358
		return -EINVAL;
	}
359

360
	od_tuners->up_threshold = input;
L
Linus Torvalds 已提交
361 362 363
	return count;
}

/* sysfs store: set how many sampling periods to stay at max frequency after
 * a full-load sample, and reset every CPU's active multiplier. */
static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input, j;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	od_tuners->sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
				j);
		dbs_info->rate_mult = 1;
	}
	return count;
}

/* sysfs store: toggle whether nice-time counts as idle; on change, rebase
 * each CPU's idle/nice counters so the next sample is consistent. */
static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	/* Any non-zero value means "ignore nice load". */
	if (input > 1)
		input = 1;

	if (input == od_tuners->ignore_nice) { /* nothing to do */
		return count;
	}
	od_tuners->ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
		if (od_tuners->ignore_nice)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];

	}
	return count;
}

420 421
static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
		size_t count)
422
{
423
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
424 425 426 427 428 429 430 431 432 433
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

434
	od_tuners->powersave_bias = input;
435 436 437 438
	ondemand_powersave_bias_init();
	return count;
}

/* Generate show/store wrappers for both the system-wide and per-policy
 * sysfs variants of each tunable. */
show_store_one(od, sampling_rate);
show_store_one(od, io_is_busy);
show_store_one(od, up_threshold);
show_store_one(od, sampling_down_factor);
show_store_one(od, ignore_nice);
show_store_one(od, powersave_bias);
declare_show_sampling_rate_min(od);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(ignore_nice);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);

/* Attributes used when one tunable set is shared system-wide. */
static struct attribute *dbs_attributes_gov_sys[] = {
	&sampling_rate_min_gov_sys.attr,
	&sampling_rate_gov_sys.attr,
	&up_threshold_gov_sys.attr,
	&sampling_down_factor_gov_sys.attr,
	&ignore_nice_gov_sys.attr,
	&powersave_bias_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL
};

static struct attribute_group od_attr_group_gov_sys = {
	.attrs = dbs_attributes_gov_sys,
	.name = "ondemand",
};

/* Attributes used when each cpufreq policy carries its own tunables. */
static struct attribute *dbs_attributes_gov_pol[] = {
	&sampling_rate_min_gov_pol.attr,
	&sampling_rate_gov_pol.attr,
	&up_threshold_gov_pol.attr,
	&sampling_down_factor_gov_pol.attr,
	&ignore_nice_gov_pol.attr,
	&powersave_bias_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL
};

static struct attribute_group od_attr_group_gov_pol = {
	.attrs = dbs_attributes_gov_pol,
	.name = "ondemand",
};

/************************** sysfs end ************************/

/* Allocate and initialize ondemand's tunables for @dbs_data, picking finer
 * thresholds when idle micro-accounting is available. Returns 0 or -ENOMEM. */
static int od_init(struct dbs_data *dbs_data)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		*/
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;

		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}

	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice = 0;
	tuners->powersave_bias = default_powersave_bias;
	tuners->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	mutex_init(&dbs_data->mutex);
	return 0;
}

/* Free the tunables allocated by od_init(). */
static void od_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

define_get_cpu_dbs_routines(od_cpu_dbs_info);

/* Ondemand-specific operations invoked by the common governor code. */
static struct od_ops od_ops = {
	.powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
	.powersave_bias_target = generic_powersave_bias_target,
	.freq_increase = dbs_freq_increase,
};

/* Glue handed to the shared cpufreq_governor framework. */
static struct common_dbs_data od_dbs_cdata = {
	.governor = GOV_ONDEMAND,
	.attr_group_gov_sys = &od_attr_group_gov_sys,
	.attr_group_gov_pol = &od_attr_group_gov_pol,
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = od_dbs_timer,
	.gov_check_cpu = od_check_cpu,
	.gov_ops = &od_ops,
	.init = od_init,
	.exit = od_exit,
};
/*
 * Record @powersave_bias as the default for new policies and propagate it to
 * every policy currently run by the ondemand governor. Each policy is visited
 * once (its cpumask is merged into @done) under the CPU-hotplug read lock.
 */
static void od_set_powersave_bias(unsigned int powersave_bias)
{
	struct cpufreq_policy *policy;
	struct dbs_data *dbs_data;
	struct od_dbs_tuners *od_tuners;
	unsigned int cpu;
	cpumask_t done;

	default_powersave_bias = powersave_bias;
	cpumask_clear(&done);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, &done))
			continue;

		policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
		if (!policy)
			continue;

		cpumask_or(&done, &done, policy->cpus);

		if (policy->governor != &cpufreq_gov_ondemand)
			continue;

		dbs_data = policy->governor_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	put_online_cpus();
}

/* Let a platform driver install its own powersave-bias target function
 * and bias value (e.g. for BIOS-mandated frequency capping). */
void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	od_ops.powersave_bias_target = f;
	od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);

/* Restore the generic powersave-bias handler and disable the bias. */
void od_unregister_powersave_bias_handler(void)
{
	od_ops.powersave_bias_target = generic_powersave_bias_target;
	od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);

/* Governor callback: delegate all events to the common dbs governor core. */
static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
		unsigned int event)
{
	return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
}

/* Non-static when built as the default governor so cpufreq.c can link it. */
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= od_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};
static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

/* When ondemand is the default governor it must register before drivers
 * probe, hence fs_initcall instead of module_init. */
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);