cpufreq_ondemand.c 17.4 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12
/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

13 14
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

15
#include <linux/cpu.h>
16
#include <linux/percpu-defs.h>
17
#include <linux/slab.h>
18
#include <linux/tick.h>
19
#include "cpufreq_governor.h"
L
Linus Torvalds 已提交
20

21
/* On-demand governor macros */
/* Default load (%) above which we jump straight to policy->max. */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
/* Multiplier applied to sampling_rate while running at max speed. */
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
/* Tighter threshold/minimum rate used when idle micro-accounting works. */
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
/* Bounds accepted by store_up_threshold(). */
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

/* Per-CPU governor state (freq table, lo/hi split bookkeeping, ...). */
static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);

static struct od_ops od_ops;

/* Defined with external linkage below when ondemand is the default governor. */
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static struct cpufreq_governor cpufreq_gov_ondemand;
#endif

/* powersave_bias applied to newly started policies; set via the handler API. */
static unsigned int default_powersave_bias;

40
static void ondemand_powersave_bias_init_cpu(int cpu)
41
{
42
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
43

44 45 46
	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}
47

48 49 50 51 52
/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;
	/* Intel Family 6, model 15 (Core 2) and later idle efficiently. */
	if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

71 72 73 74 75
/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	/* No frequency table: nothing to bias against, run freq_next as-is. */
	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	/* Resolve the requested frequency to a real table entry. */
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	/* powersave_bias is in thousandths: reduce the request by bias/1000. */
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		/* freq_avg sits exactly on a table entry: no split needed. */
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	/*
	 * Split one sampling period between freq_hi and freq_lo so the
	 * time-weighted average frequency approximates freq_avg; the
	 * "+ (freq_hi - freq_lo) / 2" rounds jiffies_hi to nearest.
	 */
	jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
131
		ondemand_powersave_bias_init_cpu(i);
132 133 134
	}
}

135 136
/*
 * Raise the frequency of policy @p to @freq, honouring powersave_bias.
 * With a non-zero bias the target is first reduced (and a lo/hi split is
 * scheduled); without bias, a request for the current max is a no-op.
 */
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	struct dbs_data *dbs_data = p->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(p, freq,
				CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	/* Biased targets already come from the table: pick the lower bound. */
	__cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

/*
 * Every sampling_rate, we check, if current idle time is less than 20%
 * (default), then we try to increase frequency. Else, we adjust the frequency
 * proportional to load.
 */
static void od_check_cpu(int cpu, unsigned int load)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	/* Forget any pending powersave-bias sub-sample from the last round. */
	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > od_tuners->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult =
				od_tuners->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next;
		freq_next = load * policy->cpuinfo.max_freq / 100;

		/* No longer fully busy, reset rate_mult */
		dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!od_tuners->powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			return;
		}

		/* Bias the proportional target downward before applying it. */
		freq_next = od_ops.powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
	}
}

195
/*
 * Periodic governor work. Either performs a normal load evaluation
 * (OD_NORMAL_SAMPLE) or, when powersave_bias scheduled a split period,
 * drops to freq_lo for the remainder of the period (OD_SUB_SAMPLE),
 * then re-queues itself with the appropriate delay.
 */
static void od_dbs_timer(struct work_struct *work)
{
	struct od_cpu_dbs_info_s *dbs_info =
		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
			cpu);
	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int delay = 0, sample_type = core_dbs_info->sample_type;
	bool modify_all = true;

	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
	/* Too early for a new sample: just re-arm with the full delay. */
	if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
		modify_all = false;
		goto max_delay;
	}

	/* Common NORMAL_SAMPLE setup */
	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
	if (sample_type == OD_SUB_SAMPLE) {
		/* Second half of a biased period: run at freq_lo. */
		delay = core_dbs_info->freq_lo_jiffies;
		__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
				core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
	} else {
		dbs_check_cpu(dbs_data, cpu);
		if (core_dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			core_dbs_info->sample_type = OD_SUB_SAMPLE;
			delay = core_dbs_info->freq_hi_jiffies;
		}
	}

max_delay:
	/* No split pending: full sampling period, stretched by rate_mult. */
	if (!delay)
		delay = delay_for_sampling_rate(od_tuners->sampling_rate
				* core_dbs_info->rate_mult);

	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}

237
/************************** sysfs interface ************************/
static struct common_dbs_data od_dbs_cdata;

/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If new rate is smaller than the old, simply updating
 * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is 10
 * ms because the user needs immediate reaction from ondemand governor, but not
 * sure if higher frequency will be required or not, then, the governor may
 * change the sampling rate too late; up to 1 second later. Thus, if we are
 * reducing the sampling rate, we need to make the new value effective
 * immediately.
 */
static void update_sampling_rate(struct dbs_data *dbs_data,
		unsigned int new_rate)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int cpu;

	/* Clamp to the platform minimum before storing. */
	od_tuners->sampling_rate = new_rate = max(new_rate,
			dbs_data->min_sampling_rate);

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct od_cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		/* Only CPUs actually governed by ondemand are affected. */
		if (policy->governor != &cpufreq_gov_ondemand) {
			cpufreq_cpu_put(policy);
			continue;
		}
		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->cdbs.timer_mutex);

		if (!delayed_work_pending(&dbs_info->cdbs.work)) {
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->cdbs.work.timer.expires;

		/* Re-arm only if the pending sample would fire too late. */
		if (time_before(next_sampling, appointed_at)) {

			/*
			 * od_dbs_timer() takes timer_mutex, so drop it here
			 * to avoid deadlocking against an in-flight work item
			 * while we cancel it.
			 */
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			cancel_delayed_work_sync(&dbs_info->cdbs.work);
			mutex_lock(&dbs_info->cdbs.timer_mutex);

			gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
					usecs_to_jiffies(new_rate), true);

		}
		mutex_unlock(&dbs_info->cdbs.timer_mutex);
	}
}

301 302
/* sysfs store: parse the new sampling rate and apply it immediately. */
static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	unsigned int rate;

	if (sscanf(buf, "%u", &rate) != 1)
		return -EINVAL;

	update_sampling_rate(dbs_data, rate);
	return count;
}

314 315
/* sysfs store: toggle whether iowait time counts as busy time. */
static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	unsigned int cpu;

	if (sscanf(buf, "%u", &input) != 1)
		return -EINVAL;

	od_tuners->io_is_busy = !!input;

	/* Re-baseline prev_cpu_idle under the new accounting mode. */
	for_each_online_cpu(cpu) {
		struct od_cpu_dbs_info_s *dbs_info;

		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(cpu,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
	}
	return count;
}

337 338
/* sysfs store: set the load (%) above which we jump to the max frequency. */
static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;

	if (sscanf(buf, "%u", &input) != 1)
		return -EINVAL;
	if (input < MIN_FREQUENCY_UP_THRESHOLD ||
	    input > MAX_FREQUENCY_UP_THRESHOLD)
		return -EINVAL;

	od_tuners->up_threshold = input;
	return count;
}

354 355
/* sysfs store: set the multiplier applied to sampling_rate at max speed. */
static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input, cpu;

	if (sscanf(buf, "%u", &input) != 1)
		return -EINVAL;
	if (input < 1 || input > MAX_SAMPLING_DOWN_FACTOR)
		return -EINVAL;

	od_tuners->sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(cpu)
		per_cpu(od_cpu_dbs_info, cpu).rate_mult = 1;

	return count;
}

375 376
/*
 * sysfs store: toggle whether nice time counts as idle. Changing it requires
 * re-baselining each CPU's idle (and nice) snapshots so the next sample does
 * not see a bogus delta.
 */
static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	/* Boolean tunable: clamp anything non-zero to 1. */
	if (input > 1)
		input = 1;

	if (input == od_tuners->ignore_nice) { /* nothing to do */
		return count;
	}
	od_tuners->ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
		/* Snapshot nice time only when it is now treated as idle. */
		if (od_tuners->ignore_nice)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];

	}
	return count;
}

410 411
/* sysfs store: set powersave_bias (thousandths, capped at 1000 = 100%). */
static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int bias;

	if (sscanf(buf, "%u", &bias) != 1)
		return -EINVAL;

	if (bias > 1000)
		bias = 1000;

	od_tuners->powersave_bias = bias;
	ondemand_powersave_bias_init();
	return count;
}

429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452
/*
 * Expand the show()/store() wrappers for both sysfs flavours — system-wide
 * (gov_sys) and per-policy (gov_pol) tunables — from the store_*() helpers
 * above. The macros come from cpufreq_governor.h.
 */
show_store_one(od, sampling_rate);
show_store_one(od, io_is_busy);
show_store_one(od, up_threshold);
show_store_one(od, sampling_down_factor);
show_store_one(od, ignore_nice);
show_store_one(od, powersave_bias);
declare_show_sampling_rate_min(od);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(ignore_nice);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);

/* Attribute set exposed when tunables are shared system-wide. */
static struct attribute *dbs_attributes_gov_sys[] = {
	&sampling_rate_min_gov_sys.attr,
	&sampling_rate_gov_sys.attr,
	&up_threshold_gov_sys.attr,
	&sampling_down_factor_gov_sys.attr,
	&ignore_nice_gov_sys.attr,
	&powersave_bias_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL
};

static struct attribute_group od_attr_group_gov_sys = {
	.attrs = dbs_attributes_gov_sys,
	.name = "ondemand",
};

/* Attribute set exposed when each policy carries its own tunables. */
static struct attribute *dbs_attributes_gov_pol[] = {
	&sampling_rate_min_gov_pol.attr,
	&sampling_rate_gov_pol.attr,
	&up_threshold_gov_pol.attr,
	&sampling_down_factor_gov_pol.attr,
	&ignore_nice_gov_pol.attr,
	&powersave_bias_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL
};

static struct attribute_group od_attr_group_gov_pol = {
	.attrs = dbs_attributes_gov_pol,
	.name = "ondemand",
};

/************************** sysfs end ************************/

479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512
static int od_init(struct dbs_data *dbs_data)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		*/
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;

		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}

	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice = 0;
513
	tuners->powersave_bias = default_powersave_bias;
514 515 516 517 518 519 520 521 522 523 524 525
	tuners->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	mutex_init(&dbs_data->mutex);
	return 0;
}

/* Free the tunables allocated by od_init(). */
static void od_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

526
/* Generate get_cpu_cdbs()/get_cpu_dbs_info_s() accessors for our percpu data. */
define_get_cpu_dbs_routines(od_cpu_dbs_info);

/* Hooks that platform code may override via the powersave-bias handler API. */
static struct od_ops od_ops = {
	.powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
	.powersave_bias_target = generic_powersave_bias_target,
	.freq_increase = dbs_freq_increase,
};

/* Glue handed to the common governor core (cpufreq_governor_dbs()). */
static struct common_dbs_data od_dbs_cdata = {
	.governor = GOV_ONDEMAND,
	.attr_group_gov_sys = &od_attr_group_gov_sys,
	.attr_group_gov_pol = &od_attr_group_gov_pol,
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = od_dbs_timer,
	.gov_check_cpu = od_check_cpu,
	.gov_ops = &od_ops,
	.init = od_init,
	.exit = od_exit,
};
L
Linus Torvalds 已提交
546

547 548 549 550 551 552 553 554
/*
 * Apply a new powersave_bias to every policy currently run by ondemand,
 * and record it as the default for policies started later.
 */
static void od_set_powersave_bias(unsigned int powersave_bias)
{
	struct cpufreq_policy *policy;
	struct dbs_data *dbs_data;
	struct od_dbs_tuners *od_tuners;
	unsigned int cpu;
	cpumask_t done;

	default_powersave_bias = powersave_bias;
	cpumask_clear(&done);

	/* Hold off CPU hotplug while we walk the online CPUs. */
	get_online_cpus();
	for_each_online_cpu(cpu) {
		/* Skip CPUs whose (shared) policy was already handled. */
		if (cpumask_test_cpu(cpu, &done))
			continue;

		policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
		if (!policy)
			continue;

		/* Mark all CPUs of this policy as visited in one go. */
		cpumask_or(&done, &done, policy->cpus);

		if (policy->governor != &cpufreq_gov_ondemand)
			continue;

		dbs_data = policy->governor_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	put_online_cpus();
}

/*
 * Install a platform-specific powersave-bias target function and bias value;
 * replaces generic_powersave_bias_target() for all ondemand policies.
 */
void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	od_ops.powersave_bias_target = f;
	od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);

/* Restore the generic target function and clear the bias. */
void od_unregister_powersave_bias_handler(void)
{
	od_ops.powersave_bias_target = generic_powersave_bias_target;
	od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);

595 596
/* Governor entry point: delegate all events to the common dbs core. */
static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
		unsigned int event)
{
	return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
}

/* External linkage only when ondemand is the build-time default governor. */
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= od_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};
L
Linus Torvalds 已提交
610 611 612

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

/*
 * When ondemand is the default governor, register it via fs_initcall so it
 * exists before cpufreq drivers initialize; otherwise plain module_init.
 */
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);