/*
 *  drivers/cpufreq/cpufreq_conservative.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *            (C)  2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. Default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10mS, using an appropriate sampling
 * rate.
 * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work.
 * All times here are in uS.
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO			(2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE 			\
			(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
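/* e.g. with HZ=1000, jiffies_to_usecs(10) is 10000 uS, so this floor is 20000 uS */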
#define MIN_SAMPLING_RATE			\
			(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon
 * Define the minimal settable sampling rate to the greater of:
 *   - "HW transition latency" * 100 (same as default sampling / 10)
 *   - MIN_STAT_SAMPLING_RATE
 * so that userspace cannot shoot itself in the foot.
 */
static unsigned int minimum_sampling_rate(void)
{
	return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
}

/* This will also vanish soon with removing sampling_rate_max */
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define LATENCY_MULTIPLIER			(1000)
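/*
 * e.g. with HZ=1000 (MIN_STAT_SAMPLING_RATE = 20000 uS), a driver reporting
 * a 100000 nS transition latency gets a default sampling rate of
 * max((100000 / 1000) * 1000, 20000) = 100000 uS in the GOV_START path below
 */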
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
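/* 10 mS expressed in nS; enforced via the governor's max_transition_latency below */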

static void do_dbs_timer(struct work_struct *work);

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	unsigned int down_skip;
	unsigned int requested_freq;
	int cpu;
	unsigned int enable:1;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * DEADLOCK ALERT! There is an ordering requirement between cpu_hotplug
 * lock and dbs_mutex. cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
 * is recursive for the same process. -Venki
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct	*kconservative_wq;

static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int sampling_down_factor;
	unsigned int up_threshold;
	unsigned int down_threshold;
	unsigned int ignore_nice;
	unsigned int freq_step;
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.ignore_nice = 0,
	.freq_step = 5,
};

static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
							cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);
	if (wall)
		*wall = cur_wall_time;

	return idle_time;
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
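		/* -1ULL means tickless idle accounting is unavailable */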
		return get_cpu_idle_time_jiffy(cpu, wall);

	return idle_time;
}

/* keep track of frequency transitions */
static int
dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
		     void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
							freq->cpu);

	struct cpufreq_policy *policy;

	if (!this_dbs_info->enable)
		return 0;

	policy = this_dbs_info->cur_policy;

	/*
	 * we only care if our internally tracked freq moves outside
	 * the 'valid' range of frequencies available to us, otherwise
	 * we do not change it
	 */
	if (this_dbs_info->requested_freq > policy->max
			|| this_dbs_info->requested_freq < policy->min)
		this_dbs_info->requested_freq = freq->new;

	return 0;
}

static struct notifier_block dbs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier
};

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: conservative sampling_rate_min "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_conservative Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);

static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_down_factor = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > 100 ||
			input <= dbs_tuners_ins.down_threshold) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_down_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	/* cannot be lower than 11 otherwise freq will not fall */
	if (ret != 1 || input < 11 || input > 100 ||
			input >= dbs_tuners_ins.up_threshold) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.down_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
	}
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_freq_step(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/* no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :) */
	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.freq_step = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(freq_step);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "conservative",
};

/************************** sysfs end ************************/

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int load = 0;
	unsigned int freq_target;

	struct cpufreq_policy *policy;
	unsigned int j;

	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate, we check if the current idle time is less
	 * than 20% (default); if it is, we try to increase the frequency.
	 * Every sampling_rate*sampling_down_factor, we check if the current
	 * idle time is more than 80%; if it is, we try to decrease the
	 * frequency.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of the maximum frequency.
	 */

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
				j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		if (dbs_tuners_ins.ignore_nice) {
			cputime64_t cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
					 j_dbs_info->prev_cpu_nice);
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;
	}

	/*
	 * break out if we 'cannot' reduce the speed as the user might
	 * want freq_step to be zero
	 */
	if (dbs_tuners_ins.freq_step == 0)
		return;

	/* Check for frequency increase */
	if (load > dbs_tuners_ins.up_threshold) {
		this_dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (this_dbs_info->requested_freq == policy->max)
			return;

		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
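		/*
		 * e.g. the default freq_step of 5 with a policy->max of
		 * 2000000 kHz gives a 100000 kHz (100 MHz) step
		 */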

		/* max freq cannot be less than 100. But who knows.... */
		if (unlikely(freq_target == 0))
			freq_target = 5;

		this_dbs_info->requested_freq += freq_target;
		if (this_dbs_info->requested_freq > policy->max)
			this_dbs_info->requested_freq = policy->max;

		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
			CPUFREQ_RELATION_H);
		return;
	}

	/*
	 * The optimal frequency is the lowest frequency that can support
	 * the current CPU usage without triggering the up policy. To be
	 * safe, we stay 10 points below the threshold.
	 */
	if (load < (dbs_tuners_ins.down_threshold - 10)) {
		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

		this_dbs_info->requested_freq -= freq_target;
		if (this_dbs_info->requested_freq < policy->min)
			this_dbs_info->requested_freq = policy->min;

		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (policy->cur == policy->min)
			return;

		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
				CPUFREQ_RELATION_H);
		return;
	}
}

static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;

	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	delay -= jiffies % delay;
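	/* the subtraction above aligns the expiry to a whole multiple of
	 * 'delay' jiffies, keeping the per-CPU timers roughly in phase */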

	if (lock_policy_rwsem_write(cpu) < 0)
		return;

	if (!dbs_info->enable) {
		unlock_policy_rwsem_write(cpu);
		return;
	}

	dbs_check_cpu(dbs_info);

	queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
	unlock_policy_rwsem_write(cpu);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	delay -= jiffies % delay;

	dbs_info->enable = 1;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
				delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	dbs_info->enable = 0;
	cancel_delayed_work(&dbs_info->work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		if (this_dbs_info->enable) /* Already enabled */
			break;

		mutex_lock(&dbs_mutex);

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
		if (rc) {
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice) {
				j_dbs_info->prev_cpu_nice =
						kstat_cpu(j).cpustat.nice;
			}
		}
		this_dbs_info->down_skip = 0;
		this_dbs_info->requested_freq = policy->cur;

		dbs_enable++;
		/*
		 * When this governor is used for the first time, derive the
		 * default sampling rate from the driver's transition latency
		 * and register the frequency-transition notifier
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;

			def_sampling_rate =
				max(latency * LATENCY_MULTIPLIER,
				    MIN_STAT_SAMPLING_RATE);

			dbs_tuners_ins.sampling_rate = def_sampling_rate;

			cpufreq_register_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}
		dbs_timer_init(this_dbs_info);

		mutex_unlock(&dbs_mutex);

		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&dbs_mutex);
		dbs_timer_exit(this_dbs_info);
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;

		/*
		 * Unregister the frequency-transition notifier when the last
		 * CPU using this governor is stopped
		 */
		if (dbs_enable == 0)
			cpufreq_unregister_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);

		mutex_unlock(&dbs_mutex);

		break;

	case CPUFREQ_GOV_LIMITS:
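		/* clamp the current frequency into the new policy limits */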
		mutex_lock(&dbs_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&dbs_mutex);

		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
	.name			= "conservative",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	int err;

	kconservative_wq = create_workqueue("kconservative");
	if (!kconservative_wq) {
		printk(KERN_ERR "Creation of kconservative failed\n");
		return -EFAULT;
	}

	err = cpufreq_register_governor(&cpufreq_gov_conservative);
	if (err)
		destroy_workqueue(kconservative_wq);

	return err;
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_conservative);
	destroy_workqueue(kconservative_wq);
}


MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);