/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */
/**
 * store_sampling_rate - update sampling rate effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate might not be appropriate.  For example, if the
 * original sampling_rate was 1 second and the user requests 10 ms because
 * they need an immediate reaction from the ondemand governor, the governor
 * could otherwise keep waiting on the old, longer interval and only apply
 * the change up to 1 second later.  Thus, if we are reducing the sampling
 * rate, we need to make the new value effective immediately.
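 *
 * Purely illustrative example (assuming the ondemand governor with
 * system-wide tunables, where the attribute typically lives directly under
 * the cpufreq sysfs directory):
 *
 *	# echo 10000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
 *
 * reaches this function through governor_store(), is clamped to
 * min_sampling_rate, and the shorter 10 ms period takes effect right away.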
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
			    size_t count)
{
	struct policy_dbs_info *policy_dbs;
	unsigned int rate;
	int ret;
	ret = sscanf(buf, "%u", &rate);
	if (ret != 1)
		return -EINVAL;

	dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		mutex_lock(&policy_dbs->timer_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->timer_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		unsigned int j;

		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall,
								  dbs_data->io_is_busy);
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
	}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);

static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
{
	return container_of(kobj, struct dbs_data, kobj);
}

static inline struct governor_attr *to_gov_attr(struct attribute *attr)
{
	return container_of(attr, struct governor_attr, attr);
}

static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	if (gattr->show)
		ret = gattr->show(dbs_data, buf);

	return ret;
}

static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	mutex_lock(&dbs_data->mutex);

	if (dbs_data->usage_count && gattr->store)
		ret = gattr->store(dbs_data, buf, count);

	mutex_unlock(&dbs_data->mutex);

	return ret;
}

/*
 * Sysfs Ops for accessing governor attributes.
 *
 * All show/store invocations for governor-specific sysfs attributes go
 * through the show/store callbacks below, which in turn invoke the
 * attribute-specific callback.
 */
static const struct sysfs_ops governor_sysfs_ops = {
	.show	= governor_show,
	.store	= governor_store,
};

unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0;
	unsigned int sampling_rate, io_busy, j;

	/*
	 * Sometimes governors may use an additional multiplier to increase
	 * sample delays temporarily.  Apply that multiplier to sampling_rate
	 * so as to keep the wake-up-from-idle detection logic a bit
	 * conservative.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
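	/*
	 * (For instance, the ondemand governor raises rate_mult up to its
	 * sampling_down_factor while the CPU runs at its highest frequency,
	 * so that load is re-evaluated less often before ramping back down.)
	 */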
	/*
	 * For the purpose of ondemand, waiting for disk IO is an indication
	 * that you're performance critical, and not that the system is
	 * actually idle, so do not add the iowait time to the CPU idle time
	 * in that case.
	 */
	io_busy = dbs_data->io_is_busy;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
		j_cdbs->prev_cpu_wall = cur_wall_time;

		if (cur_idle_time <= j_cdbs->prev_cpu_idle) {
			idle_time = 0;
		} else {
			idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
			j_cdbs->prev_cpu_idle = cur_idle_time;
		}

		if (ignore_nice) {
			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's utilization
		 * update handler would not have run during CPU-idle periods.
		 * Hence, an unusually large 'wall_time' (as compared to the
		 * sampling rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}
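		/*
		 * For illustration: with a wall_time of 20000 us and an
		 * idle_time of 5000 us, the normal branch above yields
		 * load = 100 * (20000 - 5000) / 20000 = 75, i.e. the CPU was
		 * busy for 75% of the elapsed window.
		 */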

		if (load > max_load)
			max_load = load;
	}

	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);

static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
				unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

		cpufreq_set_update_util_data(cpu, &cdbs->update_util);
	}
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_set_update_util_data(i, NULL);

	synchronize_rcu();
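	/*
	 * The synchronize_rcu() above is meant to make sure that any
	 * utilization update handler invocations still in flight (they go
	 * through the per-CPU pointers cleared in the loop) have finished
	 * before the caller continues tearing things down in
	 * gov_cancel_work().
	 */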
}

static void gov_cancel_work(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}

static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->timer_mutex);
	gov_update_sample_delay(policy_dbs, gov->gov_dbs_timer(policy));
	mutex_unlock(&policy_dbs->timer_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a stale
	 * sample delay value.
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}

static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work(&policy_dbs->work);
}

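/*
 * Note: dbs_update_util_handler() below runs in scheduler context via the
 * update_util hook, so it must not sleep or take sleeping locks.  It only
 * decides whether a new sample is due and, if so, kicks the irq_work above,
 * which in turn schedules dbs_work_handler() in process context where
 * blocking is fine.
 */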
static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned long util, unsigned long max)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns, lst;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.
	 */
	smp_rmb();
	lst = READ_ONCE(policy_dbs->last_sample_time);
	delta_ns = time - lst;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point.  Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared) {
		if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
			return;

		/*
		 * If another CPU updated last_sample_time in the meantime, we
		 * shouldn't be here, so clear the work counter and bail out.
		 */
		if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
			atomic_set(&policy_dbs->work_count, 0);
			return;
		}
	}

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for per-policy governor data. */
	policy_dbs = gov->alloc();
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->timer_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = policy_dbs;
		j_cdbs->update_util.func = dbs_update_util_handler;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
				 struct dbs_governor *gov)
{
	int j;

	mutex_destroy(&policy_dbs->timer_mutex);

	for_each_cpu(j, policy_dbs->policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	gov->free(policy_dbs);
}

static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data;
	struct policy_dbs_info *policy_dbs;
	unsigned int latency;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	dbs_data = gov->gdbs_data;
	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		mutex_lock(&dbs_data->mutex);
		dbs_data->usage_count++;
		list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
		mutex_unlock(&dbs_data->mutex);
		goto out;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
	mutex_init(&dbs_data->mutex);

	ret = gov->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_policy_dbs_info;

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
				      LATENCY_MULTIPLIER * latency);
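	/*
	 * Rough illustration, assuming the usual LATENCY_MULTIPLIER of 1000
	 * and MIN_LATENCY_MULTIPLIER of 20: a transition_latency of 10000 ns
	 * gives latency = 10 us, so min_sampling_rate is raised to at least
	 * 200 us and the default sampling_rate to at least 10000 us (10 ms).
	 */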

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy->governor_data = policy_dbs;

	policy_dbs->dbs_data = dbs_data;
	dbs_data->usage_count = 1;
	list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		goto out;

	/* Failure, so roll back. */
	pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data, !policy->governor->initialized);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy_dbs, gov);

out:
	mutex_unlock(&gov_dbs_data_mutex);
	return ret;
}

static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	int count;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	mutex_lock(&dbs_data->mutex);
	list_del(&policy_dbs->list);
	count = --dbs_data->usage_count;
	mutex_unlock(&dbs_data->mutex);

	if (!count) {
		kobject_put(&dbs_data->kobj);

		policy->governor_data = NULL;

		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data, policy->governor->initialized == 1);
		mutex_destroy(&dbs_data->mutex);
		kfree(dbs_data);
	} else {
		policy->governor_data = NULL;
	}

	free_policy_dbs_info(policy_dbs, gov);

	mutex_unlock(&gov_dbs_data_mutex);
	return 0;
}

static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j;
	unsigned int io_busy;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_load = 100 * prev_load / (unsigned int)j_cdbs->prev_cpu_wall;
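		/*
		 * The two lines above seed prev_load with the CPU's busy
		 * share over its whole accounted history (prev_cpu_wall and
		 * prev_cpu_idle are cumulative here), so dbs_update() has a
		 * non-arbitrary value to fall back on for its first
		 * wake-up-from-idle sample.
		 */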

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	gov->start(policy);

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}

static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
	gov_cancel_work(policy);
	return 0;
}

static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	mutex_lock(&policy_dbs->timer_mutex);

	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);

	gov_update_sample_delay(policy_dbs, 0);
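	/*
	 * The zero sample delay set above makes the next utilization update
	 * take a sample right away, so the governor re-evaluates the load
	 * promptly under the new limits instead of waiting out the previous
	 * sample period.
	 */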

	mutex_unlock(&policy_dbs->timer_mutex);

	return 0;
}

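/*
 * Single entry point called by the cpufreq core with governor events; it
 * dispatches them to the helpers above.  Apart from CPUFREQ_GOV_POLICY_INIT,
 * every event requires policy->governor_data to have been set up already.
 */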
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
	if (event == CPUFREQ_GOV_POLICY_INIT) {
		return cpufreq_governor_init(policy);
	} else if (policy->governor_data) {
		switch (event) {
		case CPUFREQ_GOV_POLICY_EXIT:
			return cpufreq_governor_exit(policy);
		case CPUFREQ_GOV_START:
			return cpufreq_governor_start(policy);
		case CPUFREQ_GOV_STOP:
			return cpufreq_governor_stop(policy);
		case CPUFREQ_GOV_LIMITS:
			return cpufreq_governor_limits(policy);
		}
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);