/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

DEFINE_MUTEX(dbs_data_mutex);
EXPORT_SYMBOL_GPL(dbs_data_mutex);

/* Common sysfs tunables */
/**
 * store_sampling_rate - update sampling rate effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs.sampling_rate might not be appropriate.  For example, if the original
 * sampling_rate was 1 second and the requested new rate is 10 ms because the
 * user needs an immediate reaction from the ondemand governor, but is not sure
 * whether a higher frequency will be required, then the governor may apply the
 * new rate too late: up to 1 second later.  Thus, if we are reducing the
 * sampling rate, we need to make the new value effective immediately.
 *
 * On the other hand, if the new rate is larger than the old one, then we may
 * evaluate the load too soon, so it might be worth updating sample_delay_ns
 * then as well.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
			    size_t count)
{
	struct policy_dbs_info *policy_dbs;
	unsigned int rate;
	int ret;
	ret = sscanf(buf, "%u", &rate);
	if (ret != 1)
		return -EINVAL;

	dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		mutex_lock(&policy_dbs->timer_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.  If the returned value is too
		 * small, the sample will be taken immediately, but that isn't a
		 * problem, as we want the new rate to take effect immediately
		 * anyway.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but the difference should not be too big and it will
		 * be corrected next time a sample is taken, so it shouldn't be
		 * significant.
		 */
		gov_update_sample_delay(policy_dbs, dbs_data->sampling_rate);
		mutex_unlock(&policy_dbs->timer_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);

static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
{
	return container_of(kobj, struct dbs_data, kobj);
}

static inline struct governor_attr *to_gov_attr(struct attribute *attr)
{
	return container_of(attr, struct governor_attr, attr);
}

static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	if (gattr->show)
		ret = gattr->show(dbs_data, buf);

	return ret;
}

static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	mutex_lock(&dbs_data->mutex);

	if (gattr->store)
		ret = gattr->store(dbs_data, buf, count);

	mutex_unlock(&dbs_data->mutex);

	return ret;
}

/*
 * Sysfs Ops for accessing governor attributes.
 *
 * All show/store invocations for governor-specific sysfs attributes first call
 * the show/store callbacks below; the attribute-specific callback is then
 * invoked from within them.
 */
static const struct sysfs_ops governor_sysfs_ops = {
	.show	= governor_show,
	.store	= governor_store,
};

void dbs_check_cpu(struct cpufreq_policy *policy)
{
	int cpu = policy->cpu;
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int sampling_rate = dbs_data->sampling_rate;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0;
	unsigned int j;

	if (gov->governor == GOV_ONDEMAND) {
		struct od_cpu_dbs_info_s *od_dbs_info =
				gov->get_cpu_dbs_info_s(cpu);

		/*
		 * Sometimes, the ondemand governor uses an additional
		 * multiplier to give long delays. So apply this multiplier to
		 * the 'sampling_rate', so as to keep the wake-up-from-idle
		 * detection logic a bit conservative.
		 */
		sampling_rate *= od_dbs_info->rate_mult;

	}

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = gov->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle. So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (gov->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		if (cur_idle_time < j_cdbs->prev_cpu_idle)
			cur_idle_time = j_cdbs->prev_cpu_idle;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's utilization
		 * update handler would not have run during CPU-idle periods.
		 * Hence, an unusually large 'wall_time' (as compared to the
		 * sampling rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}

	gov->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

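/*
 * gov_set_update_util - register the governor's utilization update callback
 * for every CPU in the policy.  Clearing last_sample_time ensures that the
 * first callback invocation after this will take a sample right away.
 */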
void gov_set_update_util(struct policy_dbs_info *policy_dbs,
			 unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	struct dbs_governor *gov = dbs_governor_of(policy);
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);

		cpufreq_set_update_util_data(cpu, &cdbs->update_util);
	}
}
EXPORT_SYMBOL_GPL(gov_set_update_util);

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_set_update_util_data(i, NULL);

	synchronize_rcu();
}

static void gov_cancel_work(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}

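/*
 * dbs_work_handler - process-context part of a sample: let the governor
 * evaluate the load (and possibly change the frequency) and compute the delay
 * until the next sample.
 */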
static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;
	unsigned int delay;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load and that
	 * the ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->timer_mutex);
	delay = gov->gov_dbs_timer(policy);
	policy_dbs->sample_delay_ns = jiffies_to_nsecs(delay);
	mutex_unlock(&policy_dbs->timer_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a stale
	 * sample delay value.
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}

static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work(&policy_dbs->work);
}

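/*
 * dbs_update_util_handler - utilization update callback invoked from scheduler
 * paths where sleeping is not allowed.  It only checks whether enough time has
 * passed since the last sample and, if so, queues the irq_work that will
 * schedule dbs_work_handler() in process context.
 */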
static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned long util, unsigned long max)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.
	 */
	smp_rmb();
	delta_ns = time - policy_dbs->last_sample_time;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point.  Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared &&
	    !atomic_add_unless(&policy_dbs->work_count, 1, 1))
		return;

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}

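/*
 * alloc_policy_dbs_info - allocate and initialize the data shared by all CPUs
 * in the policy and point each CPU's cpu_dbs_info at it.
 */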
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for the common information for policy->cpus */
	policy_dbs = kzalloc(sizeof(*policy_dbs), GFP_KERNEL);
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->timer_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);

		j_cdbs->policy_dbs = policy_dbs;
		j_cdbs->update_util.func = dbs_update_util_handler;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct cpufreq_policy *policy,
				 struct dbs_governor *gov)
{
	struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(policy->cpu);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	int j;

	mutex_destroy(&policy_dbs->timer_mutex);

	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	kfree(policy_dbs);
}

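/*
 * cpufreq_governor_init - allocate the per-policy data and attach it to an
 * existing dbs_data instance if there is one (tunables shared across
 * policies); otherwise create a new dbs_data, let the governor initialize its
 * tunables and expose them via a sysfs kobject.
 */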
static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data = gov->gdbs_data;
	struct policy_dbs_info *policy_dbs;
	unsigned int latency;
	int ret;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		mutex_lock(&dbs_data->mutex);
		dbs_data->usage_count++;
		list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
		mutex_unlock(&dbs_data->mutex);

		return 0;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
	mutex_init(&dbs_data->mutex);

	ret = gov->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_policy_dbs_info;

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
				      LATENCY_MULTIPLIER * latency);

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy->governor_data = policy_dbs;

	policy_dbs->dbs_data = dbs_data;
	dbs_data->usage_count = 1;
	list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		return 0;

	/* Failure, so roll back. */
	pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data, !policy->governor->initialized);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy, gov);
	return ret;
}

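/*
 * cpufreq_governor_exit - drop this policy's reference on dbs_data, freeing
 * the tunables when the last policy using them goes away, and free the
 * per-policy data.
 */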
static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	int count;

	mutex_lock(&dbs_data->mutex);
	list_del(&policy_dbs->list);
	count = --dbs_data->usage_count;
	mutex_unlock(&dbs_data->mutex);

	if (!count) {
		kobject_put(&dbs_data->kobj);

		policy->governor_data = NULL;

		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data, policy->governor->initialized == 1);
		mutex_destroy(&dbs_data->mutex);
		kfree(dbs_data);
	} else {
		policy->governor_data = NULL;
	}

	free_policy_dbs_info(policy, gov);
	return 0;
}

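/*
 * cpufreq_governor_start - initialize the per-CPU idle/wall time baselines and
 * the governor-specific state, then start load evaluation by registering the
 * utilization update hooks.
 */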
static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;

	if (gov->governor == GOV_ONDEMAND) {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		io_busy = od_tuners->io_is_busy;
	}

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle =
			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
					    j_cdbs->prev_cpu_idle);
		j_cdbs->prev_load = 100 * prev_load /
				    (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	if (gov->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			gov->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = gov->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info = gov->get_cpu_dbs_info_s(cpu);

		od_dbs_info->rate_mult = 1;
		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}

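/*
 * cpufreq_governor_stop - stop load evaluation: unregister the utilization
 * update hooks and cancel any work still in flight.
 */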
static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
	gov_cancel_work(policy);

	return 0;
}

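/*
 * cpufreq_governor_limits - clamp the current frequency to the new policy
 * limits and re-evaluate the load under the timer mutex.
 */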
static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	mutex_lock(&policy_dbs->timer_mutex);
	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
	dbs_check_cpu(policy);
	mutex_unlock(&policy_dbs->timer_mutex);

	return 0;
}

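/*
 * cpufreq_governor_dbs - common governor entry point: dispatch the cpufreq
 * core's INIT/EXIT/START/STOP/LIMITS events to the handlers above under
 * dbs_data_mutex.
 */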
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
	int ret = -EINVAL;

	/* Lock governor to block concurrent initialization of governor */
	mutex_lock(&dbs_data_mutex);

	if (event == CPUFREQ_GOV_POLICY_INIT) {
		ret = cpufreq_governor_init(policy);
	} else if (policy->governor_data) {
		switch (event) {
		case CPUFREQ_GOV_POLICY_EXIT:
			ret = cpufreq_governor_exit(policy);
			break;
		case CPUFREQ_GOV_START:
			ret = cpufreq_governor_start(policy);
			break;
		case CPUFREQ_GOV_STOP:
			ret = cpufreq_governor_stop(policy);
			break;
		case CPUFREQ_GOV_LIMITS:
			ret = cpufreq_governor_limits(policy);
			break;
		}
	}

	mutex_unlock(&dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);