/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

DEFINE_MUTEX(dbs_data_mutex);
EXPORT_SYMBOL_GPL(dbs_data_mutex);

/* Common sysfs tunables */
/**
 * store_sampling_rate - update sampling rate effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate might not be appropriate.  For example, if the
 * original sampling rate was 1 second and the requested new one is 10 ms
 * (because the user wants an immediate reaction from the ondemand governor,
 * while being unsure whether a higher frequency will be needed), the governor
 * may not pick up the new rate until up to 1 second later.  Thus, when
 * reducing the sampling rate, the new value has to take effect immediately.
 *
 * On the other hand, if the new rate is larger than the old one, the load may
 * be evaluated too soon, so it may be worth updating sample_delay_ns in that
 * case as well.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
			    size_t count)
{
	struct policy_dbs_info *policy_dbs;
	unsigned int rate;
	int ret;

	ret = sscanf(buf, "%u", &rate);
	if (ret != 1)
		return -EINVAL;

	dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		mutex_lock(&policy_dbs->timer_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.  If the returned value is too
		 * small, the sample will be taken immediately, but that isn't a
		 * problem, as we want the new rate to take effect immediately
		 * anyway.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but the difference should not be too big and it will
		 * be corrected next time a sample is taken, so it shouldn't be
		 * significant.
		 */
		gov_update_sample_delay(policy_dbs, dbs_data->sampling_rate);
		mutex_unlock(&policy_dbs->timer_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);
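
/*
 * For example, with system-wide governor tunables (!have_governor_per_policy())
 * and the ondemand governor, the sampling rate (in microseconds) can be
 * changed at run time via the sysfs attribute backed by this helper:
 *
 *	# echo 10000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
 *
 * (The exact path differs when the cpufreq driver provides per-policy
 * tunables.)
 */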

static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
{
	return container_of(kobj, struct dbs_data, kobj);
}

static inline struct governor_attr *to_gov_attr(struct attribute *attr)
{
	return container_of(attr, struct governor_attr, attr);
}

static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	if (gattr->show)
		ret = gattr->show(dbs_data, buf);

	return ret;
}

static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	mutex_lock(&dbs_data->mutex);

	if (gattr->store)
		ret = gattr->store(dbs_data, buf, count);

	mutex_unlock(&dbs_data->mutex);

	return ret;
}
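
/*
 * Note the locking asymmetry above: governor_store() serializes tunable
 * updates with dbs_data->mutex, while governor_show() reads the (word-sized)
 * tunables without taking the lock.
 */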

/*
 * Sysfs Ops for accessing governor attributes.
 *
 * All show/store invocations for governor-specific sysfs attributes go
 * through the show/store callbacks below first, which then invoke the
 * attribute-specific callback.
 */
static const struct sysfs_ops governor_sysfs_ops = {
	.show	= governor_show,
	.store	= governor_store,
};
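
/*
 * Load evaluation summary: dbs_check_cpu() below computes, for each CPU
 * sharing the policy, the load over the interval since that CPU was last
 * sampled as
 *
 *	load = 100 * (wall_time - idle_time) / wall_time
 *
 * For instance, a CPU that was idle for 20 ms out of a 100 ms window reports
 * a load of 100 * (100 - 20) / 100 = 80.  The maximum load across
 * policy->cpus is then passed to the governor-specific ->gov_check_cpu()
 * callback, which selects the target frequency.
 */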

void dbs_check_cpu(struct cpufreq_policy *policy)
{
	int cpu = policy->cpu;
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int sampling_rate = dbs_data->sampling_rate;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0;
	unsigned int j;

	if (gov->governor == GOV_ONDEMAND) {
		struct od_cpu_dbs_info_s *od_dbs_info =
				gov->get_cpu_dbs_info_s(cpu);

		/*
		 * Sometimes, the ondemand governor uses an additional
		 * multiplier to give long delays. So apply this multiplier to
		 * the 'sampling_rate', so as to keep the wake-up-from-idle
		 * detection logic a bit conservative.
		 */
		sampling_rate *= od_dbs_info->rate_mult;
	}

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = gov->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is an
		 * indication that the task is performance-critical, not that
		 * the system is actually idle, so do not add the iowait time
		 * to the CPU idle time.
		 */
		if (gov->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		if (cur_idle_time < j_cdbs->prev_cpu_idle)
			cur_idle_time = j_cdbs->prev_cpu_idle;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					 cdbs->prev_cpu_nice;
			/*
			 * Assumption: the nice time between sampling periods
			 * will be less than 2^32 jiffies on 32-bit systems.
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			cdbs->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's utilization
		 * update handler would not have run during CPU-idle periods.
		 * Hence, an unusually large 'wall_time' (as compared to the
		 * sampling rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}

	gov->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

void gov_set_update_util(struct policy_dbs_info *policy_dbs,
			 unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	struct dbs_governor *gov = dbs_governor_of(policy);
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);

		cpufreq_set_update_util_data(cpu, &cdbs->update_util);
	}
}
EXPORT_SYMBOL_GPL(gov_set_update_util);

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_set_update_util_data(i, NULL);

	/* Wait for any in-flight utilization update handlers to finish. */
	synchronize_rcu();
}

static void gov_cancel_work(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	/* Tell dbs_update_util_handler() to skip queuing up work items. */
	atomic_inc(&policy_dbs->work_count);
	/*
	 * If dbs_update_util_handler() is already running, it may not notice
	 * the incremented work_count, so wait for it to complete to prevent its
	 * work item from being queued up after the cancel_work_sync() below.
	 */
	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
}

static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;
	unsigned int delay;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load and that
	 * the ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->timer_mutex);
	delay = gov->gov_dbs_timer(policy);
	policy_dbs->sample_delay_ns = jiffies_to_nsecs(delay);
	mutex_unlock(&policy_dbs->timer_mutex);

	/*
	 * If the atomic operation below is reordered with respect to the
	 * sample delay modification, the utilization update handler may end
	 * up using a stale sample delay value.
	 */
	smp_mb__before_atomic();
	atomic_dec(&policy_dbs->work_count);
}

static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work(&policy_dbs->work);
}
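
/*
 * dbs_update_util_handler() below is called from scheduler paths with
 * interrupts disabled, so it must not sleep or take mutexes.  It only checks
 * whether enough time has passed since the last sample; the actual load
 * evaluation and frequency change are deferred via dbs_irq_work() above to
 * dbs_work_handler(), which runs in process context.
 */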

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned long util, unsigned long max)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - The governor is being stopped.
	 * - It is too early (too little time from the previous sample).
	 */
	if (atomic_inc_return(&policy_dbs->work_count) == 1) {
		u64 delta_ns;

		delta_ns = time - policy_dbs->last_sample_time;
		if ((s64)delta_ns >= policy_dbs->sample_delay_ns) {
			policy_dbs->last_sample_time = time;
			irq_work_queue(&policy_dbs->irq_work);
			return;
		}
	}
	atomic_dec(&policy_dbs->work_count);
}

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for the common information for policy->cpus */
	policy_dbs = kzalloc(sizeof(*policy_dbs), GFP_KERNEL);
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->timer_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);

		j_cdbs->policy_dbs = policy_dbs;
		j_cdbs->update_util.func = dbs_update_util_handler;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct cpufreq_policy *policy,
				 struct dbs_governor *gov)
{
	struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(policy->cpu);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	int j;

	mutex_destroy(&policy_dbs->timer_mutex);

	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	kfree(policy_dbs);
}

static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data = gov->gdbs_data;
	struct policy_dbs_info *policy_dbs;
	unsigned int latency;
	int ret;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		mutex_lock(&dbs_data->mutex);
		dbs_data->usage_count++;
		list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
		mutex_unlock(&dbs_data->mutex);

		return 0;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
	mutex_init(&dbs_data->mutex);

	ret = gov->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_policy_dbs_info;

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
				      LATENCY_MULTIPLIER * latency);

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy->governor_data = policy_dbs;

	policy_dbs->dbs_data = dbs_data;
	dbs_data->usage_count = 1;
	list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		return 0;

	/* Failure, so roll back. */
	pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data, !policy->governor->initialized);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy, gov);
	return ret;
}

static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	int count;

	mutex_lock(&dbs_data->mutex);
	list_del(&policy_dbs->list);
	count = --dbs_data->usage_count;
	mutex_unlock(&dbs_data->mutex);

	if (!count) {
		kobject_put(&dbs_data->kobj);

		policy->governor_data = NULL;

		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data, policy->governor->initialized == 1);
		mutex_destroy(&dbs_data->mutex);
		kfree(dbs_data);
	} else {
		policy->governor_data = NULL;
	}

	free_policy_dbs_info(policy, gov);
	return 0;
}

static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;

	if (gov->governor == GOV_ONDEMAND) {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		io_busy = od_tuners->io_is_busy;
	}

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle =
			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
					    j_cdbs->prev_cpu_idle);
		j_cdbs->prev_load = 100 * prev_load /
				    (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	if (gov->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			gov->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = gov->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info = gov->get_cpu_dbs_info_s(cpu);

		od_dbs_info->rate_mult = 1;
		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}

static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
	gov_cancel_work(policy);

	return 0;
}

static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	mutex_lock(&policy_dbs->timer_mutex);
	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
	dbs_check_cpu(policy);
	mutex_unlock(&policy_dbs->timer_mutex);

	return 0;
}

int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
	int ret = -EINVAL;

	/* Lock governor to block concurrent initialization of governor */
	mutex_lock(&dbs_data_mutex);

	if (event == CPUFREQ_GOV_POLICY_INIT) {
		ret = cpufreq_governor_init(policy);
	} else if (policy->governor_data) {
		switch (event) {
		case CPUFREQ_GOV_POLICY_EXIT:
			ret = cpufreq_governor_exit(policy);
			break;
		case CPUFREQ_GOV_START:
			ret = cpufreq_governor_start(policy);
			break;
		case CPUFREQ_GOV_STOP:
			ret = cpufreq_governor_stop(policy);
			break;
		case CPUFREQ_GOV_LIMITS:
			ret = cpufreq_governor_limits(policy);
			break;
		}
	}

	mutex_unlock(&dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
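
/*
 * The ondemand and conservative governors embed a struct cpufreq_governor in
 * their struct dbs_governor and set its ->governor callback to
 * cpufreq_governor_dbs(), so all INIT/EXIT/START/STOP/LIMITS events from the
 * cpufreq core are funnelled through the common code above.
 */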