/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(C) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

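/*
 * Tunables may be global or per-policy, so return whichever sysfs attribute
 * group matches the current mode.
 */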
static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
	if (have_governor_per_policy())
		return dbs_data->cdata->attr_group_gov_pol;
	else
		return dbs_data->cdata->attr_group_gov_sys;
}

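/*
 * Compute the load of every CPU in the policy and pass the highest one to
 * the governor-specific gov_check_cpu() callback, which takes the actual
 * frequency-scaling decision.
 */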
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	struct cpufreq_policy *policy = cdbs->shared->policy;
	unsigned int sampling_rate;
	unsigned int max_load = 0;
	unsigned int ignore_nice;
	unsigned int j;

	if (dbs_data->cdata->governor == GOV_ONDEMAND) {
		struct od_cpu_dbs_info_s *od_dbs_info =
				dbs_data->cdata->get_cpu_dbs_info_s(cpu);

		/*
		 * Sometimes, the ondemand governor uses an additional
		 * multiplier to give long delays. So apply this multiplier to
		 * the 'sampling_rate', so as to keep the wake-up-from-idle
		 * detection logic a bit conservative.
		 */
		sampling_rate = od_tuners->sampling_rate;
		sampling_rate *= od_dbs_info->rate_mult;

		ignore_nice = od_tuners->ignore_nice_load;
	} else {
		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	}

	/* Get the absolute load for each CPU of the policy */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle. So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (dbs_data->cdata->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			/*
			 * Nice time is tracked per CPU, so use j_cdbs here
			 * rather than the policy CPU's cdbs.
			 */
			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					j_cdbs->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies on 32-bit systems.
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_cdbs->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's deferrable
		 * timer would not have fired during CPU-idle periods. Hence
		 * an unusually large 'wall_time' (as compared to the sampling
		 * rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}

	dbs_data->cdata->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

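/* (Re)arm the sampling work for one CPU after the given delay. */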
static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
		unsigned int delay)
{
	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);

	mod_delayed_work_on(cpu, system_wq, &cdbs->dwork, delay);
}

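/*
 * Queue the sampling work either on the local CPU only, or on every CPU
 * managed by the policy when all_cpus is true.
 */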
void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
		unsigned int delay, bool all_cpus)
{
	int i;

	if (!all_cpus) {
		/*
		 * Use raw_smp_processor_id() to avoid preemptible warnings.
		 * We know that this is only called with all_cpus == false from
		 * works that have been queued with *_work_on() functions and
		 * those works are canceled during CPU_DOWN_PREPARE so they
		 * can't possibly run on any other CPU.
		 */
		__gov_queue_work(raw_smp_processor_id(), dbs_data, delay);
	} else {
		for_each_cpu(i, policy->cpus)
			__gov_queue_work(i, dbs_data, delay);
	}
}
EXPORT_SYMBOL_GPL(gov_queue_work);

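/* Synchronously cancel the sampling work on every CPU of the policy. */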
static inline void gov_cancel_work(struct dbs_data *dbs_data,
		struct cpufreq_policy *policy)
{
	struct cpu_dbs_info *cdbs;
	int i;

	for_each_cpu(i, policy->cpus) {
		cdbs = dbs_data->cdata->get_cpu_cdbs(i);
		cancel_delayed_work_sync(&cdbs->dwork);
	}
}

/*
 * Return true if the CPU load should be evaluated again. For a shared policy,
 * sampling is rate-limited: if any CPU of the policy sampled less than half a
 * sampling period ago, skip this round.
 */
static bool need_load_eval(struct cpu_common_dbs_info *shared,
			   unsigned int sampling_rate)
{
	if (policy_is_shared(shared->policy)) {
		ktime_t time_now = ktime_get();
		s64 delta_us = ktime_us_delta(time_now, shared->time_stamp);

		/* Do nothing if we sampled recently */
		if (delta_us < (s64)(sampling_rate / 2))
			return false;
		else
			shared->time_stamp = time_now;
	}

	return true;
}

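/*
 * Common work handler for both governors: runs under timer_mutex, bails out
 * if the governor was stopped in the meantime, evaluates the load when due
 * and re-queues itself with the delay chosen by gov_dbs_timer().
 */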
static void dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info,
						 dwork.work);
	struct cpu_common_dbs_info *shared = cdbs->shared;
	struct cpufreq_policy *policy;
	struct dbs_data *dbs_data;
	unsigned int sampling_rate, delay;
	bool modify_all = true;

	mutex_lock(&shared->timer_mutex);

	policy = shared->policy;

	/*
	 * Governor might already be disabled and there is no point continuing
	 * with the work-handler.
	 */
	if (!policy)
		goto unlock;

	dbs_data = policy->governor_data;

	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		sampling_rate = cs_tuners->sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		sampling_rate = od_tuners->sampling_rate;
	}

	if (!need_load_eval(cdbs->shared, sampling_rate))
		modify_all = false;

	delay = dbs_data->cdata->gov_dbs_timer(policy, modify_all);
	gov_queue_work(dbs_data, policy, delay, modify_all);

unlock:
	mutex_unlock(&shared->timer_mutex);
}

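/* Store the sampling rate in the tuners of whichever governor owns dbs_data. */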
static void set_sampling_rate(struct dbs_data *dbs_data,
		unsigned int sampling_rate)
{
	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
		cs_tuners->sampling_rate = sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;
		od_tuners->sampling_rate = sampling_rate;
	}
}

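/*
 * Allocate the dbs info shared by all CPUs of a policy and point each CPU's
 * cpu_dbs_info at it.
 */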
static int alloc_common_dbs_info(struct cpufreq_policy *policy,
				 struct common_dbs_data *cdata)
{
	struct cpu_common_dbs_info *shared;
	int j;

	/* Allocate memory for the common information for policy->cpus */
	shared = kzalloc(sizeof(*shared), GFP_KERNEL);
	if (!shared)
		return -ENOMEM;

	/* Set shared for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus)
		cdata->get_cpu_cdbs(j)->shared = shared;

	mutex_init(&shared->timer_mutex);
	return 0;
}

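/* Undo alloc_common_dbs_info(): unlink every related CPU and free the shared data. */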
static void free_common_dbs_info(struct cpufreq_policy *policy,
				 struct common_dbs_data *cdata)
{
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
	struct cpu_common_dbs_info *shared = cdbs->shared;
	int j;

	mutex_destroy(&shared->timer_mutex);

	/* Clear the pointer for all CPUs it was set for, online and offline */
	for_each_cpu(j, policy->related_cpus)
		cdata->get_cpu_cdbs(j)->shared = NULL;

	kfree(shared);
}

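/*
 * CPUFREQ_GOV_POLICY_INIT: allocate (or, without per-policy governors, reuse)
 * dbs_data, derive the initial sampling rate from the transition latency and
 * create the governor's sysfs group.
 */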
static int cpufreq_governor_init(struct cpufreq_policy *policy,
				 struct dbs_data *dbs_data,
				 struct common_dbs_data *cdata)
{
	unsigned int latency;
	int ret;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy()))
			return -EINVAL;

		ret = alloc_common_dbs_info(policy, cdata);
		if (ret)
			return ret;

		dbs_data->usage_count++;
		policy->governor_data = dbs_data;
		return 0;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data)
		return -ENOMEM;

	ret = alloc_common_dbs_info(policy, cdata);
	if (ret)
		goto free_dbs_data;

	dbs_data->cdata = cdata;
	dbs_data->usage_count = 1;

	ret = cdata->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_common_dbs_info;

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
					latency * LATENCY_MULTIPLIER));

	if (!have_governor_per_policy())
		cdata->gdbs_data = dbs_data;

	ret = sysfs_create_group(get_governor_parent_kobj(policy),
				 get_sysfs_attr(dbs_data));
	if (ret)
		goto reset_gdbs_data;

	policy->governor_data = dbs_data;

	return 0;

reset_gdbs_data:
	if (!have_governor_per_policy())
		cdata->gdbs_data = NULL;
	cdata->exit(dbs_data, !policy->governor->initialized);
free_common_dbs_info:
	free_common_dbs_info(policy, cdata);
free_dbs_data:
	kfree(dbs_data);
	return ret;
}

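/*
 * CPUFREQ_GOV_POLICY_EXIT: drop one reference to dbs_data and free it, along
 * with its sysfs group, once the last user is gone.
 */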
static int cpufreq_governor_exit(struct cpufreq_policy *policy,
				 struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);

	/* State should be equivalent to INIT */
	if (!cdbs->shared || cdbs->shared->policy)
		return -EBUSY;

	policy->governor_data = NULL;
	if (!--dbs_data->usage_count) {
		sysfs_remove_group(get_governor_parent_kobj(policy),
				   get_sysfs_attr(dbs_data));

		if (!have_governor_per_policy())
			cdata->gdbs_data = NULL;

		cdata->exit(dbs_data, policy->governor->initialized == 1);
		kfree(dbs_data);
	}

	free_common_dbs_info(policy, cdata);
	return 0;
}

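/*
 * CPUFREQ_GOV_START: snapshot the initial idle/wall times and load for every
 * CPU of the policy, initialize the governor-specific per-CPU state and queue
 * the first round of sampling work.
 */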
static int cpufreq_governor_start(struct cpufreq_policy *policy,
				  struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
	struct cpu_common_dbs_info *shared = cdbs->shared;
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	/* State should be equivalent to INIT */
	if (!shared || shared->policy)
		return -EBUSY;

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		sampling_rate = od_tuners->sampling_rate;
		ignore_nice = od_tuners->ignore_nice_load;
		io_busy = od_tuners->io_is_busy;
	}

	shared->policy = policy;
	shared->time_stamp = ktime_get();

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle =
			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

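		/*
		 * Seed prev_load with the share of non-idle time in the total
		 * wall time so far, so the first sample has a baseline.
		 */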
		prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
					    j_cdbs->prev_cpu_idle);
		j_cdbs->prev_load = 100 * prev_load /
				    (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

		INIT_DEFERRABLE_WORK(&j_cdbs->dwork, dbs_timer);
	}

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			cdata->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = cdata->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info = cdata->get_cpu_dbs_info_s(cpu);

		od_dbs_info->rate_mult = 1;
		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate),
		       true);
	return 0;
}

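/*
 * CPUFREQ_GOV_STOP: detach the policy from the shared dbs info and cancel
 * the sampling work.
 */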
static int cpufreq_governor_stop(struct cpufreq_policy *policy,
				 struct dbs_data *dbs_data)
{
	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(policy->cpu);
	struct cpu_common_dbs_info *shared = cdbs->shared;

	/* State should be equivalent to START */
	if (!shared || !shared->policy)
		return -EBUSY;

	/*
	 * The work handler must observe this update so that it does not
	 * proceed any further once the governor is disabled, which is why
	 * timer_mutex is held while updating the value.
	 */
	mutex_lock(&shared->timer_mutex);
	shared->policy = NULL;
	mutex_unlock(&shared->timer_mutex);

	gov_cancel_work(dbs_data, policy);
	return 0;
}

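/*
 * CPUFREQ_GOV_LIMITS: clamp the current frequency into the new
 * [policy->min, policy->max] range and re-evaluate the load.
 */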
static int cpufreq_governor_limits(struct cpufreq_policy *policy,
				   struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);

	/* State should be equivalent to START */
	if (!cdbs->shared || !cdbs->shared->policy)
		return -EBUSY;

	mutex_lock(&cdbs->shared->timer_mutex);
	if (policy->max < cdbs->shared->policy->cur)
		__cpufreq_driver_target(cdbs->shared->policy, policy->max,
					CPUFREQ_RELATION_H);
	else if (policy->min > cdbs->shared->policy->cur)
		__cpufreq_driver_target(cdbs->shared->policy, policy->min,
					CPUFREQ_RELATION_L);
	dbs_check_cpu(dbs_data, cpu);
	mutex_unlock(&cdbs->shared->timer_mutex);

	return 0;
}

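/*
 * Common governor entry point for ondemand and conservative: dispatch the
 * cpufreq governor events to the helpers above, serialized by cdata->mutex.
 */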
int cpufreq_governor_dbs(struct cpufreq_policy *policy,
			 struct common_dbs_data *cdata, unsigned int event)
{
	struct dbs_data *dbs_data;
	int ret;

	/* Lock governor to block concurrent initialization of governor */
	mutex_lock(&cdata->mutex);

	if (have_governor_per_policy())
		dbs_data = policy->governor_data;
	else
		dbs_data = cdata->gdbs_data;

	if (!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)) {
		ret = -EINVAL;
		goto unlock;
	}

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		ret = cpufreq_governor_init(policy, dbs_data, cdata);
		break;
	case CPUFREQ_GOV_POLICY_EXIT:
		ret = cpufreq_governor_exit(policy, dbs_data);
		break;
	case CPUFREQ_GOV_START:
		ret = cpufreq_governor_start(policy, dbs_data);
		break;
	case CPUFREQ_GOV_STOP:
		ret = cpufreq_governor_stop(policy, dbs_data);
		break;
	case CPUFREQ_GOV_LIMITS:
		ret = cpufreq_governor_limits(policy, dbs_data);
		break;
	default:
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&cdata->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);