/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

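/*
 * Return the sysfs attribute group for this governor: the per-policy group
 * when the driver provides per-policy governors, the global one otherwise.
 */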
static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
	if (have_governor_per_policy())
		return dbs_data->cdata->attr_group_gov_pol;
	else
		return dbs_data->cdata->attr_group_gov_sys;
}

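/*
 * Compute the load of every CPU in the policy since the last sample and pass
 * the maximum on to the governor-specific frequency-selection logic.
 */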
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	struct cpufreq_policy *policy;
	unsigned int sampling_rate;
	unsigned int max_load = 0;
	unsigned int ignore_nice;
	unsigned int j;

	if (dbs_data->cdata->governor == GOV_ONDEMAND) {
		struct od_cpu_dbs_info_s *od_dbs_info =
				dbs_data->cdata->get_cpu_dbs_info_s(cpu);

		/*
		 * Sometimes the ondemand governor uses an additional
		 * multiplier to give long delays, so apply that multiplier
		 * to the 'sampling_rate' as well, to keep the
		 * wake-up-from-idle detection logic suitably conservative.
		 */
		sampling_rate = od_tuners->sampling_rate;
		sampling_rate *= od_dbs_info->rate_mult;

		ignore_nice = od_tuners->ignore_nice_load;
	} else {
		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	}

	policy = cdbs->cur_policy;

	/* Get the absolute load of each CPU in the policy */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is an
		 * indication that the system is performance-critical, not
		 * that it is actually idle, so do not add the iowait time
		 * to the CPU idle time.
		 */
		if (dbs_data->cdata->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					 j_cdbs->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies on 32-bit systems
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_cdbs->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's deferrable
		 * timer would not have fired during CPU-idle periods. Hence
		 * an unusually large 'wall_time' (as compared to the sampling
		 * rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}

	dbs_data->cdata->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

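/* (Re)arm the governor's deferrable work on one CPU after @delay jiffies. */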
static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
		unsigned int delay)
{
	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);

	mod_delayed_work_on(cpu, system_wq, &cdbs->dwork, delay);
}

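/*
 * Queue the sampling work either on the local CPU only or on every CPU in
 * the policy, unless the governor has been disabled in the meantime.
 */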
void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
		unsigned int delay, bool all_cpus)
{
	int i;

	mutex_lock(&cpufreq_governor_lock);
	if (!policy->governor_enabled)
		goto out_unlock;

	if (!all_cpus) {
		/*
		 * Use raw_smp_processor_id() to avoid preemptible warnings.
		 * We know that this is only called with all_cpus == false from
		 * works that have been queued with *_work_on() functions and
		 * those works are canceled during CPU_DOWN_PREPARE so they
		 * can't possibly run on any other CPU.
		 */
		__gov_queue_work(raw_smp_processor_id(), dbs_data, delay);
	} else {
		for_each_cpu(i, policy->cpus)
			__gov_queue_work(i, dbs_data, delay);
	}

out_unlock:
	mutex_unlock(&cpufreq_governor_lock);
}
EXPORT_SYMBOL_GPL(gov_queue_work);

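/* Synchronously cancel the queued sampling work on all CPUs of the policy. */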
static inline void gov_cancel_work(struct dbs_data *dbs_data,
		struct cpufreq_policy *policy)
{
	struct cpu_dbs_info *cdbs;
	int i;

	for_each_cpu(i, policy->cpus) {
		cdbs = dbs_data->cdata->get_cpu_cdbs(i);
		cancel_delayed_work_sync(&cdbs->dwork);
	}
}

/* Return true if the CPU load needs to be evaluated again, false otherwise */
bool need_load_eval(struct cpu_dbs_info *cdbs, unsigned int sampling_rate)
{
	if (policy_is_shared(cdbs->cur_policy)) {
		ktime_t time_now = ktime_get();
		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);

		/* Do nothing if we have sampled recently */
		if (delta_us < (s64)(sampling_rate / 2))
			return false;
		else
			cdbs->time_stamp = time_now;
	}

	return true;
}
EXPORT_SYMBOL_GPL(need_load_eval);

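/* Store the sampling rate in the tuners structure matching the governor. */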
static void set_sampling_rate(struct dbs_data *dbs_data,
		unsigned int sampling_rate)
{
	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
		cs_tuners->sampling_rate = sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;
		od_tuners->sampling_rate = sampling_rate;
	}
}

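/*
 * Allocate and set up the governor's dbs_data (tuners, sampling rate, sysfs
 * attributes), or take a reference on the existing one when the governor is
 * system-wide.
 */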
static int cpufreq_governor_init(struct cpufreq_policy *policy,
				 struct dbs_data *dbs_data,
				 struct common_dbs_data *cdata)
{
	unsigned int latency;
	int ret;

	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy()))
			return -EINVAL;
		dbs_data->usage_count++;
		policy->governor_data = dbs_data;
		return 0;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data)
		return -ENOMEM;

	dbs_data->cdata = cdata;
	dbs_data->usage_count = 1;

	ret = cdata->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_dbs_data;

	/* Policy latency is in ns; convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
					latency * LATENCY_MULTIPLIER));

	if (!have_governor_per_policy()) {
		if (WARN_ON(cpufreq_get_global_kobject())) {
			ret = -EINVAL;
			goto cdata_exit;
		}
		cdata->gdbs_data = dbs_data;
	}

	ret = sysfs_create_group(get_governor_parent_kobj(policy),
				 get_sysfs_attr(dbs_data));
	if (ret)
		goto put_kobj;

	policy->governor_data = dbs_data;

	return 0;

put_kobj:
	if (!have_governor_per_policy()) {
		cdata->gdbs_data = NULL;
		cpufreq_put_global_kobject();
	}
cdata_exit:
	cdata->exit(dbs_data, !policy->governor->initialized);
free_dbs_data:
	kfree(dbs_data);
	return ret;
}

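/* Drop a reference to the dbs_data and free it when the last user is gone. */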
static void cpufreq_governor_exit(struct cpufreq_policy *policy,
				  struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;

	policy->governor_data = NULL;
	if (!--dbs_data->usage_count) {
		sysfs_remove_group(get_governor_parent_kobj(policy),
				   get_sysfs_attr(dbs_data));

		if (!have_governor_per_policy()) {
			cdata->gdbs_data = NULL;
			cpufreq_put_global_kobject();
		}

		cdata->exit(dbs_data, policy->governor->initialized == 1);
		kfree(dbs_data);
	}
}

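/*
 * Prime the per-CPU statistics (wall, idle and nice times, initial load) and
 * kick off the sampling work on every CPU in the policy.
 */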
static int cpufreq_governor_start(struct cpufreq_policy *policy,
				  struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	struct cpu_dbs_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		sampling_rate = od_tuners->sampling_rate;
		ignore_nice = od_tuners->ignore_nice_load;
		io_busy = od_tuners->io_is_busy;
	}

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->cur_policy = policy;
		j_cdbs->prev_cpu_idle =
			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
					    j_cdbs->prev_cpu_idle);
		j_cdbs->prev_load = 100 * prev_load /
				    (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

		mutex_init(&j_cdbs->timer_mutex);
		INIT_DEFERRABLE_WORK(&j_cdbs->dwork, cdata->gov_dbs_timer);
	}

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			cdata->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->enable = 1;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = cdata->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info = cdata->get_cpu_dbs_info_s(cpu);

		od_dbs_info->rate_mult = 1;
		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	/* Initialize the timer time stamp */
	cpu_cdbs->time_stamp = ktime_get();

	gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate),
		       true);
	return 0;
}

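/* Stop sampling: cancel the queued work on all CPUs and clear cur_policy. */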
static void cpufreq_governor_stop(struct cpufreq_policy *policy,
				  struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			cdata->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->enable = 0;
	}

	gov_cancel_work(dbs_data, policy);

	mutex_destroy(&cpu_cdbs->timer_mutex);
	cpu_cdbs->cur_policy = NULL;
}

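/*
 * Clamp the current frequency to the new policy limits and re-evaluate the
 * load under the timer mutex.
 */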
static void cpufreq_governor_limits(struct cpufreq_policy *policy,
				    struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);

	if (!cpu_cdbs->cur_policy)
		return;

	mutex_lock(&cpu_cdbs->timer_mutex);
	if (policy->max < cpu_cdbs->cur_policy->cur)
		__cpufreq_driver_target(cpu_cdbs->cur_policy, policy->max,
					CPUFREQ_RELATION_H);
	else if (policy->min > cpu_cdbs->cur_policy->cur)
		__cpufreq_driver_target(cpu_cdbs->cur_policy, policy->min,
					CPUFREQ_RELATION_L);
	dbs_check_cpu(dbs_data, cpu);
	mutex_unlock(&cpu_cdbs->timer_mutex);
}

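/*
 * Common governor entry point: dispatch the cpufreq core's INIT, EXIT,
 * START, STOP and LIMITS events to the helpers above.
 */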
int cpufreq_governor_dbs(struct cpufreq_policy *policy,
			 struct common_dbs_data *cdata, unsigned int event)
{
	struct dbs_data *dbs_data;
	int ret = 0;

	/* Lock the governor to block concurrent initialization */
	mutex_lock(&cdata->mutex);

	if (have_governor_per_policy())
		dbs_data = policy->governor_data;
	else
		dbs_data = cdata->gdbs_data;

	if (WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT))) {
		ret = -EINVAL;
		goto unlock;
	}

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		ret = cpufreq_governor_init(policy, dbs_data, cdata);
		break;
	case CPUFREQ_GOV_POLICY_EXIT:
		cpufreq_governor_exit(policy, dbs_data);
		break;
	case CPUFREQ_GOV_START:
		ret = cpufreq_governor_start(policy, dbs_data);
		break;
	case CPUFREQ_GOV_STOP:
		cpufreq_governor_stop(policy, dbs_data);
		break;
	case CPUFREQ_GOV_LIMITS:
		cpufreq_governor_limits(policy, dbs_data);
		break;
	}

unlock:
	mutex_unlock(&cdata->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);