/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

DEFINE_MUTEX(dbs_data_mutex);
EXPORT_SYMBOL_GPL(dbs_data_mutex);

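/*
 * Return the sysfs attribute group to use for the governor's tunables: the
 * per-policy group when the driver supports per-policy tunables, or the
 * global (system-wide) group otherwise.
 */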
static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
	if (have_governor_per_policy())
		return dbs_data->cdata->attr_group_gov_pol;
	else
		return dbs_data->cdata->attr_group_gov_sys;
}

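/*
 * dbs_check_cpu - evaluate the load of all CPUs sharing a policy.
 *
 * Sample the idle and wall time of every CPU in the policy, derive a load
 * percentage for each of them, and pass the maximum of those values to the
 * governor-specific ->gov_check_cpu() callback, which decides whether the
 * frequency needs to change.
 */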
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	struct cpufreq_policy *policy = cdbs->shared->policy;
	unsigned int sampling_rate;
	unsigned int max_load = 0;
	unsigned int ignore_nice;
	unsigned int j;

	if (dbs_data->cdata->governor == GOV_ONDEMAND) {
		struct od_cpu_dbs_info_s *od_dbs_info =
				dbs_data->cdata->get_cpu_dbs_info_s(cpu);

		/*
		 * Sometimes, the ondemand governor uses an additional
		 * multiplier to give long delays. So apply this multiplier to
		 * the 'sampling_rate', so as to keep the wake-up-from-idle
		 * detection logic a bit conservative.
		 */
		sampling_rate = od_tuners->sampling_rate;
		sampling_rate *= od_dbs_info->rate_mult;

		ignore_nice = od_tuners->ignore_nice_load;
	} else {
		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	}

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle. So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (dbs_data->cdata->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		if (cur_idle_time < j_cdbs->prev_cpu_idle)
			cur_idle_time = j_cdbs->prev_cpu_idle;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					 j_cdbs->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_cdbs->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's utilization
		 * update handler would not have run during CPU-idle periods.
		 * Hence, an unusually large 'wall_time' (as compared to the
		 * sampling rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}

	dbs_data->cdata->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

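/*
 * gov_set_update_util - start receiving utilization updates for a policy.
 *
 * Set the sample delay for @shared and register the update_util callback of
 * every CPU in the policy, so that dbs_update_util_handler() gets invoked on
 * subsequent CPU utilization updates.
 */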
void gov_set_update_util(struct cpu_common_dbs_info *shared,
			 unsigned int delay_us)
{
	struct cpufreq_policy *policy = shared->policy;
	struct dbs_data *dbs_data = policy->governor_data;
	int cpu;

	gov_update_sample_delay(shared, delay_us);
	shared->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);

		cpufreq_set_update_util_data(cpu, &cdbs->update_util);
	}
}
EXPORT_SYMBOL_GPL(gov_set_update_util);

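/*
 * Detach the update_util hooks from all CPUs of the policy and use
 * synchronize_rcu() to ensure that no CPU is still using the old pointer
 * once this returns.
 */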
static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_set_update_util_data(i, NULL);

	synchronize_rcu();
}

static void gov_cancel_work(struct cpu_common_dbs_info *shared)
{
	/* Tell dbs_update_util_handler() to skip queuing up work items. */
	atomic_inc(&shared->skip_work);
	/*
	 * If dbs_update_util_handler() is already running, it may not notice
	 * the incremented skip_work, so wait for it to complete to prevent its
	 * work item from being queued up after the cancel_work_sync() below.
	 */
	gov_clear_update_util(shared->policy);
	irq_work_sync(&shared->irq_work);
	cancel_work_sync(&shared->work);
	atomic_set(&shared->skip_work, 0);
}

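/*
 * Process-context part of a sample: call the governor's ->gov_dbs_timer()
 * to evaluate the load and compute the next sample delay, then release the
 * skip_work reference taken by dbs_update_util_handler().
 */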
static void dbs_work_handler(struct work_struct *work)
{
	struct cpu_common_dbs_info *shared = container_of(work, struct
					cpu_common_dbs_info, work);
	struct cpufreq_policy *policy;
	struct dbs_data *dbs_data;
	unsigned int delay;

	policy = shared->policy;
	dbs_data = policy->governor_data;

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&shared->timer_mutex);
	delay = dbs_data->cdata->gov_dbs_timer(policy);
	shared->sample_delay_ns = jiffies_to_nsecs(delay);
	mutex_unlock(&shared->timer_mutex);

	/*
	 * If the atomic operation below is reordered with respect to the
	 * sample delay modification, the utilization update handler may end
	 * up using a stale sample delay value.
	 */
	smp_mb__before_atomic();
	atomic_dec(&shared->skip_work);
}

static void dbs_irq_work(struct irq_work *irq_work)
{
	struct cpu_common_dbs_info *shared;

	shared = container_of(irq_work, struct cpu_common_dbs_info, irq_work);
	schedule_work(&shared->work);
}

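/*
 * Queue the irq_work on the local CPU; its handler (dbs_irq_work()) will in
 * turn schedule dbs_work_handler() in process context.
 */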
static inline void gov_queue_irq_work(struct cpu_common_dbs_info *shared)
{
#ifdef CONFIG_SMP
	irq_work_queue_on(&shared->irq_work, smp_processor_id());
#else
	irq_work_queue(&shared->irq_work);
#endif
}

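/*
 * Utilization update callback invoked from scheduler context.  It
 * rate-limits sampling using sample_delay_ns and uses the skip_work counter
 * to ensure that at most one work item is in flight at any time.
 */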
static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned long util, unsigned long max)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct cpu_common_dbs_info *shared = cdbs->shared;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - The governor is being stopped.
	 * - It is too early (too little time from the previous sample).
	 */
	if (atomic_inc_return(&shared->skip_work) == 1) {
		u64 delta_ns;

		delta_ns = time - shared->last_sample_time;
		if ((s64)delta_ns >= shared->sample_delay_ns) {
			shared->last_sample_time = time;
			gov_queue_irq_work(shared);
			return;
		}
	}
	atomic_dec(&shared->skip_work);
}

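/*
 * Store the new sampling rate in the tuners structure matching the governor
 * type (conservative or ondemand).
 */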
static void set_sampling_rate(struct dbs_data *dbs_data,
		unsigned int sampling_rate)
{
	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
		cs_tuners->sampling_rate = sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;
		od_tuners->sampling_rate = sampling_rate;
	}
}

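/*
 * Allocate the cpu_common_dbs_info shared by all CPUs of the policy and
 * initialize its mutex, skip_work counter, irq_work and work item.
 */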
static int alloc_common_dbs_info(struct cpufreq_policy *policy,
				 struct common_dbs_data *cdata)
{
	struct cpu_common_dbs_info *shared;
	int j;

	/* Allocate memory for the common information for policy->cpus */
	shared = kzalloc(sizeof(*shared), GFP_KERNEL);
	if (!shared)
		return -ENOMEM;

	/* Set shared for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus)
		cdata->get_cpu_cdbs(j)->shared = shared;

	mutex_init(&shared->timer_mutex);
	atomic_set(&shared->skip_work, 0);
	init_irq_work(&shared->irq_work, dbs_irq_work);
	INIT_WORK(&shared->work, dbs_work_handler);
	return 0;
}

static void free_common_dbs_info(struct cpufreq_policy *policy,
				 struct common_dbs_data *cdata)
{
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
	struct cpu_common_dbs_info *shared = cdbs->shared;
	int j;

	mutex_destroy(&shared->timer_mutex);

	for_each_cpu(j, policy->cpus)
		cdata->get_cpu_cdbs(j)->shared = NULL;

	kfree(shared);
}

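/*
 * CPUFREQ_GOV_POLICY_INIT: allocate and set up the dbs_data for the policy
 * (or reuse the global one when tunables are not per-policy), call the
 * governor's ->init() callback, derive the default sampling rate from the
 * driver's transition latency (LATENCY_MULTIPLIER times the latency in us,
 * but never below min_sampling_rate), and create the tunables sysfs group.
 */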
static int cpufreq_governor_init(struct cpufreq_policy *policy,
				 struct common_dbs_data *cdata)
{
	struct dbs_data *dbs_data = cdata->gdbs_data;
	unsigned int latency;
	int ret;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy()))
			return -EINVAL;

		ret = alloc_common_dbs_info(policy, cdata);
		if (ret)
			return ret;

		dbs_data->usage_count++;
		policy->governor_data = dbs_data;
		return 0;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data)
		return -ENOMEM;

	ret = alloc_common_dbs_info(policy, cdata);
	if (ret)
		goto free_dbs_data;

	dbs_data->cdata = cdata;
	dbs_data->usage_count = 1;

	ret = cdata->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_common_dbs_info;

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
					latency * LATENCY_MULTIPLIER));

	if (!have_governor_per_policy())
		cdata->gdbs_data = dbs_data;

	policy->governor_data = dbs_data;

	ret = sysfs_create_group(get_governor_parent_kobj(policy),
				 get_sysfs_attr(dbs_data));
	if (ret)
		goto reset_gdbs_data;

	return 0;

reset_gdbs_data:
	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		cdata->gdbs_data = NULL;
	cdata->exit(dbs_data, !policy->governor->initialized);

free_common_dbs_info:
	free_common_dbs_info(policy, cdata);

free_dbs_data:
	kfree(dbs_data);
	return ret;
}

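/*
 * CPUFREQ_GOV_POLICY_EXIT: drop one usage of the dbs_data; when the last
 * user goes away, remove the sysfs group, call the governor's ->exit() and
 * free the dbs_data.  The per-policy common info is freed in any case.
 */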
static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct common_dbs_data *cdata = dbs_data->cdata;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);

	/* State should be equivalent to INIT */
	if (!cdbs->shared || cdbs->shared->policy)
		return -EBUSY;

	if (!--dbs_data->usage_count) {
		sysfs_remove_group(get_governor_parent_kobj(policy),
				   get_sysfs_attr(dbs_data));

		policy->governor_data = NULL;

		if (!have_governor_per_policy())
			cdata->gdbs_data = NULL;

		cdata->exit(dbs_data, policy->governor->initialized == 1);
		kfree(dbs_data);
	} else {
		policy->governor_data = NULL;
	}

	free_common_dbs_info(policy, cdata);
	return 0;
}

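/*
 * CPUFREQ_GOV_START: snapshot the initial idle/wall times and prev_load of
 * every CPU in the policy, reset the governor-specific per-CPU state, and
 * finally arm the utilization update hooks via gov_set_update_util().
 */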
static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
	struct cpu_common_dbs_info *shared = cdbs->shared;
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	/* State should be equivalent to INIT */
	if (!shared || shared->policy)
		return -EBUSY;

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		sampling_rate = od_tuners->sampling_rate;
		ignore_nice = od_tuners->ignore_nice_load;
		io_busy = od_tuners->io_is_busy;
	}

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle =
			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
					    j_cdbs->prev_cpu_idle);
		j_cdbs->prev_load = 100 * prev_load /
				    (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

		j_cdbs->update_util.func = dbs_update_util_handler;
	}
	shared->policy = policy;

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			cdata->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = cdata->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info = cdata->get_cpu_dbs_info_s(cpu);

		od_dbs_info->rate_mult = 1;
		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	gov_set_update_util(shared, sampling_rate);
	return 0;
}

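/*
 * CPUFREQ_GOV_STOP: detach the utilization update hooks, cancel any pending
 * work and forget the policy pointer.
 */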
static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(policy->cpu);
	struct cpu_common_dbs_info *shared = cdbs->shared;

	/* State should be equivalent to START */
	if (!shared || !shared->policy)
		return -EBUSY;

	gov_cancel_work(shared);
	shared->policy = NULL;

	return 0;
}

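/*
 * CPUFREQ_GOV_LIMITS: clamp the current frequency into the new [min, max]
 * range and re-evaluate the load, holding timer_mutex to avoid racing with
 * dbs_work_handler().
 */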
static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);

	/* State should be equivalent to START */
	if (!cdbs->shared || !cdbs->shared->policy)
		return -EBUSY;

	mutex_lock(&cdbs->shared->timer_mutex);
	if (policy->max < cdbs->shared->policy->cur)
		__cpufreq_driver_target(cdbs->shared->policy, policy->max,
					CPUFREQ_RELATION_H);
	else if (policy->min > cdbs->shared->policy->cur)
		__cpufreq_driver_target(cdbs->shared->policy, policy->min,
					CPUFREQ_RELATION_L);
	dbs_check_cpu(dbs_data, cpu);
	mutex_unlock(&cdbs->shared->timer_mutex);

	return 0;
}

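/*
 * Common ->governor() entry point shared by the ondemand and conservative
 * governors, dispatching on @event.  For illustration only (a sketch with
 * hypothetical names, not code from this file), a governor might wrap it
 * like:
 *
 *	static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
 *					   unsigned int event)
 *	{
 *		return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
 *	}
 *
 * where od_dbs_cdata would be that governor's struct common_dbs_data.
 */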
int cpufreq_governor_dbs(struct cpufreq_policy *policy,
			 struct common_dbs_data *cdata, unsigned int event)
{
	int ret = -EINVAL;

	/* Lock governor to block concurrent initialization of governor */
	mutex_lock(&dbs_data_mutex);

	if (event == CPUFREQ_GOV_POLICY_INIT) {
		ret = cpufreq_governor_init(policy, cdata);
	} else if (policy->governor_data) {
		switch (event) {
		case CPUFREQ_GOV_POLICY_EXIT:
			ret = cpufreq_governor_exit(policy);
			break;
		case CPUFREQ_GOV_START:
			ret = cpufreq_governor_start(policy);
			break;
		case CPUFREQ_GOV_STOP:
			ret = cpufreq_governor_stop(policy);
			break;
		case CPUFREQ_GOV_LIMITS:
			ret = cpufreq_governor_limits(policy);
			break;
		}
	}

	mutex_unlock(&dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);