/*
 *  drivers/cpufreq/cpufreq_conservative.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *            (C)  2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#include "cpufreq_governor.h"

/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)

static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);

/*
 * Every sampling_rate, we check if the current idle time is less than 20%
 * (default); if it is, we try to increase the frequency.  Every
 * sampling_rate * sampling_down_factor, we check if the current idle time
 * is more than 80% (default); if it is, we try to decrease the frequency.
 *
 * Frequency updates happen in minimum steps of 5% (default) of the maximum
 * frequency: each increase or decrease moves requested_freq by freq_step
 * percent of policy->max at a time.
 */
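/*
 * Worked example (illustrative values, not taken from the code): with
 * policy->max = 2000000 kHz and the default freq_step of 5, each step is
 * (5 * 2000000) / 100 = 100000 kHz, i.e. requested_freq moves in 100 MHz
 * increments per adjustment.
 */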
static void cs_check_cpu(int cpu, unsigned int load)
{
	struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int freq_target;

	/*
	 * break out if we 'cannot' reduce the speed as the user might
	 * want freq_step to be zero
	 */
	if (cs_tuners->freq_step == 0)
		return;

	/* Check for frequency increase */
	if (load > cs_tuners->up_threshold) {
		dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (dbs_info->requested_freq == policy->max)
			return;

		freq_target = (cs_tuners->freq_step * policy->max) / 100;

		/* max freq cannot be less than 100. But who knows.... */
		if (unlikely(freq_target == 0))
			freq_target = 5;

		dbs_info->requested_freq += freq_target;
		if (dbs_info->requested_freq > policy->max)
			dbs_info->requested_freq = policy->max;

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
			CPUFREQ_RELATION_H);
		return;
	}

	/*
	 * The optimal frequency is the lowest frequency that can support the
	 * current CPU usage without triggering the up policy.  To be safe, we
	 * stay 10 points under the down threshold.
	 */
	if (load < (cs_tuners->down_threshold - 10)) {
		freq_target = (cs_tuners->freq_step * policy->max) / 100;

		/* guard against unsigned wrap-around when stepping down */
		if (dbs_info->requested_freq > freq_target)
			dbs_info->requested_freq -= freq_target;
		else
			dbs_info->requested_freq = policy->min;
		if (dbs_info->requested_freq < policy->min)
			dbs_info->requested_freq = policy->min;

		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (policy->cur == policy->min)
			return;

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
				CPUFREQ_RELATION_H);
		return;
	}
}
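
/*
 * With the default thresholds (up_threshold = 80, down_threshold = 20),
 * only loads above 80 raise the frequency and only loads below 10 lower
 * it; everything in between leaves requested_freq untouched.  This wide
 * hysteresis band is what makes the governor "conservative".
 */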

static void cs_dbs_timer(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
			struct cs_cpu_dbs_info_s, cdbs.work.work);
	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
			cpu);
	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);

	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
	if (need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
		dbs_check_cpu(dbs_data, cpu);

	schedule_delayed_work_on(smp_processor_id(), dw, delay);
	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
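
/*
 * delay_for_sampling_rate() converts sampling_rate (in microseconds) into
 * jiffies for the delayed work, and need_load_eval() -- both from the
 * common governor code -- skips the load evaluation when another CPU in a
 * shared policy has sampled recently.  The work item always re-arms
 * itself, so sampling continues at the tuned rate.
 */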

static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
		void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cs_cpu_dbs_info_s *dbs_info =
					&per_cpu(cs_cpu_dbs_info, freq->cpu);
	struct cpufreq_policy *policy;

	if (!dbs_info->enable)
		return 0;

	policy = dbs_info->cdbs.cur_policy;

	/*
	 * we only care if our internally tracked freq moves outside the
	 * 'valid' range of frequencies available to us; otherwise we do not
	 * change it
	 */
	if (dbs_info->requested_freq > policy->max
			|| dbs_info->requested_freq < policy->min)
		dbs_info->requested_freq = freq->new;

	return 0;
}
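
/*
 * The common governor code registers this callback as a cpufreq transition
 * notifier (via cs_ops below), so it runs on every frequency change.
 * Resyncing requested_freq when it falls outside the current policy limits
 * keeps the governor's internal target valid after the limits change.
 */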

/************************** sysfs interface ************************/
static struct common_dbs_data cs_dbs_cdata;

static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	cs_tuners->sampling_down_factor = input;
	return count;
}

static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
	return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
		return -EINVAL;

	cs_tuners->up_threshold = input;
	return count;
}

static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	/* cannot be lower than 11 otherwise freq will not fall */
	if (ret != 1 || input < 11 || input > 100 ||
			input >= cs_tuners->up_threshold)
		return -EINVAL;

	cs_tuners->down_threshold = input;
	return count;
}
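
/*
 * Note that store_up_threshold() and store_down_threshold() validate
 * against each other, preserving 11 <= down_threshold < up_threshold <= 100
 * no matter the order in which the two tunables are written.
 */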

static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input, j;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == cs_tuners->ignore_nice) /* nothing to do */
		return count;

	cs_tuners->ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cs_cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->cdbs.prev_cpu_wall);
		if (cs_tuners->ignore_nice)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}

static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :)
	 */
	cs_tuners->freq_step = input;
	return count;
}

show_store_one(cs, sampling_rate);
show_store_one(cs, sampling_down_factor);
show_store_one(cs, up_threshold);
show_store_one(cs, down_threshold);
show_store_one(cs, ignore_nice);
show_store_one(cs, freq_step);
declare_show_sampling_rate_min(cs);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(down_threshold);
gov_sys_pol_attr_rw(ignore_nice);
gov_sys_pol_attr_rw(freq_step);
gov_sys_pol_attr_ro(sampling_rate_min);

static struct attribute *dbs_attributes_gov_sys[] = {
	&sampling_rate_min_gov_sys.attr,
	&sampling_rate_gov_sys.attr,
	&sampling_down_factor_gov_sys.attr,
	&up_threshold_gov_sys.attr,
	&down_threshold_gov_sys.attr,
	&ignore_nice_gov_sys.attr,
	&freq_step_gov_sys.attr,
	NULL
};

static struct attribute_group cs_attr_group_gov_sys = {
	.attrs = dbs_attributes_gov_sys,
	.name = "conservative",
};

static struct attribute *dbs_attributes_gov_pol[] = {
	&sampling_rate_min_gov_pol.attr,
	&sampling_rate_gov_pol.attr,
	&sampling_down_factor_gov_pol.attr,
	&up_threshold_gov_pol.attr,
	&down_threshold_gov_pol.attr,
	&ignore_nice_gov_pol.attr,
	&freq_step_gov_pol.attr,
	NULL
};

static struct attribute_group cs_attr_group_gov_pol = {
	.attrs = dbs_attributes_gov_pol,
	.name = "conservative",
};

/************************** sysfs end ************************/
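
/*
 * Usage sketch (hypothetical values; the path below is the system-wide
 * layout, per-policy setups expose the same "conservative" group under
 * each policy directory instead):
 *
 *   # echo 95 > /sys/devices/system/cpu/cpufreq/conservative/up_threshold
 *   # echo 10 > /sys/devices/system/cpu/cpufreq/conservative/freq_step
 */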

static int cs_init(struct dbs_data *dbs_data)
{
	struct cs_dbs_tuners *tuners;

	tuners = kzalloc(sizeof(struct cs_dbs_tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice = 0;
	tuners->freq_step = 5;

	dbs_data->tuners = tuners;
	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
		jiffies_to_usecs(10);
	mutex_init(&dbs_data->mutex);
	return 0;
}
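
/*
 * Worked example (assuming MIN_SAMPLING_RATE_RATIO == 2, its value in the
 * common governor header): with HZ == 250, jiffies_to_usecs(10) == 40000,
 * so min_sampling_rate comes out at 80000 us, i.e. at least 80 ms between
 * samples.
 */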

static void cs_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

define_get_cpu_dbs_routines(cs_cpu_dbs_info);

static struct notifier_block cs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier,
};

static struct cs_ops cs_ops = {
	.notifier_block = &cs_cpufreq_notifier_block,
};

static struct common_dbs_data cs_dbs_cdata = {
	.governor = GOV_CONSERVATIVE,
	.attr_group_gov_sys = &cs_attr_group_gov_sys,
	.attr_group_gov_pol = &cs_attr_group_gov_pol,
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = cs_dbs_timer,
	.gov_check_cpu = cs_check_cpu,
	.gov_ops = &cs_ops,
	.init = cs_init,
	.exit = cs_exit,
};

static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
	.name			= "conservative",
	.governor		= cs_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};
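
/*
 * Once registered, the governor is selected from userspace in the usual
 * cpufreq way, e.g.:
 *
 *   # echo conservative > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 */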

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_conservative);
}

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);