/*
 *  drivers/cpufreq/cpufreq_conservative.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *            (C)  2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#include "cpufreq_governor.h"

/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)

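/* Per-CPU state for this governor, one instance per possible CPU */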
static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);

/*
 * Every sampling_rate we check whether the current idle time is less than 20%
 * (default); if so, we try to increase the frequency. Every sampling_rate *
 * sampling_down_factor we check whether the current idle time is more than 80%
 * (default); if so, we try to decrease the frequency.
 *
 * Frequency updates happen in steps of freq_step (5% of the maximum frequency
 * by default) in both directions, so the governor ramps the speed up and down
 * gradually instead of jumping straight to the maximum.
 */
static void cs_check_cpu(int cpu, unsigned int load)
static void cs_check_cpu(int cpu, unsigned int load)
{
	struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int freq_target;

	/*
	 * break out early if we 'cannot' change the speed: the user may
	 * deliberately set freq_step to zero to pin the current frequency
	 */
	if (cs_tuners->freq_step == 0)
		return;

	/* Check for frequency increase */
	if (load > cs_tuners->up_threshold) {
		dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (dbs_info->requested_freq == policy->max)
			return;

		freq_target = (cs_tuners->freq_step * policy->max) / 100;
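		/*
		 * e.g. with the default freq_step of 5 and a policy->max of
		 * 2000000 kHz this gives a 100000 kHz (100 MHz) step
		 */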

		/* max freq cannot be less than 100. But who knows.... */
		if (unlikely(freq_target == 0))
			freq_target = 5;

		dbs_info->requested_freq += freq_target;
		if (dbs_info->requested_freq > policy->max)
			dbs_info->requested_freq = policy->max;

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
			CPUFREQ_RELATION_H);
		return;
	}

	/*
	 * The optimal frequency is the lowest frequency that can support the
	 * current CPU usage without triggering the up policy. To be safe, we
	 * stay 10 points under the threshold.
	 */
	if (load < (cs_tuners->down_threshold - 10)) {
		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (policy->cur == policy->min)
			return;

		freq_target = (cs_tuners->freq_step * policy->max) / 100;

		dbs_info->requested_freq -= freq_target;
		if (dbs_info->requested_freq < policy->min)
			dbs_info->requested_freq = policy->min;

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
				CPUFREQ_RELATION_L);
		return;
	}
}

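/*
 * Periodic worker: re-evaluates the load for this policy's CPU once a full
 * sampling period has elapsed, then re-queues itself 'delay' jiffies later.
 */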
static void cs_dbs_timer(struct work_struct *work)
{
	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
			struct cs_cpu_dbs_info_s, cdbs.work.work);
	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
			cpu);
	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
	bool modify_all = true;

	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
	if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
		modify_all = false;
	else
		dbs_check_cpu(dbs_data, cpu);

	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}

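/*
 * cpufreq transition notifier: if our internally tracked requested_freq has
 * drifted outside the policy's [min, max] range, resync it to the frequency
 * the hardware actually switched to.
 */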
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
		void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cs_cpu_dbs_info_s *dbs_info =
					&per_cpu(cs_cpu_dbs_info, freq->cpu);
	struct cpufreq_policy *policy;

	if (!dbs_info->enable)
		return 0;

	policy = dbs_info->cdbs.cur_policy;

	/*
	 * we only care if our internally tracked freq moves outside the 'valid'
	 * ranges of frequency available to us, otherwise we do not change it
	 */
	if (dbs_info->requested_freq > policy->max
			|| dbs_info->requested_freq < policy->min)
		dbs_info->requested_freq = freq->new;

	return 0;
}

/************************** sysfs interface ************************/
static struct common_dbs_data cs_dbs_cdata;

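/*
 * Each store_*() helper below parses one tunable written through sysfs,
 * validates it and, on success, updates the governor's tuners.
 */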
static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	cs_tuners->sampling_down_factor = input;
	return count;
}

static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
	return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
		return -EINVAL;

	cs_tuners->up_threshold = input;
	return count;
}

static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	/*
	 * cannot be lower than 11, otherwise the frequency will not fall
	 * (the down check in cs_check_cpu() compares load against
	 * down_threshold - 10)
	 */
	if (ret != 1 || input < 11 || input > 100 ||
			input >= cs_tuners->up_threshold)
		return -EINVAL;

	cs_tuners->down_threshold = input;
	return count;
}

static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input, j;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == cs_tuners->ignore_nice) /* nothing to do */
		return count;

	cs_tuners->ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cs_cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->cdbs.prev_cpu_wall);
		if (cs_tuners->ignore_nice)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}

static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :)
	 */
	cs_tuners->freq_step = input;
	return count;
}

show_store_one(cs, sampling_rate);
show_store_one(cs, sampling_down_factor);
show_store_one(cs, up_threshold);
show_store_one(cs, down_threshold);
show_store_one(cs, ignore_nice);
show_store_one(cs, freq_step);
declare_show_sampling_rate_min(cs);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(down_threshold);
gov_sys_pol_attr_rw(ignore_nice);
gov_sys_pol_attr_rw(freq_step);
gov_sys_pol_attr_ro(sampling_rate_min);

static struct attribute *dbs_attributes_gov_sys[] = {
	&sampling_rate_min_gov_sys.attr,
	&sampling_rate_gov_sys.attr,
	&sampling_down_factor_gov_sys.attr,
	&up_threshold_gov_sys.attr,
	&down_threshold_gov_sys.attr,
	&ignore_nice_gov_sys.attr,
	&freq_step_gov_sys.attr,
	NULL
};

static struct attribute_group cs_attr_group_gov_sys = {
	.attrs = dbs_attributes_gov_sys,
	.name = "conservative",
};

static struct attribute *dbs_attributes_gov_pol[] = {
	&sampling_rate_min_gov_pol.attr,
	&sampling_rate_gov_pol.attr,
	&sampling_down_factor_gov_pol.attr,
	&up_threshold_gov_pol.attr,
	&down_threshold_gov_pol.attr,
	&ignore_nice_gov_pol.attr,
	&freq_step_gov_pol.attr,
	NULL
};

static struct attribute_group cs_attr_group_gov_pol = {
	.attrs = dbs_attributes_gov_pol,
	.name = "conservative",
};

/************************** sysfs end ************************/

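/* Allocate the tuners for one governor instance and set the default values */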
static int cs_init(struct dbs_data *dbs_data)
{
	struct cs_dbs_tuners *tuners;

	tuners = kzalloc(sizeof(struct cs_dbs_tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice = 0;
	tuners->freq_step = 5;

	dbs_data->tuners = tuners;
	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
		jiffies_to_usecs(10);
	mutex_init(&dbs_data->mutex);
	return 0;
}

static void cs_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

define_get_cpu_dbs_routines(cs_cpu_dbs_info);

static struct notifier_block cs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier,
};

static struct cs_ops cs_ops = {
	.notifier_block = &cs_cpufreq_notifier_block,
};

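/* Glue that plugs this governor's callbacks into the common dbs framework */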
static struct common_dbs_data cs_dbs_cdata = {
	.governor = GOV_CONSERVATIVE,
	.attr_group_gov_sys = &cs_attr_group_gov_sys,
	.attr_group_gov_pol = &cs_attr_group_gov_pol,
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = cs_dbs_timer,
	.gov_check_cpu = cs_check_cpu,
	.gov_ops = &cs_ops,
	.init = cs_init,
	.exit = cs_exit,
};

static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
	.name			= "conservative",
	.governor		= cs_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_conservative);
}

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");
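/*
 * Example usage from userspace (a sketch, assuming the standard cpufreq
 * sysfs layout; exact paths may differ depending on the driver):
 *
 *   # select the conservative governor for cpu0
 *   echo conservative > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *   # step the frequency down more eagerly (default down_threshold is 20)
 *   echo 30 > /sys/devices/system/cpu/cpufreq/conservative/down_threshold
 */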

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);