/*
 *  drivers/cpufreq/cpufreq_conservative.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *            (C)  2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

14
#include <linux/slab.h>
15
#include "cpufreq_governor.h"
16

17 18 19 20 21 22 23 24 25 26 27
/*
 * Per-policy state private to the conservative governor.  The common
 * policy_dbs_info is embedded so that the governor core's pointer can be
 * converted back to this wrapper via container_of() (see to_dbs_info()).
 */
struct cs_policy_dbs_info {
	struct policy_dbs_info policy_dbs;
	unsigned int down_skip;		/* sampling periods since a frequency decrease was last considered */
	unsigned int requested_freq;	/* frequency this governor last asked the driver for */
};

/* Convert the embedded common state back to the conservative-specific wrapper. */
static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
{
	return container_of(policy_dbs, struct cs_policy_dbs_info, policy_dbs);
}

28 29 30 31 32
/* Tunables specific to the conservative governor (exposed through sysfs). */
struct cs_dbs_tuners {
	unsigned int down_threshold;	/* load (%) below which a frequency decrease is attempted */
	unsigned int freq_step;		/* size of one frequency change, as % of policy->max */
};

/* Conservative governor defaults and limits */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
#define DEF_FREQUENCY_STEP			(5)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)

40 41 42 43 44 45 46 47 48 49 50 51
static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
					   struct cpufreq_policy *policy)
{
	unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;

	/* max freq cannot be less than 100. But who knows... */
	if (unlikely(freq_target == 0))
		freq_target = DEF_FREQUENCY_STEP;

	return freq_target;
}

52 53
/*
 * Every sampling_rate, we check, if current idle time is less than 20%
54 55 56
 * (default), then we try to increase frequency. Every sampling_rate *
 * sampling_down_factor, we check, if current idle time is more than 80%
 * (default), then we try to decrease frequency
57 58 59 60
 *
 * Any frequency increase takes it to the maximum frequency. Frequency reduction
 * happens at minimum steps of 5% (default) of maximum frequency
 */
61
static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
62
{
63
	struct policy_dbs_info *policy_dbs = policy->governor_data;
64
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
65
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
66
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
67
	unsigned int load = dbs_update(policy);
68 69 70 71 72

	/*
	 * break out if we 'cannot' reduce the speed as the user might
	 * want freq_step to be zero
	 */
73
	if (cs_tuners->freq_step == 0)
74
		goto out;
75 76

	/* Check for frequency increase */
77
	if (load > dbs_data->up_threshold) {
78 79 80 81
		dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (dbs_info->requested_freq == policy->max)
82
			goto out;
83

84
		dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
85

86 87 88
		if (dbs_info->requested_freq > policy->max)
			dbs_info->requested_freq = policy->max;

89 90
		__cpufreq_driver_target(policy, dbs_info->requested_freq,
			CPUFREQ_RELATION_H);
91
		goto out;
92 93
	}

94
	/* if sampling_down_factor is active break out early */
95
	if (++dbs_info->down_skip < dbs_data->sampling_down_factor)
96
		goto out;
97 98
	dbs_info->down_skip = 0;

99 100
	/* Check for frequency decrease */
	if (load < cs_tuners->down_threshold) {
101
		unsigned int freq_target;
102 103 104 105
		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (policy->cur == policy->min)
106
			goto out;
107

108 109 110 111 112
		freq_target = get_freq_target(cs_tuners, policy);
		if (dbs_info->requested_freq > freq_target)
			dbs_info->requested_freq -= freq_target;
		else
			dbs_info->requested_freq = policy->min;
113

114
		__cpufreq_driver_target(policy, dbs_info->requested_freq,
115
				CPUFREQ_RELATION_L);
116
	}
117

118
 out:
119
	return dbs_data->sampling_rate;
120 121
}

122
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
123
				void *data);
124

125 126 127 128
static struct notifier_block cs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier,
};

129
/************************** sysfs interface ************************/
130
static struct dbs_governor cs_dbs_gov;
131

132 133
static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
					  const char *buf, size_t count)
134
{
135
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
136 137
	unsigned int input;
	int ret;
138
	ret = sscanf(buf, "%u", &input);
139

140
	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
141 142
		return -EINVAL;

143
	dbs_data->sampling_down_factor = input;
144 145 146
	return count;
}

147 148
static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
149
{
150
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
151
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
152 153
	unsigned int input;
	int ret;
154
	ret = sscanf(buf, "%u", &input);
155

156
	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
157 158
		return -EINVAL;

159
	dbs_data->up_threshold = input;
160 161 162
	return count;
}

163 164
static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
165
{
166
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
167
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
168 169
	unsigned int input;
	int ret;
170
	ret = sscanf(buf, "%u", &input);
171

172 173
	/* cannot be lower than 11 otherwise freq will not fall */
	if (ret != 1 || input < 11 || input > 100 ||
174
			input >= dbs_data->up_threshold)
175 176
		return -EINVAL;

177
	cs_tuners->down_threshold = input;
178 179 180
	return count;
}

181 182
static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
183
{
184
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
185
	unsigned int input;
186 187
	int ret;

188 189
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
190 191
		return -EINVAL;

192
	if (input > 1)
193
		input = 1;
194

195
	if (input == dbs_data->ignore_nice_load) /* nothing to do */
196
		return count;
197

198
	dbs_data->ignore_nice_load = input;
199

200
	/* we need to re-evaluate prev_cpu_idle */
201
	gov_update_cpu_data(dbs_data);
202

203 204 205
	return count;
}

206 207
static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf,
			       size_t count)
208
{
209
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
210
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
211 212
	unsigned int input;
	int ret;
213
	ret = sscanf(buf, "%u", &input);
214

215
	if (ret != 1)
216 217
		return -EINVAL;

218
	if (input > 100)
219
		input = 100;
220

221 222 223 224
	/*
	 * no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :)
	 */
225
	cs_tuners->freq_step = input;
226 227 228
	return count;
}

229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252
gov_show_one_common(sampling_rate);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(up_threshold);
gov_show_one_common(ignore_nice_load);
gov_show_one_common(min_sampling_rate);
gov_show_one(cs, down_threshold);
gov_show_one(cs, freq_step);

gov_attr_rw(sampling_rate);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(up_threshold);
gov_attr_rw(ignore_nice_load);
gov_attr_ro(min_sampling_rate);
gov_attr_rw(down_threshold);
gov_attr_rw(freq_step);

static struct attribute *cs_attributes[] = {
	&min_sampling_rate.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
253 254 255 256 257
	NULL
};

/************************** sysfs end ************************/

258 259 260 261 262 263 264 265 266 267 268 269 270
/*
 * Allocate zeroed per-policy conservative state; returns a pointer to the
 * embedded common part (or NULL on allocation failure).
 */
static struct policy_dbs_info *cs_alloc(void)
{
	struct cs_policy_dbs_info *dbs_info;

	dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
	return dbs_info ? &dbs_info->policy_dbs : NULL;
}

/* Free the wrapper allocated by cs_alloc(). */
static void cs_free(struct policy_dbs_info *policy_dbs)
{
	kfree(to_dbs_info(policy_dbs));
}

271
static int cs_init(struct dbs_data *dbs_data, bool notify)
272 273 274
{
	struct cs_dbs_tuners *tuners;

275
	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
276 277 278 279 280 281
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
282
	tuners->freq_step = DEF_FREQUENCY_STEP;
283 284 285
	dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;
286 287 288 289

	dbs_data->tuners = tuners;
	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
		jiffies_to_usecs(10);
290 291 292 293 294

	if (notify)
		cpufreq_register_notifier(&cs_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);

295 296 297
	return 0;
}

298
static void cs_exit(struct dbs_data *dbs_data, bool notify)
299
{
300 301 302 303
	if (notify)
		cpufreq_unregister_notifier(&cs_cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);

304 305 306
	kfree(dbs_data->tuners);
}

307 308
static void cs_start(struct cpufreq_policy *policy)
{
309
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
310 311 312 313 314

	dbs_info->down_skip = 0;
	dbs_info->requested_freq = policy->cur;
}

315
static struct dbs_governor cs_dbs_gov = {
316 317
	.gov = {
		.name = "conservative",
318
		.governor = cpufreq_governor_dbs,
319 320 321
		.max_transition_latency = TRANSITION_LATENCY_LIMIT,
		.owner = THIS_MODULE,
	},
322
	.kobj_type = { .default_attrs = cs_attributes },
323
	.gov_dbs_timer = cs_dbs_timer,
324 325
	.alloc = cs_alloc,
	.free = cs_free,
326 327
	.init = cs_init,
	.exit = cs_exit,
328
	.start = cs_start,
329
};
330

331
#define CPU_FREQ_GOV_CONSERVATIVE	(&cs_dbs_gov.gov)
332 333 334 335 336 337

static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
338
	struct cs_policy_dbs_info *dbs_info;
339 340 341 342 343 344 345 346

	if (!policy)
		return 0;

	/* policy isn't governed by conservative governor */
	if (policy->governor != CPU_FREQ_GOV_CONSERVATIVE)
		return 0;

347
	dbs_info = to_dbs_info(policy->governor_data);
348 349 350 351 352 353 354 355 356 357 358
	/*
	 * we only care if our internally tracked freq moves outside the 'valid'
	 * ranges of frequency available to us otherwise we do not change it
	*/
	if (dbs_info->requested_freq > policy->max
			|| dbs_info->requested_freq < policy->min)
		dbs_info->requested_freq = freq->new;

	return 0;
}

359 360
static int __init cpufreq_gov_dbs_init(void)
{
361
	return cpufreq_register_governor(CPU_FREQ_GOV_CONSERVATIVE);
362 363 364 365
}

/* Unregister the governor on module removal. */
static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(CPU_FREQ_GOV_CONSERVATIVE);
}

369
MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
370
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
371 372
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
373
MODULE_LICENSE("GPL");
374

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
/* Expose this governor as the system default when configured as such. */
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return CPU_FREQ_GOV_CONSERVATIVE;
}

/* Register early (fs_initcall) so the default governor is available at boot. */
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);