omap-cpufreq.c
/*
 *  CPU frequency scaling for OMAP using OPP information
 *
 *  Copyright (C) 2005 Nokia Corporation
 *  Written by Tony Lindgren <tony@atomide.com>
 *
 *  Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
 *
 * Copyright (C) 2007-2011 Texas Instruments, Inc.
 * - OMAP3/4 support by Rajendra Nayak, Santosh Shilimkar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/opp.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>

#include <asm/system.h>
#include <asm/smp_plat.h>
#include <asm/cpu.h>

#include <plat/clock.h>
#include <plat/omap-pm.h>
#include <plat/common.h>
#include <plat/omap_device.h>

#include <mach/hardware.h>

/* OPP tolerance in percentage */
#define	OPP_TOLERANCE	4

#ifdef CONFIG_SMP
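/*
 * Cached reference loops_per_jiffy values (per CPU and global) so the
 * delay-loop calibration can be rescaled on every frequency transition.
 */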
struct lpj_info {
	unsigned long	ref;
	unsigned int	freq;
};

static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
static struct lpj_info global_lpj_ref;
#endif

static struct cpufreq_frequency_table *freq_table;
static atomic_t freq_table_users = ATOMIC_INIT(0);
static struct clk *mpu_clk;
static char *mpu_clk_name;
static struct device *mpu_dev;
static struct regulator *mpu_reg;

static int omap_verify_speed(struct cpufreq_policy *policy)
{
	if (!freq_table)
		return -EINVAL;
	return cpufreq_frequency_table_verify(policy, freq_table);
}

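/* Current MPU clock rate in kHz, as the cpufreq core expects. */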
static unsigned int omap_getspeed(unsigned int cpu)
{
	unsigned long rate;

	if (cpu >= NR_CPUS)
		return 0;

	rate = clk_get_rate(mpu_clk) / 1000;
	return rate;
}

static int omap_target(struct cpufreq_policy *policy,
		       unsigned int target_freq,
		       unsigned int relation)
{
	unsigned int i;
	int r, ret = 0;
	struct cpufreq_freqs freqs;
	struct opp *opp;
	unsigned long freq, volt = 0, volt_old = 0, tol = 0;

	if (!freq_table) {
		dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__,
				policy->cpu);
		return -EINVAL;
	}

	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
			relation, &i);
	if (ret) {
		dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n",
			__func__, policy->cpu, target_freq, ret);
		return ret;
	}
	freqs.new = freq_table[i].frequency;
	if (!freqs.new) {
		dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__,
			policy->cpu, target_freq);
		return -EINVAL;
	}

	freqs.old = omap_getspeed(policy->cpu);
	freqs.cpu = policy->cpu;

	if (freqs.old == freqs.new && policy->cur == freqs.new)
		return ret;

	/* notifiers */
	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

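	/*
	 * Look up the OPP for the new rate to get the matching MPU voltage;
	 * OPP_TOLERANCE gives the regulator some margin around that value.
	 */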
	freq = freqs.new * 1000;

	if (mpu_reg) {
		opp = opp_find_freq_ceil(mpu_dev, &freq);
		if (IS_ERR(opp)) {
			dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
				__func__, freqs.new);
			return -EINVAL;
		}
		volt = opp_get_voltage(opp);
		tol = volt * OPP_TOLERANCE / 100;
		volt_old = regulator_get_voltage(mpu_reg);
	}

	dev_dbg(mpu_dev, "cpufreq-omap: %u MHz, %ld mV --> %u MHz, %ld mV\n",
		freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
		freqs.new / 1000, volt ? volt / 1000 : -1);

	/* scaling up?  scale voltage before frequency */
	if (mpu_reg && (freqs.new > freqs.old)) {
		r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
		if (r < 0) {
			dev_warn(mpu_dev, "%s: unable to scale voltage up.\n",
				 __func__);
			freqs.new = freqs.old;
			goto done;
		}
	}

	ret = clk_set_rate(mpu_clk, freqs.new * 1000);

	/* scaling down?  scale voltage after frequency */
	if (mpu_reg && (freqs.new < freqs.old)) {
		r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
		if (r < 0) {
			dev_warn(mpu_dev, "%s: unable to scale voltage down.\n",
				 __func__);
			ret = clk_set_rate(mpu_clk, freqs.old * 1000);
			freqs.new = freqs.old;
			goto done;
		}
	}

	freqs.new = omap_getspeed(policy->cpu);
#ifdef CONFIG_SMP
	/*
	 * Note that loops_per_jiffy is not updated on SMP systems in
	 * cpufreq driver. So, update the per-CPU loops_per_jiffy value
	 * on frequency transition. We need to update all dependent CPUs.
	 */
	for_each_cpu(i, policy->cpus) {
		struct lpj_info *lpj = &per_cpu(lpj_ref, i);
		if (!lpj->freq) {
			lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
			lpj->freq = freqs.old;
		}

		per_cpu(cpu_data, i).loops_per_jiffy =
			cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
	}

	/* And don't forget to adjust the global one */
	if (!global_lpj_ref.freq) {
		global_lpj_ref.ref = loops_per_jiffy;
		global_lpj_ref.freq = freqs.old;
	}
	loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
					freqs.new);
#endif

done:
	/* notifiers */
	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}

	return ret;
}

static inline void freq_table_free(void)
{
	if (atomic_dec_and_test(&freq_table_users))
		opp_free_cpufreq_table(mpu_dev, &freq_table);
}

static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
{
	int result = 0;

	mpu_clk = clk_get(NULL, mpu_clk_name);
	if (IS_ERR(mpu_clk))
		return PTR_ERR(mpu_clk);

	if (policy->cpu >= NR_CPUS) {
		result = -EINVAL;
		goto fail_ck;
	}

	policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);

	if (atomic_inc_return(&freq_table_users) == 1)
		result = opp_init_cpufreq_table(mpu_dev, &freq_table);

	if (result) {
		dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
				__func__, policy->cpu, result);
		goto fail_ck;
	}

	result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
	if (result)
		goto fail_table;

	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);

	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;
	policy->cur = omap_getspeed(policy->cpu);

	/*
	 * On an OMAP SMP configuration, both processors share the voltage
	 * and clock, so the two CPUs need to be scaled together and hence
	 * need software co-ordination. Use the cpufreq affected_cpus
	 * interface to handle this scenario. The additional is_smp() check
	 * keeps the SMP_ON_UP build working.
	 */
	if (is_smp()) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
		cpumask_setall(policy->cpus);
	}

	/* FIXME: what's the actual transition time? */
	policy->cpuinfo.transition_latency = 300 * 1000;

	return 0;

fail_table:
	freq_table_free();
fail_ck:
	clk_put(mpu_clk);
	return result;
}

static int omap_cpu_exit(struct cpufreq_policy *policy)
{
	freq_table_free();
	clk_put(mpu_clk);
	return 0;
}

static struct freq_attr *omap_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver omap_driver = {
	.flags		= CPUFREQ_STICKY,
	.verify		= omap_verify_speed,
	.target		= omap_target,
	.get		= omap_getspeed,
	.init		= omap_cpu_init,
	.exit		= omap_cpu_exit,
	.name		= "omap",
	.attr		= omap_cpufreq_attr,
};

static int __init omap_cpufreq_init(void)
{
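	/* Pick the clock that controls the MPU rate on this OMAP generation */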
	if (cpu_is_omap24xx())
		mpu_clk_name = "virt_prcm_set";
	else if (cpu_is_omap34xx())
		mpu_clk_name = "dpll1_ck";
	else if (cpu_is_omap44xx())
		mpu_clk_name = "dpll_mpu_ck";

	if (!mpu_clk_name) {
		pr_err("%s: unsupported Silicon?\n", __func__);
		return -EINVAL;
	}

	mpu_dev = omap_device_get_by_hwmod_name("mpu");
	if (!mpu_dev) {
		pr_warning("%s: unable to get the mpu device\n", __func__);
		return -EINVAL;
	}

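	/*
	 * Voltage scaling is optional: without a usable MPU regulator the
	 * driver falls back to frequency-only scaling.
	 */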
	mpu_reg = regulator_get(mpu_dev, "vcc");
	if (IS_ERR(mpu_reg)) {
		pr_warning("%s: unable to get MPU regulator\n", __func__);
		mpu_reg = NULL;
	} else {
		/*
		 * Ensure physical regulator is present.
		 * (e.g. could be dummy regulator.)
		 */
		if (regulator_get_voltage(mpu_reg) < 0) {
			pr_warn("%s: physical regulator not present for MPU\n",
				__func__);
			regulator_put(mpu_reg);
			mpu_reg = NULL;
		}
	}

	return cpufreq_register_driver(&omap_driver);
}

static void __exit omap_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&omap_driver);
}

MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs");
MODULE_LICENSE("GPL");
module_init(omap_cpufreq_init);
module_exit(omap_cpufreq_exit);