/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 *  Copyright (C) 2005, 2006, 2007  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2005 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <asm/clock.h>
#include <asm/timer.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/*
 * Each subtype is expected to define the init routines for these clocks,
 * as each subtype (or processor family) will have these clocks at the
 * very least. These are all provided through the CPG, which even some of
 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
 *
 * The processor-specific code is expected to register any additional
 * clock sources that are of interest.
 */
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
	.rate		= CONFIG_SH_PCLK_FREQ,
};

static struct clk module_clk = {
	.name		= "module_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
};

static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
};

static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED,
};

/*
 * The ordering of these clocks matters, do not change it.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&module_clk,
	&bus_clk,
	&cpu_clk,
};

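/*
 * Recalculate the rate of every clock parented to @clk, recursing
 * into any child that is itself flagged CLK_RATE_PROPAGATES.
 */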
static void propagate_rate(struct clk *clk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node) {
		if (likely(clkp->parent != clk))
			continue;
		if (likely(clkp->ops && clkp->ops->recalc))
			clkp->ops->recalc(clkp);
		if (unlikely(clkp->flags & CLK_RATE_PROPAGATES))
			propagate_rate(clkp);
	}
}

static int __clk_enable(struct clk *clk)
{
	/*
	 * See if this is the first time we're enabling the clock; some
	 * clocks that are always enabled still require "special"
	 * initialization. This is especially true if the clock mode
	 * changes and the clock needs to hunt for the proper set of
	 * divisors to use before it can effectively recalc.
	 */
	if (unlikely(atomic_read(&clk->kref.refcount) == 1))
		if (clk->ops && clk->ops->init)
			clk->ops->init(clk);

	kref_get(&clk->kref);

	if (clk->flags & CLK_ALWAYS_ENABLED)
		return 0;

	if (likely(clk->ops && clk->ops->enable))
		clk->ops->enable(clk);

	return 0;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	clk_enable(clk->parent);

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

static void clk_kref_release(struct kref *kref)
{
	/* Nothing to do */
}

static void __clk_disable(struct clk *clk)
{
	int count = kref_put(&clk->kref, clk_kref_release);

	if (clk->flags & CLK_ALWAYS_ENABLED)
		return;

	if (!count) {	/* count reaches zero, disable the clock */
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	clk_disable(clk->parent);
}
EXPORT_SYMBOL_GPL(clk_disable);

int clk_register(struct clk *clk)
{
	mutex_lock(&clock_list_sem);

	list_add(&clk->node, &clock_list);
	kref_init(&clk->kref);

	mutex_unlock(&clock_list_sem);

	if (clk->flags & CLK_ALWAYS_ENABLED) {
		pr_debug( "Clock '%s' is ALWAYS_ENABLED\n", clk->name);
		if (clk->ops && clk->ops->init)
			clk->ops->init(clk);
		if (clk->ops && clk->ops->enable)
			clk->ops->enable(clk);
		pr_debug( "Enabled.");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_register);

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->node);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

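/*
 * clk_set_rate_ex() additionally hands an algorithm selector down to
 * the underlying ops->set_rate() implementation; plain clk_set_rate()
 * is simply the algo_id == 0 case.
 */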
int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;

	if (likely(clk->ops && clk->ops->set_rate)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		ret = clk->ops->set_rate(clk, rate, algo_id);
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);

void clk_recalc_rate(struct clk *clk)
{
	if (likely(clk->ops && clk->ops->recalc)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		clk->ops->recalc(clk);
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_recalc_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/*
 * Returns a clock. We first try to match on both the platform device
 * id and the clock name; failing that, we fall back to matching on
 * the clock name alone.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
	struct clk *p, *clk = ERR_PTR(-ENOENT);
	int idno;

	if (dev == NULL || dev->bus != &platform_bus_type)
		idno = -1;
	else
		idno = to_platform_device(dev)->id;

	mutex_lock(&clock_list_sem);
	list_for_each_entry(p, &clock_list, node) {
		if (p->id == idno &&
		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			goto found;
		}
	}

	list_for_each_entry(p, &clock_list, node) {
		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			break;
		}
	}

found:
	mutex_unlock(&clock_list_sem);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_get);

void clk_put(struct clk *clk)
{
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);
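
/*
 * Typical consumer usage (sketch only; "my_clk" stands in for a
 * hypothetical clock name registered by the CPU subtype or by
 * platform code):
 *
 *	struct clk *clk = clk_get(dev, "my_clk");
 *	if (!IS_ERR(clk)) {
 *		clk_enable(clk);
 *		...
 *		clk_disable(clk);
 *		clk_put(clk);
 *	}
 */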

void __init __attribute__ ((weak))
arch_init_clk_ops(struct clk_ops **ops, int type)
{
}

int __init __attribute__ ((weak))
arch_clk_init(void)
{
	return 0;
}
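
/*
 * A CPU subtype hooks in its clock operations by overriding the weak
 * functions above. A hypothetical sketch (not lifted from any in-tree
 * subtype; the index follows onchip_clocks[]):
 *
 *	static void my_master_clk_init(struct clk *clk)
 *	{
 *		clk->rate = 33333333;	/* would be read from hardware */
 *	}
 *
 *	static struct clk_ops my_master_clk_ops = {
 *		.init	= my_master_clk_init,
 *	};
 *
 *	void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
 *	{
 *		if (idx == 0)	/* master_clk */
 *			*ops = &my_master_clk_ops;
 *	}
 */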

static int show_clocks(char *buf, char **start, off_t off,
		       int len, int *eof, void *data)
{
	struct clk *clk;
	char *p = buf;

	list_for_each_entry_reverse(clk, &clock_list, node) {
		unsigned long rate = clk_get_rate(clk);

		p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
			     rate / 1000000, (rate % 1000000) / 10000,
			     ((clk->flags & CLK_ALWAYS_ENABLED) ||
			      (atomic_read(&clk->kref.refcount) != 1)) ?
			     "enabled" : "disabled");
	}

	return p - buf;
}
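
/*
 * Example /proc/clocks output (rates purely illustrative):
 *
 *	master_clk  	: 33.33MHz	enabled
 *	module_clk  	: 33.33MHz	enabled
 *	bus_clk     	: 66.66MHz	enabled
 *	cpu_clk     	: 133.33MHz	enabled
 */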

int __init clk_init(void)
{
	int i, ret = 0;

	BUG_ON(!master_clk.rate);

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];

		arch_init_clk_ops(&clk->ops, i);
		ret |= clk_register(clk);
	}

	ret |= arch_clk_init();

	/* Kick the child clocks. */
	propagate_rate(&master_clk);
	propagate_rate(&bus_clk);

	return ret;
}

static int __init clk_proc_init(void)
{
	struct proc_dir_entry *p;
	p = create_proc_read_entry("clocks", S_IRUSR, NULL,
				   show_clocks, NULL);
	if (unlikely(!p))
		return -EINVAL;

	return 0;
}
subsys_initcall(clk_proc_init);