/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 *  Copyright (C) 2005, 2006, 2007  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2005 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <asm/clock.h>
#include <asm/timer.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/*
 * Each subtype is expected to define the init routines for these clocks,
 * as each subtype (or processor family) will have these clocks at the
 * very least. These are all provided through the CPG, which even some of
 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
 *
 * The processor-specific code is expected to register any additional
 * clock sources that are of interest.
 */
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
	.rate		= CONFIG_SH_PCLK_FREQ,
};

static struct clk module_clk = {
	.name		= "module_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
};

static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
};

static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED,
};

/*
 * The ordering of these clocks matters; do not change it. The array
 * index of each entry is handed to arch_init_clk_ops() as the clock
 * type at clk_init() time.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&module_clk,
	&bus_clk,
	&cpu_clk,
};

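/*
 * Recursively recalculate the rate of every clock parented to @clk,
 * descending through any children that themselves have
 * CLK_RATE_PROPAGATES set.
 */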
static void propagate_rate(struct clk *clk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node) {
		if (likely(clkp->parent != clk))
			continue;
		if (likely(clkp->ops && clkp->ops->recalc))
			clkp->ops->recalc(clkp);
		if (unlikely(clkp->flags & CLK_RATE_PROPAGATES))
			propagate_rate(clkp);
	}
}

static int __clk_enable(struct clk *clk)
{
	/*
	 * See if this is the first time we're enabling the clock; some
	 * clocks that are always enabled still require "special"
	 * initialization. This is especially true if the clock mode
	 * changes and the clock needs to hunt for the proper set of
	 * divisors to use before it can effectively recalc.
	 */
	if (unlikely(atomic_read(&clk->kref.refcount) == 1))
		if (clk->ops && clk->ops->init)
			clk->ops->init(clk);

	kref_get(&clk->kref);

	if (clk->flags & CLK_ALWAYS_ENABLED)
		return 0;

	if (likely(clk->ops && clk->ops->enable))
		clk->ops->enable(clk);

	return 0;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

static void clk_kref_release(struct kref *kref)
{
	/* Nothing to do */
}

static void __clk_disable(struct clk *clk)
{
	kref_put(&clk->kref, clk_kref_release);

	if (clk->flags & CLK_ALWAYS_ENABLED)
		return;

	/*
	 * clk_register() holds a baseline reference, so the refcount
	 * drops back to 1 (not 0) once the last user is gone; only
	 * then is it safe to actually disable the hardware.
	 */
	if (atomic_read(&clk->kref.refcount) == 1) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
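
/*
 * A minimal usage sketch (illustrative only; the surrounding driver
 * code is hypothetical). Enable/disable calls are refcounted and are
 * expected to be balanced:
 *
 *	struct clk *clk = clk_get(dev, "module_clk");
 *	if (!IS_ERR(clk)) {
 *		clk_enable(clk);
 *		...
 *		clk_disable(clk);
 *		clk_put(clk);
 *	}
 */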

int clk_register(struct clk *clk)
{
	mutex_lock(&clock_list_sem);

	list_add(&clk->node, &clock_list);
	kref_init(&clk->kref);

	mutex_unlock(&clock_list_sem);

	if (clk->flags & CLK_ALWAYS_ENABLED) {
		pr_debug("Clock '%s' is ALWAYS_ENABLED\n", clk->name);
		if (clk->ops && clk->ops->init)
			clk->ops->init(clk);
		if (clk->ops && clk->ops->enable)
			clk->ops->enable(clk);
		pr_debug("Enabled.\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_register);
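
/*
 * A sketch of how processor-specific code might register an extra
 * clock source; "my_clk" and "my_clk_ops" are made-up names used only
 * for illustration:
 *
 *	static struct clk my_clk = {
 *		.name	= "my_clk",
 *		.parent	= &master_clk,
 *		.ops	= &my_clk_ops,
 *	};
 *
 *	clk_register(&my_clk);
 */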

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->node);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;

	if (likely(clk->ops && clk->ops->set_rate)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		ret = clk->ops->set_rate(clk, rate, algo_id);
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);

void clk_recalc_rate(struct clk *clk)
{
	if (likely(clk->ops && clk->ops->recalc)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		clk->ops->recalc(clk);
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_recalc_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);
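
/*
 * Callers that care about what the hardware can actually generate
 * would typically round first and then set (a sketch; the target
 * rate is arbitrary):
 *
 *	rate = clk_round_rate(clk, 33333333);
 *	clk_set_rate(clk, rate);
 */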

/*
 * Returns a clock. Note that we first try to match on both the device
 * id on the bus and the clock name; if this fails, we fall back to
 * matching on the clock name alone.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
	struct clk *p, *clk = ERR_PTR(-ENOENT);
	int idno;

	if (dev == NULL || dev->bus != &platform_bus_type)
		idno = -1;
	else
		idno = to_platform_device(dev)->id;

	mutex_lock(&clock_list_sem);
	list_for_each_entry(p, &clock_list, node) {
		if (p->id == idno &&
		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			goto found;
		}
	}

	list_for_each_entry(p, &clock_list, node) {
		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			break;
		}
	}

found:
	mutex_unlock(&clock_list_sem);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_get);

void clk_put(struct clk *clk)
{
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);
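
/*
 * Lookup sketch (the platform device and clock name below are
 * hypothetical): for a platform device whose id is 0,
 *
 *	clk = clk_get(&pdev->dev, "scif_clk");
 *
 * first matches a "scif_clk" entry whose ->id is 0, then falls back
 * to a plain name match. Every successful clk_get() must eventually
 * be balanced by a clk_put().
 */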

void __init __attribute__ ((weak))
arch_init_clk_ops(struct clk_ops **ops, int type)
{
}

int __init __attribute__ ((weak))
arch_clk_init(void)
{
	return 0;
}

static int show_clocks(char *buf, char **start, off_t off,
		       int len, int *eof, void *data)
{
	struct clk *clk;
	char *p = buf;

	list_for_each_entry_reverse(clk, &clock_list, node) {
		unsigned long rate = clk_get_rate(clk);

		p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
			     rate / 1000000, (rate % 1000000) / 10000,
			     ((clk->flags & CLK_ALWAYS_ENABLED) ||
			      (atomic_read(&clk->kref.refcount) != 1)) ?
			     "enabled" : "disabled");
	}

	return p - buf;
}
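
/*
 * Each /proc/clocks line produced above then looks roughly like
 * (values illustrative):
 *
 *	cpu_clk     	: 266.66MHz	enabled
 */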

int __init clk_init(void)
{
	int i, ret = 0;

	BUG_ON(!master_clk.rate);

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];

		arch_init_clk_ops(&clk->ops, i);
		ret |= clk_register(clk);
	}

	ret |= arch_clk_init();

	/* Kick the child clocks. */
	propagate_rate(&master_clk);
	propagate_rate(&bus_clk);

	return ret;
}

static int __init clk_proc_init(void)
{
	struct proc_dir_entry *p;
	p = create_proc_read_entry("clocks", S_IRUSR, NULL,
				   show_clocks, NULL);
	if (unlikely(!p))
		return -EINVAL;

	return 0;
}
subsys_initcall(clk_proc_init);