/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 *  Copyright (C) 2005 - 2009  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <asm/clock.h>
#include <asm/machvec.h>

/* All registered clocks, plus the locks guarding them. */
static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);	/* serializes usecount/rate updates */
static DEFINE_MUTEX(clock_list_sem);	/* serializes clock_list membership */

37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110
void clk_rate_table_build(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  int nr_freqs,
			  struct clk_div_mult_table *src_table,
			  unsigned long *bitmap)
{
	unsigned long mult, div;
	unsigned long freq;
	int i;

	for (i = 0; i < nr_freqs; i++) {
		div = 1;
		mult = 1;

		if (src_table->divisors && i < src_table->nr_divisors)
			div = src_table->divisors[i];

		if (src_table->multipliers && i < src_table->nr_multipliers)
			mult = src_table->multipliers[i];

		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
			freq = CPUFREQ_ENTRY_INVALID;
		else
			freq = clk->parent->rate * mult / div;

		freq_table[i].index = i;
		freq_table[i].frequency = freq;
	}

	/* Termination entry */
	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;
}

/*
 * clk_rate_table_round - find the table frequency closest to @rate
 * @clk:	clock the table belongs to (currently unused)
 * @freq_table:	CPUFREQ_TABLE_END-terminated frequency table
 * @rate:	desired rate
 *
 * Returns the best-fit valid table frequency, clamped to the table's
 * highest (lowest) valid entry when @rate lies above (below) the table's
 * range.  If the table contains no valid entries, @rate is returned
 * unchanged.
 *
 * Fix: @lowest was initialised to 0, so "freq < lowest" could never be
 * true for the unsigned frequencies — the low-side clamp then compared
 * @rate against 0 instead of the real table minimum.  Initialise it to
 * ~0UL so the minimum is actually tracked (matching the later upstream
 * rework of this helper), and only clamp when a valid entry was seen.
 */
long clk_rate_table_round(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  unsigned long rate)
{
	unsigned long rate_error, rate_error_prev = ~0UL;
	unsigned long rate_best_fit = rate;
	unsigned long highest, lowest;
	int i;

	highest = 0;
	lowest = ~0UL;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq > highest)
			highest = freq;
		if (freq < lowest)
			lowest = freq;

		/* abs() copes with the unsigned wrap when freq < rate */
		rate_error = abs(freq - rate);
		if (rate_error < rate_error_prev) {
			rate_best_fit = freq;
			rate_error_prev = rate_error;
		}

		if (rate_error == 0)
			break;
	}

	/* Clamp to the table range, but only if we saw a valid entry. */
	if (lowest <= highest) {
		if (rate >= highest)
			rate_best_fit = highest;
		if (rate <= lowest)
			rate_best_fit = lowest;
	}

	return rate_best_fit;
}

111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129
int clk_rate_table_find(struct clk *clk,
			struct cpufreq_frequency_table *freq_table,
			unsigned long rate)
{
	int i;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq == rate)
			return i;
	}

	return -ENOENT;
}

130 131 132
/* Used for clocks that always have same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
133
	return clk->parent ? clk->parent->rate : 0;
134 135
}

136 137 138 139 140 141 142 143 144 145 146 147 148
int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/* now do the debugfs renaming to reattach the child
	   to the proper parent */

	return 0;
}

149
/* Propagate rate to children */
150
void propagate_rate(struct clk *tclk)
151 152 153
{
	struct clk *clkp;

154
	list_for_each_entry(clkp, &tclk->children, sibling) {
155
		if (clkp->ops && clkp->ops->recalc)
156
			clkp->rate = clkp->ops->recalc(clkp);
157

158
		propagate_rate(clkp);
159 160 161
	}
}

162
static void __clk_disable(struct clk *clk)
163
{
164 165 166 167 168
	if (clk->usecount == 0) {
		printk(KERN_ERR "Trying disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		return;
169
	}
170

171 172 173 174 175 176
	if (!(--clk->usecount)) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
177 178
}

179
void clk_disable(struct clk *clk)
180 181 182
{
	unsigned long flags;

183
	if (!clk)
184
		return;
185

186
	spin_lock_irqsave(&clock_lock, flags);
187
	__clk_disable(clk);
188 189
	spin_unlock_irqrestore(&clock_lock, flags);
}
190
EXPORT_SYMBOL_GPL(clk_disable);
191

192
static int __clk_enable(struct clk *clk)
193
{
194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
211
	}
212 213 214 215 216

	return ret;
err:
	clk->usecount--;
	return ret;
217 218
}

219
int clk_enable(struct clk *clk)
220 221
{
	unsigned long flags;
222
	int ret;
223

224
	if (!clk)
225
		return -EINVAL;
226

227
	spin_lock_irqsave(&clock_lock, flags);
228
	ret = __clk_enable(clk);
229
	spin_unlock_irqrestore(&clock_lock, flags);
230 231

	return ret;
232
}
233
EXPORT_SYMBOL_GPL(clk_enable);
234

/* Clocks with no parent hang off this list instead of a parent's children. */
static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *root;

	list_for_each_entry(root, &root_clks, sibling) {
		if (root->ops && root->ops->recalc)
			root->rate = root->ops->recalc(root);
		propagate_rate(root);
	}
}

255 256
int clk_register(struct clk *clk)
{
257 258 259 260 261 262 263 264 265
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

266
	mutex_lock(&clock_list_sem);
267

268
	INIT_LIST_HEAD(&clk->children);
269
	clk->usecount = 0;
270 271 272 273 274 275

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

276
	list_add(&clk->node, &clock_list);
277
	if (clk->ops && clk->ops->init)
278
		clk->ops->init(clk);
279
	mutex_unlock(&clock_list_sem);
280 281 282

	return 0;
}
283
EXPORT_SYMBOL_GPL(clk_register);
284 285 286

void clk_unregister(struct clk *clk)
{
287
	mutex_lock(&clock_list_sem);
288
	list_del(&clk->sibling);
289
	list_del(&clk->node);
290
	mutex_unlock(&clock_list_sem);
291
}
292
EXPORT_SYMBOL_GPL(clk_unregister);
293

294 295 296 297 298 299 300 301 302
static void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}

303
unsigned long clk_get_rate(struct clk *clk)
304 305 306
{
	return clk->rate;
}
307
EXPORT_SYMBOL_GPL(clk_get_rate);
308 309

int clk_set_rate(struct clk *clk, unsigned long rate)
310 311 312
{
	return clk_set_rate_ex(clk, rate, 0);
}
313
EXPORT_SYMBOL_GPL(clk_set_rate);
314 315

int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
316 317
{
	int ret = -EOPNOTSUPP;
318
	unsigned long flags;
319

320
	spin_lock_irqsave(&clock_lock, flags);
321

322
	if (likely(clk->ops && clk->ops->set_rate)) {
323
		ret = clk->ops->set_rate(clk, rate, algo_id);
324 325 326 327 328
		if (ret != 0)
			goto out_unlock;
	} else {
		clk->rate = rate;
		ret = 0;
329 330
	}

331 332 333 334 335 336 337 338
	if (clk->ops && clk->ops->recalc)
		clk->rate = clk->ops->recalc(clk);

	propagate_rate(clk);

out_unlock:
	spin_unlock_irqrestore(&clock_lock, flags);

339 340
	return ret;
}
341
EXPORT_SYMBOL_GPL(clk_set_rate_ex);
342

343 344
int clk_set_parent(struct clk *clk, struct clk *parent)
{
345
	unsigned long flags;
346 347 348 349
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
350 351
	if (clk->parent == parent)
		return 0;
352

353 354 355 356
	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
357 358 359
		else
			ret = clk_reparent(clk, parent);

360
		if (ret == 0) {
361 362
			pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
				 clk->name, clk->parent->name, clk->rate);
363 364 365 366 367 368 369
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);
370 371 372 373 374 375 376 377 378 379 380

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

/* Return the cached parent pointer; NULL for a root clock. */
struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

P
Paul Mundt 已提交
397 398 399 400 401
/*
 * Returns a clock. Note that we first try to use device id on the bus
 * and clock name. If this fails, we try to use clock name only.
 */
struct clk *clk_get(struct device *dev, const char *id)
402
{
403
	const char *dev_id = dev ? dev_name(dev) : NULL;
404
	struct clk *p, *clk = ERR_PTR(-ENOENT);
P
Paul Mundt 已提交
405 406
	int idno;

407
	clk = clk_get_sys(dev_id, id);
408
	if (clk && !IS_ERR(clk))
409 410
		return clk;

P
Paul Mundt 已提交
411 412 413 414
	if (dev == NULL || dev->bus != &platform_bus_type)
		idno = -1;
	else
		idno = to_platform_device(dev)->id;
415

416
	mutex_lock(&clock_list_sem);
P
Paul Mundt 已提交
417 418 419 420 421 422 423 424
	list_for_each_entry(p, &clock_list, node) {
		if (p->id == idno &&
		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			goto found;
		}
	}

425 426 427 428 429 430
	list_for_each_entry(p, &clock_list, node) {
		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			break;
		}
	}
P
Paul Mundt 已提交
431 432

found:
433
	mutex_unlock(&clock_list_sem);
434 435 436

	return clk;
}
437
EXPORT_SYMBOL_GPL(clk_get);
438 439 440 441 442 443

void clk_put(struct clk *clk)
{
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
}
444
EXPORT_SYMBOL_GPL(clk_put);
#ifdef CONFIG_PM
/*
 * On resume from hibernation the clock hardware is back at its power-on
 * state, so reprogram every registered clock from the cached software
 * state: parent first, then rate (or a recalc when no set_rate op exists).
 */
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp,
						rate, NO_CHANGE);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
	case PM_EVENT_SUSPEND:
		/* Nothing to do; state is remembered via prev_state. */
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	/* Resume is modelled as a transition back to PMSG_ON. */
	return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif

512 513
int __init clk_init(void)
{
514
	int ret;
515

516 517 518 519
	ret = arch_clk_init();
	if (unlikely(ret)) {
		pr_err("%s: CPU clock registration failed.\n", __func__);
		return ret;
520 521
	}

522 523 524 525 526 527 528 529
	if (sh_mv.mv_clk_init) {
		ret = sh_mv.mv_clk_init();
		if (unlikely(ret)) {
			pr_err("%s: machvec clock initialization failed.\n",
			       __func__);
			return ret;
		}
	}
530

531
	/* Kick the child clocks.. */
532
	recalculate_root_clocks();
533

534 535 536
	/* Enable the necessary init clocks */
	clk_enable_init_clocks();

537 538 539
	return ret;
}

540 541 542 543 544 545
/*
 *	debugfs support to trace clock tree hierarchy and attributes
 */
static struct dentry *clk_debugfs_root;

static int clk_debugfs_register_one(struct clk *c)
546
{
547
	int err;
548
	struct dentry *d, *child, *child_tmp;
549 550 551 552 553
	struct clk *pa = c->parent;
	char s[255];
	char *p = s;

	p += sprintf(p, "%s", c->name);
554
	if (c->id >= 0)
555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579
		sprintf(p, ":%d", c->id);
	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dentry = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	d = c->dentry;
580
	list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
581 582 583 584 585 586 587 588 589 590 591 592 593 594 595
		debugfs_remove(child);
	debugfs_remove(c->dentry);
	return err;
}

/* Register @c in debugfs, creating missing ancestor entries first. */
static int clk_debugfs_register(struct clk *c)
{
	struct clk *pa = c->parent;
	int err;

	/* A parent's directory must exist before its child's. */
	if (pa && !pa->dentry) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (c->dentry)
		return 0;

	return clk_debugfs_register_one(c);
}

/* Create the "clock" debugfs root and populate it with every clock. */
static int __init clk_debugfs_init(void)
{
	struct dentry *root;
	struct clk *c;
	int err;

	root = debugfs_create_dir("clock", NULL);
	if (!root)
		return -ENOMEM;
	clk_debugfs_root = root;

	list_for_each_entry(c, &clock_list, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	debugfs_remove(clk_debugfs_root); /* REVISIT: Cleanup correctly */
	return err;
}
late_initcall(clk_debugfs_init);