ccu_nm.c
/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 */

#include <linux/clk-provider.h>
#include <linux/io.h>

#include "ccu_frac.h"
#include "ccu_gate.h"
#include "ccu_nm.h"
#include "ccu_sdm.h"

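/* Candidate N/M factors and their allowed ranges for the rate search. */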
struct _ccu_nm {
	unsigned long	n, min_n, max_n;
	unsigned long	m, min_m, max_m;
};

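/*
 * Brute-force search for the N/M pair whose resulting rate gets closest
 * to the requested rate without exceeding it.
 */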
static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
			     struct _ccu_nm *nm)
{
	unsigned long best_rate = 0;
	unsigned long best_n = 0, best_m = 0;
	unsigned long _n, _m;

	for (_n = nm->min_n; _n <= nm->max_n; _n++) {
		for (_m = nm->min_m; _m <= nm->max_m; _m++) {
			unsigned long tmp_rate = parent * _n / _m;

			if (tmp_rate > rate)
				continue;

			if ((rate - tmp_rate) < (rate - best_rate)) {
				best_rate = tmp_rate;
				best_n = _n;
				best_m = _m;
			}
		}
	}

	nm->n = best_n;
	nm->m = best_m;
}

static void ccu_nm_disable(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	return ccu_gate_helper_disable(&nm->common, nm->enable);
}

static int ccu_nm_enable(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	return ccu_gate_helper_enable(&nm->common, nm->enable);
}

static int ccu_nm_is_enabled(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	return ccu_gate_helper_is_enabled(&nm->common, nm->enable);
}

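/*
 * Compute the current rate: fractional mode takes precedence, then
 * sigma-delta modulation, then the plain parent * N / M formula.
 */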
static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	unsigned long n, m;
	u32 reg;

	if (ccu_frac_helper_is_enabled(&nm->common, &nm->frac))
		return ccu_frac_helper_read_rate(&nm->common, &nm->frac);

	reg = readl(nm->common.base + nm->common.reg);

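	/*
	 * Both factor fields hold the value minus their offset; add the
	 * offset back and treat a result of zero as one.
	 */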
	n = reg >> nm->n.shift;
	n &= (1 << nm->n.width) - 1;
	n += nm->n.offset;
	if (!n)
		n++;

	m = reg >> nm->m.shift;
	m &= (1 << nm->m.width) - 1;
	m += nm->m.offset;
	if (!m)
		m++;

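	/* Sigma-delta modulation, when active, can report its own rate. */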
	if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm)) {
		unsigned long rate =
			ccu_sdm_helper_read_rate(&nm->common, &nm->sdm,
						 m, n);
		if (rate)
			return rate;
	}

	return parent_rate * n / m;
}

static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	struct _ccu_nm _nm;

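	/* Fractional mode or sigma-delta modulation may match the rate exactly. */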
	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate))
		return rate;

	if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate))
		return rate;

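	/* Unspecified factor limits (0) default to 1 and the full field range. */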
	_nm.min_n = nm->n.min ?: 1;
	_nm.max_n = nm->n.max ?: 1 << nm->n.width;
	_nm.min_m = 1;
	_nm.max_m = nm->m.max ?: 1 << nm->m.width;

	ccu_nm_find_best(*parent_rate, rate, &_nm);

	return *parent_rate * _nm.n / _nm.m;
}

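/*
 * Program the clock: use fractional mode if it matches the requested rate,
 * otherwise fall back to sigma-delta modulation or an integer N/M search.
 */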
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	struct _ccu_nm _nm;
	unsigned long flags;
	u32 reg;

	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
		spin_lock_irqsave(nm->common.lock, flags);

		/* most SoCs require M to be 0 if fractional mode is used */
		reg = readl(nm->common.base + nm->common.reg);
		reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
		writel(reg, nm->common.base + nm->common.reg);

		spin_unlock_irqrestore(nm->common.lock, flags);

		ccu_frac_helper_enable(&nm->common, &nm->frac);

		return ccu_frac_helper_set_rate(&nm->common, &nm->frac,
						rate, nm->lock);
	} else {
		ccu_frac_helper_disable(&nm->common, &nm->frac);
	}

	_nm.min_n = nm->n.min ?: 1;
	_nm.max_n = nm->n.max ?: 1 << nm->n.width;
	_nm.min_m = 1;
	_nm.max_m = nm->m.max ?: 1 << nm->m.width;

	if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
		ccu_sdm_helper_enable(&nm->common, &nm->sdm, rate);

		/* Sigma delta modulation requires specific N and M factors */
		ccu_sdm_helper_get_factors(&nm->common, &nm->sdm, rate,
					   &_nm.m, &_nm.n);
	} else {
		ccu_sdm_helper_disable(&nm->common, &nm->sdm);
		ccu_nm_find_best(parent_rate, rate, &_nm);
	}

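	/* Write the new factors; the register fields store factor - offset. */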
	spin_lock_irqsave(nm->common.lock, flags);

	reg = readl(nm->common.base + nm->common.reg);
	reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift);
	reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);

	reg |= (_nm.n - nm->n.offset) << nm->n.shift;
	reg |= (_nm.m - nm->m.offset) << nm->m.shift;
	writel(reg, nm->common.base + nm->common.reg);

	spin_unlock_irqrestore(nm->common.lock, flags);

	ccu_helper_wait_for_lock(&nm->common, nm->lock);

	return 0;
}

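/* clk_ops for N/M-style (multiplier/divider) clocks. */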
const struct clk_ops ccu_nm_ops = {
	.disable	= ccu_nm_disable,
	.enable		= ccu_nm_enable,
	.is_enabled	= ccu_nm_is_enabled,

	.recalc_rate	= ccu_nm_recalc_rate,
	.round_rate	= ccu_nm_round_rate,
	.set_rate	= ccu_nm_set_rate,
};