Commit d41bd8f3 authored by Linus Torvalds

Merge tag 'clk-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux

Pull clk fixes from Stephen Boyd:
 "The typical collection of minor bug fixes in clk drivers. We don't
  have anything in the core framework here, just driver fixes.

  There's a boot fix for Samsung devices and a safety measure for qoriq
  to prevent CPUs from running too fast. There's also a fix for i.MX6Q
  to properly handle audio clock rates. We also have some "that's
  obviously wrong" fixes like bad NULL pointer checks in the MMP driver
  and a poor usage of __pa in the xgene clk driver that are fixed here"

* tag 'clk-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux:
  clk: mmp: pxa910: fix return value check in pxa910_clk_init()
  clk: mmp: pxa168: fix return value check in pxa168_clk_init()
  clk: mmp: mmp2: fix return value check in mmp2_clk_init()
  clk: qoriq: Don't allow CPU clocks higher than starting value
  clk: imx: fix integer overflow in AV PLL round rate
  clk: xgene: Don't call __pa on ioremaped address
  clk/samsung: Use CLK_OF_DECLARE_DRIVER initialization method for CLKOUT
  clk: rockchip: don't return NULL when failing to register ddrclk branch
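
The i.MX entry above ("clk: imx: fix integer overflow in AV PLL round rate") is about 32-bit arithmetic: parent_rate * mfn does not fit in 32 bits, so the fractional term has to be accumulated in a 64-bit temporary before dividing by mfd, which is what the clk-pllv3 hunk further down does with do_div(). Below is a minimal userspace C sketch of that pattern, not kernel code; the 24 MHz reference and the div/mfn/mfd values are made-up illustration values.

/*
 * Sketch of the overflow fixed in the AV PLL round_rate/recalc_rate paths:
 * parent_rate * mfn exceeds 32 bits, so on 32-bit machines the old
 * "parent_rate * mfn / mfd" expression wrapped.  Values are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t parent_rate = 24000000;        /* 24 MHz reference */
        uint32_t div = 32, mfn = 500000, mfd = 1000000;

        /* Old pattern: the product wraps modulo 2^32 before the divide. */
        uint32_t frac_bad = parent_rate * mfn / mfd;

        /* Fixed pattern: widen to 64 bits first (do_div() in the kernel). */
        uint64_t temp64 = (uint64_t)parent_rate;
        temp64 *= mfn;
        temp64 /= mfd;

        printf("32-bit fractional part: %u Hz (wrapped)\n", frac_bad);
        printf("64-bit fractional part: %llu Hz\n",
               (unsigned long long)temp64);
        printf("rounded rate: %llu Hz\n",
               (unsigned long long)((uint64_t)parent_rate * div + temp64));
        return 0;
}
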
@@ -700,6 +700,7 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
                                              struct mux_hwclock *hwc,
                                              const struct clk_ops *ops,
                                              unsigned long min_rate,
+                                             unsigned long max_rate,
                                              unsigned long pct80_rate,
                                              const char *fmt, int idx)
 {
@@ -728,6 +729,8 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
                         continue;
                 if (rate < min_rate)
                         continue;
+                if (rate > max_rate)
+                        continue;

                 parent_names[j] = div->name;
                 hwc->parent_to_clksel[j] = i;
@@ -759,7 +762,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
         struct mux_hwclock *hwc;
         const struct clockgen_pll_div *div;
         unsigned long plat_rate, min_rate;
-        u64 pct80_rate;
+        u64 max_rate, pct80_rate;
         u32 clksel;

         hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
@@ -787,8 +790,8 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
                 return NULL;
         }

-        pct80_rate = clk_get_rate(div->clk);
-        pct80_rate *= 8;
+        max_rate = clk_get_rate(div->clk);
+        pct80_rate = max_rate * 8;
         do_div(pct80_rate, 10);

         plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);
@@ -798,7 +801,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
         else
                 min_rate = plat_rate / 2;

-        return create_mux_common(cg, hwc, &cmux_ops, min_rate,
+        return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
                                  pct80_rate, "cg-cmux%d", idx);
 }
@@ -813,7 +816,7 @@ static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
         hwc->reg = cg->regs + 0x20 * idx + 0x10;
         hwc->info = cg->info.hwaccel[idx];

-        return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0,
+        return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
                                  "cg-hwaccel%d", idx);
 }

@@ -463,22 +463,20 @@ static int xgene_clk_enable(struct clk_hw *hw)
         struct xgene_clk *pclk = to_xgene_clk(hw);
         unsigned long flags = 0;
         u32 data;
-        phys_addr_t reg;

         if (pclk->lock)
                 spin_lock_irqsave(pclk->lock, flags);

         if (pclk->param.csr_reg != NULL) {
                 pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
-                reg = __pa(pclk->param.csr_reg);
                 /* First enable the clock */
                 data = xgene_clk_read(pclk->param.csr_reg +
                                         pclk->param.reg_clk_offset);
                 data |= pclk->param.reg_clk_mask;
                 xgene_clk_write(data, pclk->param.csr_reg +
                                         pclk->param.reg_clk_offset);
-                pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n",
-                         clk_hw_get_name(hw), &reg,
+                pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n",
+                         clk_hw_get_name(hw),
                          pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
                          data);

@@ -488,8 +486,8 @@ static int xgene_clk_enable(struct clk_hw *hw)
                 data &= ~pclk->param.reg_csr_mask;
                 xgene_clk_write(data, pclk->param.csr_reg +
                                         pclk->param.reg_csr_offset);
-                pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n",
-                         clk_hw_get_name(hw), &reg,
+                pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n",
+                         clk_hw_get_name(hw),
                          pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
                          data);
         }

@@ -223,7 +223,7 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
         temp64 *= mfn;
         do_div(temp64, mfd);

-        return (parent_rate * div) + (u32)temp64;
+        return parent_rate * div + (unsigned long)temp64;
 }

 static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -247,7 +247,11 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
         do_div(temp64, parent_rate);
         mfn = temp64;

-        return parent_rate * div + parent_rate * mfn / mfd;
+        temp64 = (u64)parent_rate;
+        temp64 *= mfn;
+        do_div(temp64, mfd);
+
+        return parent_rate * div + (unsigned long)temp64;
 }

 static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,

@@ -313,7 +313,7 @@ static void __init mmp2_clk_init(struct device_node *np)
         }

         pxa_unit->apmu_base = of_iomap(np, 1);
-        if (!pxa_unit->mpmu_base) {
+        if (!pxa_unit->apmu_base) {
                 pr_err("failed to map apmu registers\n");
                 return;
         }

@@ -262,7 +262,7 @@ static void __init pxa168_clk_init(struct device_node *np)
         }

         pxa_unit->apmu_base = of_iomap(np, 1);
-        if (!pxa_unit->mpmu_base) {
+        if (!pxa_unit->apmu_base) {
                 pr_err("failed to map apmu registers\n");
                 return;
         }

@@ -282,7 +282,7 @@ static void __init pxa910_clk_init(struct device_node *np)
         }

         pxa_unit->apmu_base = of_iomap(np, 1);
-        if (!pxa_unit->mpmu_base) {
+        if (!pxa_unit->apmu_base) {
                 pr_err("failed to map apmu registers\n");
                 return;
         }
@@ -294,7 +294,7 @@ static void __init pxa910_clk_init(struct device_node *np)
         }

         pxa_unit->apbcp_base = of_iomap(np, 3);
-        if (!pxa_unit->mpmu_base) {
+        if (!pxa_unit->apbcp_base) {
                 pr_err("failed to map apbcp registers\n");
                 return;
         }

@@ -144,11 +144,8 @@ struct clk *rockchip_clk_register_ddrclk(const char *name, int flags,
         ddrclk->ddr_flag = ddr_flag;

         clk = clk_register(NULL, &ddrclk->hw);
-        if (IS_ERR(clk)) {
-                pr_err("%s: could not register ddrclk %s\n", __func__, name);
+        if (IS_ERR(clk))
                 kfree(ddrclk);
-                return NULL;
-        }

         return clk;
 }
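
For context on the rockchip hunk just above: clk_register() reports failure through an ERR_PTR()-encoded pointer and its callers test the result with IS_ERR(), so swallowing the error and returning NULL would be taken for a valid clock. The following is a small userspace sketch of that error-pointer convention, using simplified stand-ins for the kernel's <linux/err.h> helpers; register_ddrclk_stub() is a hypothetical failing registration call, not a real kernel function.

/*
 * Sketch of the ERR_PTR/IS_ERR convention with simplified stand-ins for
 * the kernel's <linux/err.h>.  register_ddrclk_stub() is hypothetical.
 */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO       4095

static void *ERR_PTR(long error)
{
        return (void *)error;
}

static long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *register_ddrclk_stub(void)
{
        return ERR_PTR(-ENOMEM);        /* registration failed */
}

int main(void)
{
        void *clk = register_ddrclk_stub();

        if (IS_ERR(clk))        /* caller can see and report the failure */
                printf("registration failed: %ld\n", PTR_ERR(clk));

        /* NULL, by contrast, does not trip IS_ERR() ... */
        printf("IS_ERR(NULL) = %d, so a NULL return would look valid\n",
               IS_ERR(NULL));
        return 0;
}
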
@@ -132,28 +132,34 @@ static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
                 pr_err("%s: failed to register clkout clock\n", __func__);
 }

+/*
+ * We use CLK_OF_DECLARE_DRIVER initialization method to avoid setting
+ * the OF_POPULATED flag on the pmu device tree node, so later the
+ * Exynos PMU platform device can be properly probed with PMU driver.
+ */
 static void __init exynos4_clkout_init(struct device_node *node)
 {
         exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK);
 }
-CLK_OF_DECLARE(exynos4210_clkout, "samsung,exynos4210-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4210_clkout, "samsung,exynos4210-pmu",
                exynos4_clkout_init);
-CLK_OF_DECLARE(exynos4212_clkout, "samsung,exynos4212-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4212_clkout, "samsung,exynos4212-pmu",
                exynos4_clkout_init);
-CLK_OF_DECLARE(exynos4412_clkout, "samsung,exynos4412-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4412_clkout, "samsung,exynos4412-pmu",
                exynos4_clkout_init);
-CLK_OF_DECLARE(exynos3250_clkout, "samsung,exynos3250-pmu",
+CLK_OF_DECLARE_DRIVER(exynos3250_clkout, "samsung,exynos3250-pmu",
                exynos4_clkout_init);

 static void __init exynos5_clkout_init(struct device_node *node)
 {
         exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK);
 }
-CLK_OF_DECLARE(exynos5250_clkout, "samsung,exynos5250-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5250_clkout, "samsung,exynos5250-pmu",
                exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5410_clkout, "samsung,exynos5410-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5410_clkout, "samsung,exynos5410-pmu",
                exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5420_clkout, "samsung,exynos5420-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5420_clkout, "samsung,exynos5420-pmu",
                exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5433_clkout, "samsung,exynos5433-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5433_clkout, "samsung,exynos5433-pmu",
                exynos5_clkout_init);