/*
 * OMAP4 specific common source file.
 *
 * Copyright (C) 2010 Texas Instruments, Inc.
 * Author:
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/memblock.h>

#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/map.h>
#include <asm/memblock.h>

#include <plat/irqs.h>
#include <plat/sram.h>
#include <plat/omap-secure.h>

#include <mach/hardware.h>
#include <mach/omap-wakeupgen.h>

#include "common.h"
#include "omap4-sar-layout.h"

#ifdef CONFIG_CACHE_L2X0
static void __iomem *l2cache_base;
#endif

static void __iomem *sar_ram_base;

#ifdef CONFIG_OMAP4_ERRATA_I688
/* Used to implement memory barrier on DRAM path */
#define OMAP4_DRAM_BARRIER_VA			0xfe600000

void __iomem *dram_sync, *sram_sync;

static phys_addr_t paddr;
static u32 size;

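/*
 * Force any outstanding writes on both the DRAM and the SRAM paths to
 * complete by doing a read followed by a write-back through the
 * strongly-ordered barrier mappings (OMAP4 erratum i688 workaround).
 */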
void omap_bus_sync(void)
{
	if (dram_sync && sram_sync) {
		writel_relaxed(readl_relaxed(dram_sync), dram_sync);
		writel_relaxed(readl_relaxed(sram_sync), sram_sync);
		isb();
	}
}

/* Steal one page physical memory for barrier implementation */
int __init omap_barrier_reserve_memblock(void)
{

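	/*
	 * Round the reservation up to a full 1MB section so that
	 * omap_barriers_init() can map it with iotable_init().
	 */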
	size = ALIGN(PAGE_SIZE, SZ_1M);
	paddr = arm_memblock_steal(size, SZ_1M);

	return 0;
}

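/*
 * Set up the two barrier targets used by omap_bus_sync(): the stolen
 * DRAM page mapped strongly-ordered (MT_MEMORY_SO) at
 * OMAP4_DRAM_BARRIER_VA, plus the existing internal SRAM mapping.
 */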
void __init omap_barriers_init(void)
{
	struct map_desc dram_io_desc[1];

	dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
	dram_io_desc[0].pfn = __phys_to_pfn(paddr);
	dram_io_desc[0].length = size;
	dram_io_desc[0].type = MT_MEMORY_SO;
	iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
	dram_sync = (void __iomem *) dram_io_desc[0].virtual;
	sram_sync = (void __iomem *) OMAP4_SRAM_VA;

	pr_info("OMAP4: Map 0x%08llx to 0x%08lx for dram barrier\n",
		(long long) paddr, dram_io_desc[0].virtual);

}
#else
void __init omap_barriers_init(void)
{}
#endif

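/*
 * Statically map the GIC distributor and CPU interface, bring up the
 * WakeupGen and then hand the mappings over to the generic GIC init.
 */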
void __init gic_init_irq(void)
{
	void __iomem *omap_irq_base;
	void __iomem *gic_dist_base_addr;

	/* Static mapping, never released */
	gic_dist_base_addr = ioremap(OMAP44XX_GIC_DIST_BASE, SZ_4K);
	BUG_ON(!gic_dist_base_addr);

	/* Static mapping, never released */
	omap_irq_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
	BUG_ON(!omap_irq_base);

	omap_wakeupgen_init();

	gic_init(0, 29, gic_dist_base_addr, omap_irq_base);
}

#ifdef CONFIG_CACHE_L2X0

void __iomem *omap4_get_l2cache_base(void)
{
	return l2cache_base;
}

static void omap4_l2x0_disable(void)
{
	/* Disable PL310 L2 Cache controller */
	omap_smc1(0x102, 0x0);
}

static void omap4_l2x0_set_debug(unsigned long val)
{
	/* Program PL310 L2 Cache controller debug register */
	omap_smc1(0x100, val);
}

static int __init omap_l2_cache_init(void)
{
	u32 aux_ctrl = 0;

	/*
	 * To avoid code running on other OMAPs in
	 * multi-omap builds
	 */
	if (!cpu_is_omap44xx())
		return -ENODEV;

	/* Static mapping, never released */
	l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
	if (WARN_ON(!l2cache_base))
		return -ENOMEM;

	/*
	 * 16-way associativity, parity disabled
	 * Way size - 32KB (es1.0)
	 * Way size - 64KB (es2.0 +)
	 */
	aux_ctrl = ((1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT) |
			(0x1 << 25) |
			(0x1 << L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT) |
			(0x1 << L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT));

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		aux_ctrl |= 0x2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT;
	} else {
		aux_ctrl |= ((0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
			(1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
			(1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
			(1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) |
			(1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT));
	}
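	/*
	 * Program the PL310 auxiliary control register through the
	 * secure monitor (this step is skipped on ES1.0).
	 */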
	if (omap_rev() != OMAP4430_REV_ES1_0)
		omap_smc1(0x109, aux_ctrl);

	/* Enable PL310 L2 Cache controller */
	omap_smc1(0x102, 0x1);

	l2x0_init(l2cache_base, aux_ctrl, L2X0_AUX_CTRL_MASK);

	/*
	 * Override default outer_cache.disable with an OMAP4
	 * specific one.
	 */
	outer_cache.disable = omap4_l2x0_disable;
	outer_cache.set_debug = omap4_l2x0_set_debug;

	return 0;
}
early_initcall(omap_l2_cache_init);
#endif

void __iomem *omap4_get_sar_ram_base(void)
{
	return sar_ram_base;
}

/*
 * SAR RAM used to save and restore the HW
 * context in low power modes
 */
static int __init omap4_sar_ram_init(void)
{
	/*
	 * To avoid code running on other OMAPs in
	 * multi-omap builds
	 */
	if (!cpu_is_omap44xx())
		return -ENOMEM;

	/* Static mapping, never released */
	sar_ram_base = ioremap(OMAP44XX_SAR_RAM_BASE, SZ_16K);
	if (WARN_ON(!sar_ram_base))
		return -ENOMEM;

	return 0;
}
early_initcall(omap4_sar_ram_init);