/*
 * Versatile Express Core Tile Cortex A9x4 Support
 */
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
#include <linux/clkdev.h>
#include <linux/vexpress.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/hardware/arm_timer.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/smp_scu.h>
#include <asm/smp_twd.h>

#include <mach/ct-ca9x4.h>

#include <asm/hardware/timer-sp.h>

#include <asm/mach/map.h>
#include <asm/mach/time.h>

#include "core.h"

#include <mach/motherboard.h>
#include <mach/irqs.h>

#include <plat/clcd.h>

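/* Static mapping of the A9 MPCore private memory region (SCU, GIC, timers). */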
static struct map_desc ct_ca9x4_io_desc[] __initdata = {
	{
		.virtual        = V2T_PERIPH,
		.pfn            = __phys_to_pfn(CT_CA9X4_MPIC),
		.length         = SZ_8K,
		.type           = MT_DEVICE,
	},
};

static void __init ct_ca9x4_map_io(void)
{
	iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
}

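/*
 * Per-CPU local timer: register the A9 MPCore private (TWD) timer, or
 * fall back to a no-op when TWD support is not built in.
 */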
#ifdef CONFIG_HAVE_ARM_TWD
static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, A9_MPCORE_TWD, IRQ_LOCALTIMER);

static void __init ca9x4_twd_init(void)
{
	int err = twd_local_timer_register(&twd_local_timer);
	if (err)
		pr_err("twd_local_timer_register failed %d\n", err);
}
#else
#define ca9x4_twd_init()	do {} while (0)
#endif

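/*
 * Set up the A9 MPCore GIC (distributor and CPU interface) and the
 * per-CPU TWD timers.
 */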
static void __init ct_ca9x4_init_irq(void)
{
	gic_init(0, 29, ioremap(A9_MPCORE_GIC_DIST, SZ_4K),
		 ioremap(A9_MPCORE_GIC_CPU, SZ_256));
	ca9x4_twd_init();
}

static int ct_ca9x4_clcd_setup(struct clcd_fb *fb)
{
	unsigned long framesize = 1024 * 768 * 2;

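	/* Fixed XVGA (1024x768) panel; the framebuffer is 16bpp DMA memory. */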
	fb->panel = versatile_clcd_get_panel("XVGA");
	if (!fb->panel)
		return -EINVAL;

	return versatile_clcd_setup_dma(fb, framesize);
}

static struct clcd_board ct_ca9x4_clcd_data = {
	.name		= "CT-CA9X4",
	.caps		= CLCD_CAP_5551 | CLCD_CAP_565,
	.check		= clcdfb_check,
	.decode		= clcdfb_decode,
	.setup		= ct_ca9x4_clcd_setup,
	.mmap		= versatile_clcd_mmap_dma,
	.remove		= versatile_clcd_remove_dma,
};

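/* Fixed AMBA peripherals on the core tile: CLCD, DMC, SMC and GPIO. */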
static AMBA_AHB_DEVICE(clcd, "ct:clcd", 0, CT_CA9X4_CLCDC, IRQ_CT_CA9X4_CLCDC, &ct_ca9x4_clcd_data);
static AMBA_APB_DEVICE(dmc, "ct:dmc", 0, CT_CA9X4_DMC, IRQ_CT_CA9X4_DMC, NULL);
static AMBA_APB_DEVICE(smc, "ct:smc", 0, CT_CA9X4_SMC, IRQ_CT_CA9X4_SMC, NULL);
static AMBA_APB_DEVICE(gpio, "ct:gpio", 0, CT_CA9X4_GPIO, IRQ_CT_CA9X4_GPIO, NULL);

static struct amba_device *ct_ca9x4_amba_devs[] __initdata = {
	&clcd_device,
	&dmc_device,
	&smc_device,
	&gpio_device,
};

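/* One PMU overflow interrupt per Cortex-A9 core. */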
static struct resource pmu_resources[] = {
	[0] = {
		.start	= IRQ_CT_CA9X4_PMU_CPU0,
		.end	= IRQ_CT_CA9X4_PMU_CPU0,
		.flags	= IORESOURCE_IRQ,
	},
	[1] = {
		.start	= IRQ_CT_CA9X4_PMU_CPU1,
		.end	= IRQ_CT_CA9X4_PMU_CPU1,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= IRQ_CT_CA9X4_PMU_CPU2,
		.end	= IRQ_CT_CA9X4_PMU_CPU2,
		.flags	= IORESOURCE_IRQ,
	},
	[3] = {
		.start	= IRQ_CT_CA9X4_PMU_CPU3,
		.end	= IRQ_CT_CA9X4_PMU_CPU3,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device pmu_device = {
	.name		= "arm-pmu",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(pmu_resources),
	.resource	= pmu_resources,
};

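/* OSC1 on the core tile generates the CLCD pixel clock. */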
static struct platform_device osc1_device = {
	.name		= "vexpress-osc",
	.id		= 1,
	.num_resources	= 1,
	.resource	= (struct resource []) {
		VEXPRESS_RES_FUNC(0xf, 1),
	},
};

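/*
 * Core tile initialisation: enable the L2 cache, register the fixed
 * AMBA and platform devices, and wire OSC1 up as the CLCD clock.
 */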
static void __init ct_ca9x4_init(void)
{
	int i;

#ifdef CONFIG_CACHE_L2X0
	void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K);

	/* set RAM latencies to 1 cycle for this core tile. */
	writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
	writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);

	l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
#endif

	for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++)
		amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource);

	platform_device_register(&pmu_device);
	platform_device_register(&osc1_device);

	WARN_ON(clk_register_clkdev(vexpress_osc_setup(&osc1_device.dev),
			NULL, "ct:clcd"));
}

#ifdef CONFIG_SMP
static void __iomem *ct_ca9x4_scu_base __initdata;

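/*
 * Read the core count from the SCU and mark each core as possible so
 * the SMP code can bring it online later.
 */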
static void __init ct_ca9x4_init_cpu_map(void)
{
	int i, ncores;

	ct_ca9x4_scu_base = ioremap(A9_MPCORE_SCU, SZ_128);
	if (WARN_ON(!ct_ca9x4_scu_base))
		return;

	ncores = scu_get_core_count(ct_ca9x4_scu_base);

	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; ++i)
		set_cpu_possible(i, true);
}

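/* Enable coherency through the SCU before secondary cores are started. */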
static void __init ct_ca9x4_smp_enable(unsigned int max_cpus)
{
	scu_enable(ct_ca9x4_scu_base);
}
#endif

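/* Core tile descriptor, matched by the motherboard code via V2M_CT_ID_CA9. */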
struct ct_desc ct_ca9x4_desc __initdata = {
	.id		= V2M_CT_ID_CA9,
	.name		= "CA9x4",
	.map_io		= ct_ca9x4_map_io,
	.init_irq	= ct_ca9x4_init_irq,
	.init_tile	= ct_ca9x4_init,
#ifdef CONFIG_SMP
	.init_cpu_map	= ct_ca9x4_init_cpu_map,
	.smp_enable	= ct_ca9x4_smp_enable,
#endif
};