/*
 * arch/sh/kernel/cpu/init.c
 *
 * CPU init code
 *
 * Copyright (C) 2002 - 2009  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/elf.h>
#include <asm/io.h>
#include <asm/smp.h>
#ifdef CONFIG_SUPERH32
#include <asm/ubc.h>
#endif
#ifdef CONFIG_SH_FPU
#define cpu_has_fpu	1
#else
#define cpu_has_fpu	0
#endif

#ifdef CONFIG_SH_DSP
#define cpu_has_dsp	1
#else
#define cpu_has_dsp	0
#endif

L
Linus Torvalds 已提交
43 44 45 46
/*
 * Generic wrapper for command line arguments to disable on-chip
 * peripherals (nofpu, nodsp, and so forth).
 */
47 48 49 50 51 52 53 54
#define onchip_setup(x)					\
static int x##_disabled __initdata = !cpu_has_##x;	\
							\
static int __init x##_setup(char *opts)			\
{							\
	x##_disabled = 1;				\
	return 1;					\
}							\
L
Linus Torvalds 已提交
55 56 57 58 59
__setup("no" __stringify(x), x##_setup);

onchip_setup(fpu);
onchip_setup(dsp);

#ifdef CONFIG_SPECULATIVE_EXECUTION
#define CPUOPM		0xff2f0000
#define CPUOPM_RABD	(1 << 5)

static void __init speculative_execution_init(void)
{
	/* Clear RABD */
	ctrl_outl(ctrl_inl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);

	/* Flush the update */
	(void)ctrl_inl(CPUOPM);
	ctrl_barrier();
}
#else
#define speculative_execution_init()	do { } while (0)
#endif

#ifdef CONFIG_CPU_SH4A
#define EXPMASK			0xff2f0004
#define EXPMASK_RTEDS		(1 << 0)
#define EXPMASK_BRDSSLP		(1 << 1)
#define EXPMASK_MMCAW		(1 << 4)

static void __init expmask_init(void)
{
	unsigned long expmask = __raw_readl(EXPMASK);

	/*
	 * Future proofing.
	 *
90 91 92
	 * Disable support for slottable sleep instruction, non-nop
	 * instructions in the rte delay slot, and associative writes to
	 * the memory-mapped cache array.
93
	 */
94
	expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW);
95 96 97 98 99 100 101 102

	__raw_writel(expmask, EXPMASK);
	ctrl_barrier();
}
#else
#define expmask_init()	do { } while (0)
#endif

/* 2nd-level cache init */
void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void)
{
}


/*
 * Generic first-level cache init
 */
#ifdef CONFIG_SUPERH32
112
static void __uses_jump_to_uncached cache_init(void)
L
Linus Torvalds 已提交
113 114 115
{
	unsigned long ccr, flags;

116
	jump_to_uncached();
L
Linus Torvalds 已提交
117 118 119
	ccr = ctrl_inl(CCR);

	/*
120 121 122 123 124 125 126 127 128
	 * At this point we don't know whether the cache is enabled or not - a
	 * bootloader may have enabled it.  There are at least 2 things that
	 * could be dirty in the cache at this point:
	 * 1. kernel command line set up by boot loader
	 * 2. spilled registers from the prolog of this function
	 * => before re-initialising the cache, we must do a purge of the whole
	 * cache out to memory for safety.  As long as nothing is spilled
	 * during the loop to lines that have already been done, this is safe.
	 * - RPC
L
Linus Torvalds 已提交
129 130 131 132
	 */
	if (ccr & CCR_CACHE_ENABLE) {
		unsigned long ways, waysize, addrstart;

133
		waysize = current_cpu_data.dcache.sets;
L
Linus Torvalds 已提交
134

135
#ifdef CCR_CACHE_ORA
L
Linus Torvalds 已提交
136 137 138 139 140 141
		/*
		 * If the OC is already in RAM mode, we only have
		 * half of the entries to flush..
		 */
		if (ccr & CCR_CACHE_ORA)
			waysize >>= 1;
142
#endif
L
Linus Torvalds 已提交
143

144
		waysize <<= current_cpu_data.dcache.entry_shift;
L
Linus Torvalds 已提交
145 146 147 148 149 150 151

#ifdef CCR_CACHE_EMODE
		/* If EMODE is not set, we only have 1 way to flush. */
		if (!(ccr & CCR_CACHE_EMODE))
			ways = 1;
		else
#endif
152
			ways = current_cpu_data.dcache.ways;
L
Linus Torvalds 已提交
153 154 155 156 157 158 159

		addrstart = CACHE_OC_ADDRESS_ARRAY;
		do {
			unsigned long addr;

			for (addr = addrstart;
			     addr < addrstart + waysize;
160
			     addr += current_cpu_data.dcache.linesz)
L
Linus Torvalds 已提交
161 162
				ctrl_outl(0, addr);

163
			addrstart += current_cpu_data.dcache.way_incr;
L
Linus Torvalds 已提交
164 165 166 167 168 169 170 171 172 173 174
		} while (--ways);
	}

	/*
	 * Default CCR values .. enable the caches
	 * and invalidate them immediately..
	 */
	flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;

#ifdef CCR_CACHE_EMODE
	/* Force EMODE if possible */
175
	if (current_cpu_data.dcache.ways > 1)
L
Linus Torvalds 已提交
176
		flags |= CCR_CACHE_EMODE;
177 178
	else
		flags &= ~CCR_CACHE_EMODE;
L
Linus Torvalds 已提交
179 180
#endif

181 182
#if defined(CONFIG_CACHE_WRITETHROUGH)
	/* Write-through */
L
Linus Torvalds 已提交
183
	flags |= CCR_CACHE_WT;
184 185
#elif defined(CONFIG_CACHE_WRITEBACK)
	/* Write-back */
L
Linus Torvalds 已提交
186
	flags |= CCR_CACHE_CB;
187 188 189
#else
	/* Off */
	flags &= ~CCR_CACHE_ENABLE;
L
Linus Torvalds 已提交
190 191
#endif

192 193
	l2_cache_init();

L
Linus Torvalds 已提交
194
	ctrl_outl(flags, CCR);
195
	back_to_cached();
L
Linus Torvalds 已提交
196
}
197 198 199
#else
#define cache_init()	do { } while (0)
#endif
#define CSHAPE(totalsize, linesize, assoc) \
	((totalsize & ~0xff) | (linesize << 4) | assoc)

#define CACHE_DESC_SHAPE(desc)	\
	CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)

static void detect_cache_shape(void)
{
	l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);

	if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
		l1i_cache_shape = l1d_cache_shape;
	else
		l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);

	if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
		l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
	else
		l2_cache_shape = -1; /* No S-cache */
}

static void __init fpu_init(void)
{
	/* Disable the FPU */
	if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
		printk("FPU Disabled\n");
		current_cpu_data.flags &= ~CPU_HAS_FPU;
	}

	disable_fpu();
	clear_used_math();
}

#ifdef CONFIG_SH_DSP
static void __init release_dsp(void)
{
	unsigned long sr;

	/* Clear SR.DSP bit */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"and\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		: "=&r" (sr)
		: "r" (~SR_DSP)
	);
}

static void __init dsp_init(void)
{
	unsigned long sr;

	/*
	 * Set the SR.DSP bit, wait for one instruction, and then read
	 * back the SR value.
	 */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"or\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		"nop\n\t"
		"stc\tsr, %0\n\t"
		: "=&r" (sr)
		: "r" (SR_DSP)
	);

	/* If the DSP bit is still set, this CPU has a DSP */
	if (sr & SR_DSP)
269
		current_cpu_data.flags |= CPU_HAS_DSP;
L
Linus Torvalds 已提交
270

271 272 273 274 275 276
	/* Disable the DSP */
	if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
		printk("DSP Disabled\n");
		current_cpu_data.flags &= ~CPU_HAS_DSP;
	}

L
Linus Torvalds 已提交
277 278 279
	/* Now that we've determined the DSP status, clear the DSP bit. */
	release_dsp();
}
#else
static inline void __init dsp_init(void) { }
L
Linus Torvalds 已提交
282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299
#endif /* CONFIG_SH_DSP */

/**
 * sh_cpu_init
 *
 * This is our initial entry point for each CPU, and is invoked on the boot
 * CPU prior to calling start_kernel(). For SMP, a combination of this and
 * start_secondary() will bring up each processor to a ready state prior
 * to hand forking the idle loop.
 *
 * We do all of the basic processor init here, including setting up the
 * caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is
 * hit (and subsequently platform_setup()) things like determining the
 * CPU subtype and initial configuration will all be done.
 *
 * Each processor family is still responsible for doing its own probing
 * and cache configuration in detect_cpu_and_cache_system().
 */
300

P
Paul Mundt 已提交
301
asmlinkage void __init sh_cpu_init(void)
L
Linus Torvalds 已提交
302
{
303 304
	current_thread_info()->cpu = hard_smp_processor_id();

L
Linus Torvalds 已提交
305 306 307
	/* First, probe the CPU */
	detect_cpu_and_cache_system();

308 309 310
	if (current_cpu_data.type == CPU_SH_NONE)
		panic("Unknown CPU");

311 312 313 314 315 316 317 318 319 320 321 322 323 324
	/* First setup the rest of the I-cache info */
	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
				      current_cpu_data.icache.linesz;

	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
				    current_cpu_data.icache.linesz;

	/* And the D-cache too */
	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
				      current_cpu_data.dcache.linesz;

	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
				    current_cpu_data.dcache.linesz;

L
Linus Torvalds 已提交
325 326 327
	/* Init the cache */
	cache_init();

328
	if (raw_smp_processor_id() == 0) {
329 330 331
		shm_align_mask = max_t(unsigned long,
				       current_cpu_data.dcache.way_size - 1,
				       PAGE_SIZE - 1);
332

333 334 335 336
		/* Boot CPU sets the cache shape */
		detect_cache_shape();
	}

337 338
	fpu_init();
	dsp_init();
L
Linus Torvalds 已提交
339

P
Paul Mundt 已提交
340 341 342 343 344 345
	/*
	 * Initialize the per-CPU ASID cache very early, since the
	 * TLB flushing routines depend on this being setup.
	 */
	current_cpu_data.asid_cache = NO_CONTEXT;

346
	speculative_execution_init();
347
	expmask_init();
348 349 350 351 352 353

	/*
	 * Boot processor to setup the FP and extended state context info.
	 */
	if (raw_smp_processor_id() == 0)
		init_thread_xstate();
L
Linus Torvalds 已提交
354
}