/*
 * arch/sh/kernel/cpu/init.c
 *
 * CPU init code
 *
 * Copyright (C) 2002 - 2007  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/elf.h>
#include <asm/io.h>
#include <asm/smp.h>
#ifdef CONFIG_SUPERH32
#include <asm/ubc.h>
#endif

/*
 * Generic wrapper for command line arguments to disable on-chip
 * peripherals (nofpu, nodsp, and so forth).
 */
#define onchip_setup(x)				\
static int x##_disabled __initdata = 0;		\
						\
static int __init x##_setup(char *opts)		\
{						\
	x##_disabled = 1;			\
41
	return 1;				\
L
Linus Torvalds 已提交
42 43 44 45 46 47
}						\
__setup("no" __stringify(x), x##_setup);

onchip_setup(fpu);
onchip_setup(dsp);

#ifdef CONFIG_SPECULATIVE_EXECUTION
#define CPUOPM		0xff2f0000
#define CPUOPM_RABD	(1 << 5)

/*
 * Clear the RABD bit in the CPU operation mode register and flush
 * the write out, so the new setting takes effect immediately.
 */
static void __init speculative_execution_init(void)
{
	/* Clear RABD */
	ctrl_outl(ctrl_inl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);

	/* Flush the update: read back the register and serialize */
	(void)ctrl_inl(CPUOPM);
	ctrl_barrier();
}
#else
#define speculative_execution_init()	do { } while (0)
#endif

65 66 67 68 69
/* 2nd-level cache init */
void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void)
{
}

/*
 * Generic first-level cache init.
 *
 * Flushes any dirty lines a bootloader may have left in the D-cache,
 * then programs CCR with the configured write policy, enabling and
 * invalidating the caches.  Runs from the uncached mapping since it
 * reconfigures the cache underneath itself.
 */
#ifdef CONFIG_SUPERH32
static void __uses_jump_to_uncached cache_init(void)
{
	unsigned long ccr, flags;

	jump_to_uncached();
	ccr = ctrl_inl(CCR);

	/*
	 * At this point we don't know whether the cache is enabled or not - a
	 * bootloader may have enabled it.  There are at least 2 things that
	 * could be dirty in the cache at this point:
	 * 1. kernel command line set up by boot loader
	 * 2. spilled registers from the prolog of this function
	 * => before re-initialising the cache, we must do a purge of the whole
	 * cache out to memory for safety.  As long as nothing is spilled
	 * during the loop to lines that have already been done, this is safe.
	 * - RPC
	 */
	if (ccr & CCR_CACHE_ENABLE) {
		unsigned long ways, waysize, addrstart;

		waysize = current_cpu_data.dcache.sets;

#ifdef CCR_CACHE_ORA
		/*
		 * If the OC is already in RAM mode, we only have
		 * half of the entries to flush..
		 */
		if (ccr & CCR_CACHE_ORA)
			waysize >>= 1;
#endif

		waysize <<= current_cpu_data.dcache.entry_shift;

#ifdef CCR_CACHE_EMODE
		/* If EMODE is not set, we only have 1 way to flush. */
		if (!(ccr & CCR_CACHE_EMODE))
			ways = 1;
		else
#endif
			ways = current_cpu_data.dcache.ways;

		/* Write 0 to every line of every way via the address array */
		addrstart = CACHE_OC_ADDRESS_ARRAY;
		do {
			unsigned long addr;

			for (addr = addrstart;
			     addr < addrstart + waysize;
			     addr += current_cpu_data.dcache.linesz)
				ctrl_outl(0, addr);

			addrstart += current_cpu_data.dcache.way_incr;
		} while (--ways);
	}

	/*
	 * Default CCR values .. enable the caches
	 * and invalidate them immediately..
	 */
	flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;

#ifdef CCR_CACHE_EMODE
	/* Force EMODE if possible */
	if (current_cpu_data.dcache.ways > 1)
		flags |= CCR_CACHE_EMODE;
	else
		flags &= ~CCR_CACHE_EMODE;
#endif

#if defined(CONFIG_CACHE_WRITETHROUGH)
	/* Write-through */
	flags |= CCR_CACHE_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
	/* Write-back */
	flags |= CCR_CACHE_CB;
#else
	/* Off */
	flags &= ~CCR_CACHE_ENABLE;
#endif

	l2_cache_init();

	ctrl_outl(flags, CCR);
	back_to_cached();
}
#else
#define cache_init()	do { } while (0)
#endif
L
Linus Torvalds 已提交
162

163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183
#define CSHAPE(totalsize, linesize, assoc) \
	((totalsize & ~0xff) | (linesize << 4) | assoc)

#define CACHE_DESC_SHAPE(desc)	\
	CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)

static void detect_cache_shape(void)
{
	l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);

	if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
		l1i_cache_shape = l1d_cache_shape;
	else
		l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);

	if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
		l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
	else
		l2_cache_shape = -1; /* No S-cache */
}

#ifdef CONFIG_SH_DSP
/* Clear SR.DSP, taking the CPU out of DSP mode. */
static void __init release_dsp(void)
{
	unsigned long sr;

	/* Clear SR.DSP bit */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"and\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		: "=&r" (sr)
		: "r" (~SR_DSP)
	);
}

/*
 * Probe for a DSP by trying to set SR.DSP and reading it back: on
 * CPUs without a DSP the bit does not stick.  Sets CPU_HAS_DSP in
 * current_cpu_data when present, and always leaves SR.DSP cleared
 * on return.
 */
static void __init dsp_init(void)
{
	unsigned long sr;

	/*
	 * Set the SR.DSP bit, wait for one instruction, and then read
	 * back the SR value.
	 */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"or\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		"nop\n\t"
		"stc\tsr, %0\n\t"
		: "=&r" (sr)
		: "r" (SR_DSP)
	);

	/* If the DSP bit is still set, this CPU has a DSP */
	if (sr & SR_DSP)
		current_cpu_data.flags |= CPU_HAS_DSP;

	/* Now that we've determined the DSP status, clear the DSP bit. */
	release_dsp();
}
#endif /* CONFIG_SH_DSP */

/**
 * sh_cpu_init
 *
 * This is our initial entry point for each CPU, and is invoked on the boot
 * CPU prior to calling start_kernel(). For SMP, a combination of this and
 * start_secondary() will bring up each processor to a ready state prior
 * to hand forking the idle loop.
 *
 * We do all of the basic processor init here, including setting up the
 * caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is
 * hit (and subsequently platform_setup()) things like determining the
 * CPU subtype and initial configuration will all be done.
 *
 * Each processor family is still responsible for doing its own probing
 * and cache configuration in detect_cpu_and_cache_system().
 */
242

P
Paul Mundt 已提交
243
asmlinkage void __init sh_cpu_init(void)
L
Linus Torvalds 已提交
244
{
245 246
	current_thread_info()->cpu = hard_smp_processor_id();

L
Linus Torvalds 已提交
247 248 249
	/* First, probe the CPU */
	detect_cpu_and_cache_system();

250 251 252
	if (current_cpu_data.type == CPU_SH_NONE)
		panic("Unknown CPU");

253 254 255 256 257 258 259 260 261 262 263 264 265 266
	/* First setup the rest of the I-cache info */
	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
				      current_cpu_data.icache.linesz;

	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
				    current_cpu_data.icache.linesz;

	/* And the D-cache too */
	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
				      current_cpu_data.dcache.linesz;

	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
				    current_cpu_data.dcache.linesz;

L
Linus Torvalds 已提交
267 268 269
	/* Init the cache */
	cache_init();

270
	if (raw_smp_processor_id() == 0) {
271 272 273
		shm_align_mask = max_t(unsigned long,
				       current_cpu_data.dcache.way_size - 1,
				       PAGE_SIZE - 1);
274

275 276 277 278
		/* Boot CPU sets the cache shape */
		detect_cache_shape();
	}

L
Linus Torvalds 已提交
279 280 281
	/* Disable the FPU */
	if (fpu_disabled) {
		printk("FPU Disabled\n");
282
		current_cpu_data.flags &= ~CPU_HAS_FPU;
L
Linus Torvalds 已提交
283 284 285 286
		disable_fpu();
	}

	/* FPU initialization */
287
	if ((current_cpu_data.flags & CPU_HAS_FPU)) {
L
Linus Torvalds 已提交
288 289 290 291
		clear_thread_flag(TIF_USEDFPU);
		clear_used_math();
	}

P
Paul Mundt 已提交
292 293 294 295 296 297
	/*
	 * Initialize the per-CPU ASID cache very early, since the
	 * TLB flushing routines depend on this being setup.
	 */
	current_cpu_data.asid_cache = NO_CONTEXT;

L
Linus Torvalds 已提交
298 299 300 301 302 303 304
#ifdef CONFIG_SH_DSP
	/* Probe for DSP */
	dsp_init();

	/* Disable the DSP */
	if (dsp_disabled) {
		printk("DSP Disabled\n");
305
		current_cpu_data.flags &= ~CPU_HAS_DSP;
L
Linus Torvalds 已提交
306 307 308 309 310 311 312 313 314 315
		release_dsp();
	}
#endif

	/*
	 * Some brain-damaged loaders decided it would be a good idea to put
	 * the UBC to sleep. This causes some issues when it comes to things
	 * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB.  So ..
	 * we wake it up and hope that all is well.
	 */
316
#ifdef CONFIG_SUPERH32
317 318
	if (raw_smp_processor_id() == 0)
		ubc_wakeup();
319 320
#endif

321
	speculative_execution_init();
L
Linus Torvalds 已提交
322
}