/*
 * arch/xtensa/include/asm/initialize_mmu.h
 *
 * Initializes MMU:
 *
 *      For the new V3 MMU we remap the TLB from virtual == physical
 *      to the standard Linux mapping used in earlier MMUs.
 *
 *      For the new MMU we also support a configuration register that
 *      specifies how the S32C1I instruction operates with the cache
 *      controller.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2008 - 2012 Tensilica, Inc.
 *
 *   Marc Gauthier <marc@tensilica.com>
 *   Pete Delaney <piet@tensilica.com>
 */

#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H

#include <asm/pgtable.h>
#include <asm/vectors.h>

#if XCHAL_HAVE_PTP_MMU
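/*
 * Attribute bits for the static TLB entries installed below: cache
 * mode combined with hardware write and execute permission.
 */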
#define CA_BYPASS	(_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
#define CA_WRITEBACK	(_PAGE_CA_WB     | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
#else
#define CA_WRITEBACK	(0x4)
#endif

#ifndef XCHAL_SPANNING_WAY
#define XCHAL_SPANNING_WAY 0
#endif

#ifdef __ASSEMBLY__

#define XTENSA_HWVERSION_RC_2009_0 230000

	.macro	initialize_mmu

#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
/*
 * We have an Atomic Operation Control (ATOMCTL) register; initialize it.
 * For details see Documentation/xtensa/atomctl.txt
 */
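/*
 * ATOMCTL contains 2-bit fields for the writeback, writethrough and
 * bypass cache attributes, each selecting how S32C1I is performed
 * (exception, RCW bus transaction, or internal cache operation);
 * see atomctl.txt for the exact encoding of the values below.
 */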
#if XCHAL_DCACHE_IS_COHERENT
	movi	a3, 0x25	/* For SMP/MX -- internal for writeback,
				 * RCW otherwise
				 */
#else
	movi	a3, 0x29	/* non-MX -- Most cores use Std Memory
				 * Controllers which usually can't use RCW
				 */
#endif
	wsr	a3, atomctl
#endif  /* XCHAL_HAVE_S32C1I &&
	 * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
	 */

#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
/*
 * Have MMU v3
 */

#if !XCHAL_HAVE_VECBASE
# error "MMU v3 requires reloc vectors"
#endif

	movi	a1, 0
	_call0	1f
	_j	2f
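	/* _call0 leaves the physical address of the _j above in a0;
	 * it is used below to locate this code, build a temporary
	 * mapping for it, and finally re-execute the _j through that
	 * mapping to reach 2f.
	 */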

	.align	4
1:

#if CONFIG_KERNEL_LOAD_ADDRESS < 0x40000000ul
#define TEMP_MAPPING_VADDR 0x40000000
#else
#define TEMP_MAPPING_VADDR 0x00000000
#endif
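/*
 * The temporary mapping must live in a different 512MB region than the
 * one this code currently executes from (identity-mapped at the kernel
 * load address), hence the choice above.
 */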

	/* Step 1: invalidate the 512MB mapping at TEMP_MAPPING_VADDR
	 * (0x40000000..0x5FFFFFFF in the default layout).
	 */

	movi	a2, TEMP_MAPPING_VADDR | XCHAL_SPANNING_WAY
	idtlb	a2
	iitlb	a2
	isync

	/* Step 2: map the 128MB at TEMP_MAPPING_VADDR to the paddr
	 * containing this code (0x40000000..0x47FFFFFF in the default
	 * layout) and jump to the new mapping.
	 */

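	/* Build the temporary mapping: a3 = PTE value (the 128MB-aligned
	 * physical address of this code, taken from the return address
	 * in a0, with bypass caching); a7 = TLB entry at
	 * TEMP_MAPPING_VADDR in way 5.
	 */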
	srli	a3, a0, 27
	slli	a3, a3, 27
	addi	a3, a3, CA_BYPASS
	addi	a7, a2, 5 - XCHAL_SPANNING_WAY
	wdtlb	a3, a7
	witlb	a3, a7
	isync

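	/* Continue at the equivalent address inside the temporary
	 * mapping: a4 = (a0 mod 128MB) + TEMP_MAPPING_VADDR, i.e. the
	 * _j 2f above as seen through the new way-5 entry.
	 */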
	slli	a4, a0, 5
	srli	a4, a4, 5
	addi	a5, a2, -XCHAL_SPANNING_WAY
	add	a4, a4, a5
	jx	a4

	/* Step 3: unmap everything other than the current area.
	 *	   Start 512MB above the temporary mapping, wrap around,
	 *	   and end 512MB below it (0x60000000 .. 0x20000000 in
	 *	   the default layout).
	 */
2:	movi	a4, 0x20000000
	add	a5, a2, a4
3:	idtlb	a5
	iitlb	a5
	add	a5, a5, a4
	bne	a5, a2, 3b

	/* Step 4: Setup MMU with the requested static mappings. */

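	/* Program the variable page-size TLB ways: this value selects
	 * 256MB pages for way 6, which holds the KIO mappings (and KSEG
	 * in the 256M/512M layouts); way 5 keeps its default 128MB
	 * page size.
	 */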
	movi	a6, 0x01000000
	wsr	a6, ITLBCFG
	wsr	a6, DTLBCFG
	isync

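	/* Each static mapping is installed in both TLBs:
	 * a5 = vaddr | way index (the entry to write),
	 * a4 = paddr | cache attribute (the PTE value).
	 */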
	movi	a5, XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + CA_WRITEBACK
	wdtlb	a4, a5
	witlb	a4, a5

	movi	a5, XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + CA_BYPASS
	wdtlb	a4, a5
	witlb	a4, a5

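	/* The KSEG TLB way maps 256MB per entry in this layout, so a
	 * second cached/bypass pair covers the upper half of the 512MB
	 * KSEG.
	 */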
#ifdef CONFIG_XTENSA_KSEG_512M
	movi	a5, XCHAL_KSEG_CACHED_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_WRITEBACK
	wdtlb	a4, a5
	witlb	a4, a5

	movi	a5, XCHAL_KSEG_BYPASS_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_BYPASS
	wdtlb	a4, a5
	witlb	a4, a5
#endif

	movi	a5, XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_TLB_WAY
	movi	a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
	wdtlb	a4, a5
	witlb	a4, a5

	movi	a5, XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_TLB_WAY
	movi	a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
	wdtlb	a4, a5
	witlb	a4, a5

	isync

	/* Jump to self, using final mappings. */
	movi	a4, 1f
	jx	a4

1:
	/* Step 5: remove temporary mapping. */
	idtlb	a7
	iitlb	a7
	isync

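	/* Clear PTEVADDR, the page table virtual base that the TLB miss
	 * handlers use to locate PTEs.
	 */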
	movi	a0, 0
	wsr	a0, ptevaddr
	rsync

#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
	  XCHAL_HAVE_SPANNING_WAY */

#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS && \
		(XCHAL_DCACHE_SIZE || XCHAL_ICACHE_SIZE)
	/* Enable data and instruction cache in the DEFAULT_MEMORY region
	 * if the processor has DTLB and ITLB.
	 */

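	/* a5 steps through the region one spanning-way entry (512MB) at
	 * a time; each entry's attribute bits are cleared (a6) and
	 * replaced with writeback (a7). a8 is the step size, a9 the
	 * remaining region size.
	 */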
	movi	a5, PLATFORM_DEFAULT_MEM_START | XCHAL_SPANNING_WAY
	movi	a6, ~_PAGE_ATTRIB_MASK
	movi	a7, CA_WRITEBACK
	movi	a8, 0x20000000
	movi	a9, PLATFORM_DEFAULT_MEM_SIZE
	j	2f
1:
	sub	a9, a9, a8
2:
#if XCHAL_DCACHE_SIZE
	rdtlb1	a3, a5
	and	a3, a3, a6
	or	a3, a3, a7
	wdtlb	a3, a5
#endif
#if XCHAL_ICACHE_SIZE
	ritlb1	a4, a5
	and	a4, a4, a6
	or	a4, a4, a7
	witlb	a4, a5
#endif
	add	a5, a5, a8
	bltu	a8, a9, 1b

#endif

	.endm

#endif /*__ASSEMBLY__*/

#endif /* _XTENSA_INITIALIZE_MMU_H */