/*
 *  linux/arch/arm/mm/proc-v7.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv7 processor support.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#include "proc-macros.S"

#define TTB_C		(1 << 0)
#define TTB_S		(1 << 1)
#define TTB_RGN_NC	(0 << 3)
#define TTB_RGN_OC_WBWA	(1 << 3)
#define TTB_RGN_OC_WT	(2 << 3)
#define TTB_RGN_OC_WB	(3 << 3)
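
/*
 * TTBR0 bit assignments behind the TTB_* flags above: bit 0 (C) makes
 * the page table walks inner cacheable, bit 1 (S) marks them shareable,
 * and bits [4:3] (RGN) select the outer cacheability of the walks
 * (non-cacheable, write-back write-allocate, write-through or
 * write-back).
 */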

#ifndef CONFIG_SMP
#define TTB_FLAGS	TTB_C|TTB_RGN_OC_WB		@ mark PTWs cacheable, outer WB
#else
#define TTB_FLAGS	TTB_C|TTB_S|TTB_RGN_OC_WBWA	@ mark PTWs cacheable and shared, outer WBWA
#endif
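
/*
 *	cpu_v7_proc_init()
 *	cpu_v7_proc_fin()
 *
 *	There is no ARMv7-specific initialisation or shutdown work to do
 *	here; both entry points simply return.
 */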

ENTRY(cpu_v7_proc_init)
	mov	pc, lr
ENDPROC(cpu_v7_proc_init)

ENTRY(cpu_v7_proc_fin)
	mov	pc, lr
ENDPROC(cpu_v7_proc_fin)

/*
 *	cpu_v7_reset(loc)
 *
 *	Perform a soft reset of the system.  Put the CPU into the
 *	same state as it would be if it had been reset, and branch
 *	to what would be the reset vector.
 *
 *	- loc   - location to jump to for soft reset
 *
 *	It is assumed that:
 */
	.align	5
ENTRY(cpu_v7_reset)
	mov	pc, r0
ENDPROC(cpu_v7_reset)

/*
 *	cpu_v7_do_idle()
 *
 *	Idle the processor (eg, wait for interrupt).
 *
 *	IRQs are already disabled.
 */
ENTRY(cpu_v7_do_idle)
	dsb					@ WFI may enter a low-power mode
	wfi
	mov	pc, lr
ENDPROC(cpu_v7_do_idle)
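
/*
 *	cpu_v7_dcache_clean_area(addr, size)
 *
 *	Clean the D-cache over [addr, addr + size) one cache line at a
 *	time (c7, c10, 1 cleans a line by MVA) and drain with a DSB.
 *	If the TLB can fetch page table entries from the L1 cache
 *	(TLB_CAN_READ_FROM_L1_CACHE) there is nothing to clean.
 */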

ENTRY(cpu_v7_dcache_clean_area)
#ifndef TLB_CAN_READ_FROM_L1_CACHE
	dcache_line_size r2, r3
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, r2
	subs	r1, r1, r2
	bhi	1b
	dsb
#endif
	mov	pc, lr
ENDPROC(cpu_v7_dcache_clean_area)

/*
 *	cpu_v7_switch_mm(pgd_phys, tsk)
 *
 *	Set the translation table base pointer to be pgd_phys
 *
 *	- pgd_phys - physical address of new TTB
 *
 *	It is assumed that:
 *	- we are not using split page tables
 */
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
	mov	r2, #0
	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
	orr	r0, r0, #TTB_FLAGS
#ifdef CONFIG_ARM_ERRATA_430973
	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
#endif
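	/*
	 * Set the reserved context ID (0) first so that no TLB entries
	 * are tagged with the old or new ASID while TTBR0 and the
	 * context ID register are updated one at a time, then install
	 * the new context ID once the new TTB is in place.
	 */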
	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
	isb
1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
	isb
	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
	isb
#endif
	mov	pc, lr
ENDPROC(cpu_v7_switch_mm)

/*
 *	cpu_v7_set_pte_ext(ptep, pte)
 *
 *	Set a level 2 translation table entry.
 *
 *	- ptep  - pointer to level 2 translation table entry
 *		  (hardware version is stored at -2048 bytes)
 *	- pte   - PTE value to store
 *	- ext	- value for extended PTE bits
 */
ENTRY(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
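	/*
	 * The Linux view of the PTE is stored at ptep; the hardware
	 * small page descriptor it shadows sits 2048 bytes below.
	 * Build the hardware descriptor in r3: mask off the Linux-only
	 * bits, set the small page type and AP0, merge in the ext bits
	 * from r2, and derive TEX[0], APX, AP1 and XN from the Linux
	 * memory type and protection bits.  An entry that is not both
	 * young and present is written out as a fault (zero) entry.
	 */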
	str	r1, [r0], #-2048		@ linux version

	bic	r3, r1, #0x000003f0
	bic	r3, r3, #PTE_TYPE_MASK
	orr	r3, r3, r2
	orr	r3, r3, #PTE_EXT_AP0 | 2

	tst	r1, #1 << 4
	orrne	r3, r3, #PTE_EXT_TEX(1)

	tst	r1, #L_PTE_WRITE
	tstne	r1, #L_PTE_DIRTY
	orreq	r3, r3, #PTE_EXT_APX

	tst	r1, #L_PTE_USER
	orrne	r3, r3, #PTE_EXT_AP1
	tstne	r3, #PTE_EXT_APX
	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0

	tst	r1, #L_PTE_EXEC
	orreq	r3, r3, #PTE_EXT_XN

	tst	r1, #L_PTE_YOUNG
	tstne	r1, #L_PTE_PRESENT
	moveq	r3, #0

	str	r3, [r0]
	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
#endif
	mov	pc, lr
ENDPROC(cpu_v7_set_pte_ext)

cpu_v7_name:
	.ascii	"ARMv7 Processor"
	.align

	__INIT

/*
 *	__v7_setup
 *
 *	Initialise TLB, Caches, and MMU state ready to switch the MMU
 *	on.  Return in r0 the new CP15 C1 control register setting.
 *
 *	We automatically detect if we have a Harvard cache, and use the
 *	Harvard cache control instructions instead of the unified cache
 *	control instructions.
 *
 *	This should be able to cover all ARMv7 cores.
 *
 *	It is assumed that:
 *	- cache type register is implemented
 */
__v7_setup:
#ifdef CONFIG_SMP
	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode and
	orr	r0, r0, #(1 << 6) | (1 << 0)	@ TLB ops broadcasting
	mcr	p15, 0, r0, c1, c0, 1
#endif
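	/*
	 * v7_flush_dcache_all clobbers the low registers, so save the
	 * ones still needed afterwards (and lr) across the call.
	 */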
	adr	r12, __v7_setup_stack		@ the local stack
	stmia	r12, {r0-r5, r7, r9, r11, lr}
	bl	v7_flush_dcache_all
	ldmia	r12, {r0-r5, r7, r9, r11, lr}
#ifdef CONFIG_ARM_ERRATA_430973
	mrc	p15, 0, r10, c1, c0, 1		@ read aux control register
	orr	r10, r10, #(1 << 6)		@ set IBE to 1
	mcr	p15, 0, r10, c1, c0, 1		@ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_458693
	mrc	p15, 0, r10, c1, c0, 1		@ read aux control register
	orr	r10, r10, #(1 << 5)		@ set L1NEON to 1
	orr	r10, r10, #(1 << 9)		@ set PLDNOP to 1
	mcr	p15, 0, r10, c1, c0, 1		@ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_460075
	mrc	p15, 1, r10, c9, c0, 2		@ read L2 cache aux ctrl register
	orr	r10, r10, #(1 << 22)		@ set the Write Allocate disable bit
	mcr	p15, 1, r10, c9, c0, 2		@ write the L2 cache aux ctrl register
#endif
	mov	r10, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
#endif
	dsb
#ifdef CONFIG_MMU
	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
	orr	r4, r4, #TTB_FLAGS
	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
	mov	r10, #0x1f			@ domains 0, 1 = manager
	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
#endif
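	/*
	 * Memory type remapping: PRRR maps each TEX[0]/C/B encoding to
	 * strongly-ordered, device or normal memory, and NMRR supplies
	 * the inner/outer cacheability used for the normal encodings.
	 */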
	ldr	r5, =0xff0aa1a8
	ldr	r6, =0x40e040e0
	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
	mcr	p15, 0, r6, c10, c2, 1		@ write NMRR
	adr	r5, v7_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0, 0		@ read control register
	bic	r0, r0, r5			@ clear the bits we want cleared
	orr	r0, r0, r6			@ set the bits we want set
	mov	pc, lr				@ return to head.S:__ret
ENDPROC(__v7_setup)

	/*   AT
	 *  TFR   EV X F   I D LR
	 * .EEE ..EE PUI. .T.T 4RVI ZFRS BLDP WCAM
	 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
	 *    1    0 110       0011 1.00 .111 1101 < we want
	 */
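	/*
	 * The crval macro (proc-macros.S) emits the clear mask followed
	 * by the set mask for the control register: mmuset when
	 * CONFIG_MMU is enabled, ucset for no-MMU builds.
	 */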
	.type	v7_crval, #object
v7_crval:
	crval	clear=0x0120c302, mmuset=0x10c0387d, ucset=0x00c0187c

__v7_setup_stack:
	.space	4 * 11				@ 11 registers
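
	/*
	 * The entries below must be kept in the order expected by
	 * struct processor.
	 */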

	.type	v7_processor_functions, #object
ENTRY(v7_processor_functions)
	.word	v7_early_abort
	.word	pabort_ifar
	.word	cpu_v7_proc_init
	.word	cpu_v7_proc_fin
	.word	cpu_v7_reset
	.word	cpu_v7_do_idle
	.word	cpu_v7_dcache_clean_area
	.word	cpu_v7_switch_mm
	.word	cpu_v7_set_pte_ext
	.size	v7_processor_functions, . - v7_processor_functions

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv7"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v7"
	.size	cpu_elf_name, . - cpu_elf_name
	.align

	.section ".proc.info.init", #alloc, #execinstr

	/*
	 * Match any ARMv7 processor core.
	 */
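	/*
	 * The layout below matches struct proc_info_list
	 * (<asm/procinfo.h>): CPU id value and mask, the MMU flags used
	 * for memory and I/O section mappings, the __cpu_flush entry
	 * point, the name strings, the hwcaps, and the per-CPU function
	 * tables.
	 */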
	.type	__v7_proc_info, #object
__v7_proc_info:
	.long	0x000f0000		@ Required ID value
	.long	0x000f0000		@ Mask for ID
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_XN | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__v7_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_v7_name
	.long	v7_processor_functions
	.long	v7wbi_tlb_fns
	.long	v6_user_fns
	.long	v7_cache_fns
	.size	__v7_proc_info, . - __v7_proc_info