/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

/* This header is for assembly sources only; C code must not include it. */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>
/* MMIO cookie is a plain address in assembly: IOMEM() is an identity wrapper */
#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 *
 * pull/push are shift mnemonics chosen per endianness; get_byte_N and
 * put_byte_N expand to the shift that extracts/positions byte N of a
 * 32-bit word.  __ARMEB__ is defined by the compiler for big-endian.
 */
#ifndef __ARMEB__
/* Little-endian */
#define pull            lsr
#define push            lsl
#define get_byte_0      lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0      lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
/* Big-endian: byte order within the word is reversed */
#define pull            lsl
#define push            lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3      lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3      lsl #0
#endif

/*
 * Data preload for architectures that support it
 */
/* PLD exists from ARMv5 onwards; on older cores it expands to nothing */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
/* Compiled out on all other CPUs */
#define CALGN(code...)
#endif

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	/* ARMv6+: dedicated change-processor-state instructions */
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	/* Pre-ARMv6: write the CPSR control field directly (stays in SVC mode) */
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

	/*
	 * Call trace_hardirqs_off when IRQ tracing is configured; all
	 * caller-saved registers are preserved around the call.
	 */
	.macro asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
	stmdb   sp!, {r0-r3, ip, lr}
	bl	trace_hardirqs_off
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	/* Conditional variant: call trace_hardirqs_on only if \cond holds */
	.macro asm_trace_hardirqs_on_cond, cond
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	stmdb   sp!, {r0-r3, ip, lr}
	bl\cond	trace_hardirqs_on
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	/* Unconditional IRQs-on trace */
	.macro asm_trace_hardirqs_on
	asm_trace_hardirqs_on_cond al
	.endm

	/* Tracing wrappers around the notrace primitives defined above */
	.macro disable_irq
	disable_irq_notrace
	asm_trace_hardirqs_off
	.endm

	.macro enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq
	.endm

	/* As above, but without the IRQ-tracing hook */
	.macro	save_and_disable_irqs_notrace, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
	msr	cpsr_c, \oldcpsr
	.endm

	/* Tracing variant: report IRQs-on first if \oldcpsr had I clear */
	.macro restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT		@ Z set => IRQs were enabled
	asm_trace_hardirqs_on_cond eq
	restore_irqs_notrace \oldcpsr
	.endm

/*
 * Wrap a user-space access: label the instruction and record it in
 * __ex_table so a fault is fixed up at label 9001, which the user of
 * this macro must provide.
 */
#define USER(x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.popsection
/*
 * SMP/UP alternatives: ALT_SMP() emits \instr and labels it; ALT_UP()
 * records that label plus a 4-byte replacement in .alt.smp.init so the
 * SMP instruction can be substituted on uniprocessor systems.
 */
#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
9997:	instr							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)					\
	.equ	up_b_offset, label - 9998b			;\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
	W(b)	. + up_b_offset					;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb					@ ARMv7 has a dedicated ISB instruction
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4	@ ARMv6 equivalent via CP15
#endif
	.endm

/*
 * SMP data memory barrier
 *
 * Emitted via ALT_SMP/ALT_UP so UP systems get a nop instead.  \mode
 * selects ARM vs Thumb-2 ("arm" suppresses the wide W() encoding).
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb)
	.else
	ALT_SMP(W(dmb))
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm

#ifdef CONFIG_THUMB2_KERNEL
	/* Thumb-2: stage the mode value in \reg, since msr needs a register here */
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	/* ARM: write the CPSR control field directly from an immediate */
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif
/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * you cannot return to the original mode.
 *
 * Beware, it also clobbers LR.
 */
.macro safe_svcmode_maskall reg:req
	mrs	\reg , cpsr
	mov	lr , \reg
	and	lr , lr , #MODE_MASK
	cmp	lr , #HYP_MODE			@ are we currently in HYP mode?
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f				@ not HYP: plain CPSR write below
	orr	\reg, \reg, #PSR_A_BIT		@ HYP: also mask asynchronous aborts
	adr	lr, BSYM(2f)			@ resume at 2: after the exception return
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET					@ drop from HYP to SVC
1:	msr	cpsr_c, \reg
2:
.endm

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 *
 * \t defaults to TUSER(), i.e. the unprivileged (user-mode) form of the
 * load/store; each access is registered in __ex_table with \abort as
 * the fault fixup target.
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\cond\()\t\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\cond\()\t \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	/* Store \reg to user space at [\ptr], post-incrementing \ptr; fault goes to \abort */
	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	/* Load \reg from user space at [\ptr], post-incrementing \ptr; fault goes to \abort */
	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

/* Utility macro for declaring string literals */
	/* Emits a NUL-terminated string named \name with .type/.size set */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm

	/*
	 * Range-check a user access: branch to \bad unless the \size bytes
	 * at \addr lie wholly below \limit.  \tmp is clobbered, flags too.
	 * NOTE(review): compiled out under CONFIG_CPU_USE_DOMAINS —
	 * presumably the domain access checks cover this case; verify.
	 */
	.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
	adds	\tmp, \addr, #\size - 1		@ tmp = last byte; C set on wrap
	sbcccs	\tmp, \tmp, \limit		@ if no wrap, compare against limit
	bcs	\bad
#endif
	.endm

#endif /* __ASM_ASSEMBLER_H__ */