/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>

/* Identity mapping for I/O addresses used from assembly. */
#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 *
 * lspull/lspush give the shift directions used when reassembling a
 * word from two misaligned halves; get_byte_N/put_byte_N extract or
 * position byte N of a word, with N counted in memory order so that
 * the same macro names work for both endiannesses.
 */
#ifndef __ARMEB__
#define lspull          lsr
#define lspush          lsl
#define get_byte_0      lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0      lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull          lsl
#define lspush          lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3      lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3      lsl #0
#endif

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code
#else
#define ARM_BE8(code...)
#endif

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif

/*
 * Enable and disable interrupts.
 *
 * The _notrace variants do not call into the IRQ-flags tracer; on
 * ARMv6+ they use the CPS instruction, older architectures write
 * cpsr_c directly (which also forces SVC mode).
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i			@ mask IRQs only, FIQs untouched
	.endm

	.macro	enable_irq_notrace
	cpsie	i			@ unmask IRQs
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

/*
 * IRQ-flags tracing hooks callable from assembly.  The C tracer call
 * clobbers r0-r3, ip and lr, so those are saved around it; the flags
 * are still clobbered by the bl.
 */
	.macro asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
	stmdb   sp!, {r0-r3, ip, lr}
	bl	trace_hardirqs_off
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro asm_trace_hardirqs_on_cond, cond
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	stmdb   sp!, {r0-r3, ip, lr}
	bl\cond	trace_hardirqs_on
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro asm_trace_hardirqs_on
	asm_trace_hardirqs_on_cond al
	.endm

	@ disable IRQs, then tell the tracer they are off
	.macro disable_irq
	disable_irq_notrace
	asm_trace_hardirqs_off
	.endm

	@ tell the tracer IRQs are (about to be) on, then enable them
	.macro enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask	@ v7-M has no CPSR; PRIMASK holds the IRQ mask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	/* As above, but without calling into the IRQ-flags tracer. */
	.macro	save_and_disable_irqs_notrace, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	/*
	 * Tracing variant: if \oldcpsr has the I bit clear (IRQs were
	 * enabled), inform the tracer before actually restoring.
	 */
	.macro restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on_cond eq
	restore_irqs_notrace \oldcpsr
	.endm

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 *
 * On Thumb-2 kernels the low bit of the address is set so the result
 * is a valid interworking/exception-return target.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr

/*
 * Get current thread_info.
 *
 * Derives the thread_info pointer by rounding sp down to the start of
 * the (THREAD_SIZE_ORDER + PAGE_SHIFT)-sized kernel stack: shift the
 * low bits out, then shift back.  Thumb-2 mov cannot take a shifted
 * operand, hence the two-instruction THUMB() variant.
 */
	.macro	get_thread_info, rd
 ARM(	mov	\rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT	)
 THUMB(	mov	\rd, sp			)
 THUMB(	lsr	\rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT	)
	mov	\rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
	.endm

/*
 * Increment/decrement the preempt count.
 *
 * \ti is a register holding the thread_info pointer; \tmp is a
 * scratch register.  All variants compile away to nothing when
 * CONFIG_PREEMPT_COUNT is not set.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	@ Convenience form: fetch thread_info into \ti first.
	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif

/*
 * Wrap a user-space access instruction with an exception-table entry.
 * If the wrapped instruction faults, control transfers to the local
 * label 9001:, which must be defined by the code using this macro.
 */
#define USER(x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.popsection

/*
 * SMP/UP alternatives: ALT_SMP() emits the SMP instruction in-line and
 * ALT_UP() records a UP replacement in .alt.smp.init, keyed by the
 * address of the preceding ALT_SMP() instruction.  On !SMP builds the
 * UP variant is emitted directly.
 */
#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)					\
	.equ	up_b_offset, label - 9998b			;\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
	W(b)	. + up_b_offset					;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4		@ CP15 ISB (flush prefetch buffer)
#endif
	.endm

/*
 * SMP data memory barrier.
 *
 * \mode selects the encoding: "arm" emits narrow/ARM forms, anything
 * else emits the wide W() forms for Thumb-2.  On SMP kernels the
 * barrier is patched to a nop via ALT_UP() when running on UP.
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm

#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert to be in svc mode during boot. For v7-M
	 * this is done in __v7m_setup, so setmode can be empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	/* Thumb-2 msr cannot take an immediate; go via \reg. */
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * you cannot return to the original mode.
 *
 * If the CPU is in HYP mode, an exception return (__MSR_ELR_HYP/__ERET)
 * is used to drop to SVC; otherwise cpsr_c is written directly.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE		@ Z set below iff currently in HYP
	tst	\reg, #MODE_MASK
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f				@ not in HYP: plain mode switch
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	/*
	 * Emit one user access at [\ptr, #\off] with an exception-table
	 * entry directing faults to \abort.  \inc must be 1 (byte) or
	 * 4 (word); \t defaults to the TUSER() unprivileged suffix.
	 */
	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\cond\()\t\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	/*
	 * ARM variant: post-indexed addressing lets each access advance
	 * \ptr itself, so the whole sequence is just \rept repetitions.
	 */
	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\cond\()\t \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	/* Store \reg to user space at [\ptr], faults branch to \abort. */
	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	/* Load \reg from user space at [\ptr], faults branch to \abort. */
	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

/*
 * Utility macro for declaring string literals.
 *
 * Emits an object-typed symbol \name containing the NUL-terminated
 * string \string, with .size covering the emitted bytes.
 */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm

440 441 442 443 444 445 446 447
	.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
	adds	\tmp, \addr, #\size - 1
	sbcccs	\tmp, \tmp, \limit
	bcs	\bad
#endif
	.endm

448
	.macro	uaccess_disable, tmp, isb=1
449 450 451 452 453 454 455 456 457 458 459
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_DISABLE
	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
	.if	\isb
	instr_sync
	.endif
#endif
460 461 462
	.endm

	/*
	 * Switch kernel access to user memory on via the domain
	 * register (software PAN).  \tmp is scratch; pass isb=0 to
	 * skip the trailing instruction barrier.
	 */
	.macro	uaccess_enable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_ENABLE
	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

	/*
	 * Save the current domain register (uaccess state) into the
	 * stack slot at [sp, #S_FRAME_SIZE].  \tmp is scratch.
	 */
	.macro	uaccess_save, tmp
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	mrc	p15, 0, \tmp, c3, c0, 0		@ read DACR
	str	\tmp, [sp, #S_FRAME_SIZE]
#endif
	.endm

	/*
	 * Restore the domain register saved by uaccess_save from
	 * [sp, #S_FRAME_SIZE].  Clobbers r0.
	 */
	.macro	uaccess_restore
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	ldr	r0, [sp, #S_FRAME_SIZE]
	mcr	p15, 0, r0, c3, c0, 0		@ write DACR
#endif
	.endm

	/*
	 * Save the current uaccess (domain) state to the stack, then
	 * switch user access off.  Both steps are no-ops unless
	 * CONFIG_CPU_SW_DOMAIN_PAN is enabled.  \tmp is scratch.
	 */
	.macro	uaccess_save_and_disable, tmp
	uaccess_save \tmp
	uaccess_disable \tmp
	.endm

495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr

	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm

#endif /* __ASM_ASSEMBLER_H__ */