/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>

#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull          lsr
#define lspush          lsl
#define get_byte_0      lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0      lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull          lsl
#define lspush          lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3      lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3      lsl #0
#endif
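/*
 * Illustrative use (a sketch, not part of the original header): store the
 * lowest-addressed byte of a word held in r3, independent of endianness.
 *
 *	mov	ip, r3, get_byte_0	@ shift byte 0 into bits 7:0
 *	strb	ip, [r0], #1		@ store it and advance
 */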

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code
#else
#define ARM_BE8(code...)
#endif

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
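/*
 * Example (illustrative): prefetch the source buffer of a copy loop; on
 * pre-v5 cores the line assembles to nothing.
 *
 *	PLD(	pld	[r1, #0]	)
 */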

/*
 * This can be used to enable code to cacheline-align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this to be worthwhile when the cache is not set to
 * write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon, however, there is much to gain regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif
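/*
 * Sketch of a typical invocation (the fixup label is hypothetical): check
 * how far the destination is from a cache-line boundary and branch to an
 * alignment fixup, with the code emitted only on Feroceon.
 *
 *	CALGN(	ands	ip, r0, #31	)	@ offset within the line
 *	CALGN(	bne	.Lalign_dst	)	@ hypothetical fixup label
 */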

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

	.macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl	trace_hardirqs_off
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * Strictly, the registers should be pushed and popped conditionally
	 * on \cond, but the bl clobbers the flags anyway, so the save and
	 * restore are done unconditionally.
	 */
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl\cond	trace_hardirqs_on
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro disable_irq, save=1
	disable_irq_notrace
	asm_trace_hardirqs_off \save
	.endm

	.macro enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
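/*
 * Illustrative pairing (a sketch): bracket a short critical section; the
 * traced variants also report the state change to the IRQ-flags tracer.
 *
 *	disable_irq
 *	...			@ code that must not be interrupted
 *	enable_irq
 */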
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on cond=eq
	restore_irqs_notrace \oldcpsr
	.endm
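/*
 * Typical pattern (illustrative; r4 is an arbitrary scratch choice):
 *
 *	save_and_disable_irqs r4	@ r4 := old CPSR (PRIMASK on v7-M)
 *	...				@ IRQs masked here
 *	restore_irqs r4			@ put the old state back
 */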

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr
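/*
 * Example (mirroring the use in safe_svcmode_maskall below): load a local
 * label as a return address; on Thumb-2 kernels bit 0 is set so a later
 * exception return or bx stays in Thumb state.
 *
 *	badr	lr, 2f
 */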

/*
 * Get current thread_info.
 */
	.macro	get_thread_info, rd
 ARM(	mov	\rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT	)
 THUMB(	mov	\rd, sp			)
 THUMB(	lsr	\rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT	)
	mov	\rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
	.endm
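/*
 * Sketch (assuming the usual 8KiB stacks, THREAD_SIZE_ORDER + PAGE_SHIFT
 * == 13): this clears the low 13 bits of sp, leaving the address of the
 * thread_info at the base of the current stack.
 *
 *	get_thread_info	r9	@ r9 := sp & ~(THREAD_SIZE - 1)
 */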

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif

#define USER(x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.popsection
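/*
 * Illustrative use (a sketch): the caller must provide a local label 9001
 * as the fault fixup, since the exception-table entry above hardwires it.
 *
 * USER(	ldrt	r2, [r0]	)	@ a fault here branches to 9001f
 */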

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)					\
	.equ	up_b_offset, label - 9998b			;\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
	W(b)	. + up_b_offset					;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
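/*
 * Example (taken from the pattern used by smp_dmb below): emit a barrier
 * on SMP kernels and let UP systems patch it to a nop at boot.
 *
 *	ALT_SMP(dmb	ish)
 *	ALT_UP(nop)
 */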

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm
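/*
 * Illustrative invocation (a sketch): the mode argument selects the
 * instruction width for the patched-in nop, so ARM callers write
 *
 *	smp_dmb	arm
 */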

#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert that the CPU is in SVC mode during
	 * boot. For v7-M this is done in __v7m_setup, so setmode can be
	 * empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif
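/*
 * Example (mirroring the pre-v6 fallback in safe_svcmode_maskall below):
 * force SVC mode with IRQs and FIQs masked, using r9 as scratch.
 *
 *	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
 */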

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts.  reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * You cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE
	tst	\reg, #MODE_MASK
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\cond\()\t\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\cond\()\t \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
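/*
 * Example (illustrative): store a word to user space with post-increment,
 * branching to the default local abort handler 9001f on a fault.
 *
 *	strusr	r3, r0, 4
 */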

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
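/*
 * Example (names as used elsewhere in the kernel):
 *
 *	string	cpu_elf_name, "v7"
 */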

	.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
	adds	\tmp, \addr, #\size - 1
	sbcccs	\tmp, \tmp, \limit
	bcs	\bad
#endif
	.endm
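/*
 * How the check above works (explanatory note): adds computes the address
 * of the last byte and sets C if the addition wraps; if it did not wrap,
 * the conditional sbcccs compares that address against the limit, leaving
 * C set when the range is out of bounds.  Either way, C set means the
 * access is invalid and bcs takes the \bad branch.
 */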

	.macro	uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_DISABLE
	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

	.macro	uaccess_enable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_ENABLE
	mcr	p15, 0, \tmp, c3, c0, 0
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

	.macro	uaccess_save, tmp
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	mrc	p15, 0, \tmp, c3, c0, 0
	str	\tmp, [sp, #SVC_DACR]
#endif
	.endm

	.macro	uaccess_restore
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	ldr	r0, [sp, #SVC_DACR]
	mcr	p15, 0, r0, c3, c0, 0
#endif
	.endm
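/*
 * Typical pairing (illustrative sketch): open a user-access window only
 * around the access itself, with r3 as scratch.
 *
 *	uaccess_enable	r3
 *	ldrt	r2, [r0]		@ the user access
 *	uaccess_disable	r3
 */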

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr

	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm
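/*
 * Example (illustrative): return from a leaf routine; this assembles to
 * "bx lr" on v6 and later and to "mov pc, lr" on earlier architectures.
 *
 *	ret	lr
 */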

#endif /* __ASM_ASSEMBLER_H__ */