/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

/*
 * Enable and disable interrupts.
 */
	.macro	disable_irq
	msr	daifset, #2
	.endm

	.macro	enable_irq
	msr	daifclr, #2
	.endm
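
/*
 * Illustrative usage (a sketch, not part of the original header): bracket
 * a short critical section so it cannot be interrupted. Everything between
 * the two macros runs with IRQs masked via the DAIF I bit.
 *
 *	disable_irq
 *	// ... code that must not be interrupted ...
 *	enable_irq
 */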

	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm
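
/*
 * Sketch of the save/restore variant (the register choice is hypothetical):
 * useful when the caller may already be running with IRQs masked and the
 * previous DAIF state must be preserved.
 *
 *	save_and_disable_irq	x3	// x3 := old DAIF, IRQs masked
 *	// ... critical section ...
 *	restore_irq		x3	// put DAIF back as it was
 */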

/*
 * Enable and disable debug exceptions.
 */
	.macro	disable_dbg
	msr	daifset, #8
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	disable_dbg
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm

/*
 * Enable both debug exceptions and interrupts. This is likely to be
 * faster than two daifclr operations, since writes to this register
 * are self-synchronising.
 */
	.macro	enable_dbg_and_irq
	msr	daifclr, #(8 | 2)
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l
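
/*
 * Illustrative use of USER() (a sketch; the fixup label and registers are
 * hypothetical): the access at 9999 gets an exception table entry, so a
 * fault branches to the supplied fixup label instead of oopsing.
 *
 * USER(9f, ldrb w4, [x1])	// faulting load jumps to local label 9f
 */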

/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm
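
/*
 * Sketch of how ventry is used (the handler name el1_sync is a stand-in):
 * each vector slot is 0x80 bytes, hence the .align 7 above, and the table
 * base itself must be 2 KB aligned before being written to VBAR_EL1.
 *
 *	.align	11
 * vectors:
 *	ventry	el1_sync	// one entry per 0x80-byte slot
 */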

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif
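
/*
 * Example (illustrative): emit a byte-swap only on big-endian builds, so
 * the value in x0 ends up in little-endian byte order either way:
 *
 * CPU_BE(	rev	x0, x0	)
 */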

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
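
/*
 * Illustrative use (registers hypothetical): combine the 32-bit halves
 * held in x2 and x3 into one 64-bit value in x0. Which input supplies the
 * low bits follows the build's endianness, as handled by the two macro
 * definitions above.
 *
 *	regs_to_64	x0, x2, x3
 */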

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC when running
 * in core kernel context. In module context, a movz/movk sequence
 * is used, since modules may be loaded far away from the kernel
 * when KASLR is in effect.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
#ifndef MODULE
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
#else
	movz	\dst, #:abs_g3:\sym
	movk	\dst, #:abs_g2_nc:\sym
	movk	\dst, #:abs_g1_nc:\sym
	movk	\dst, #:abs_g0_nc:\sym
#endif
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
#ifndef MODULE
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
#else
	.ifb	\tmp
	adr_l	\dst, \sym
	ldr	\dst, [\dst]
	.else
	adr_l	\tmp, \sym
	ldr	\dst, [\tmp]
	.endif
#endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
#ifndef MODULE
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
#else
	adr_l	\tmp, \sym
	str	\src, [\tmp]
#endif
	.endm
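
/*
 * Illustrative usage of the three pseudo-ops above (the symbol name
 * some_var and the register choices are hypothetical):
 *
 *	adr_l	x0, some_var		// x0 = &some_var
 *	ldr_l	w1, some_var, x2	// w1 = some_var, address via x2
 *	str_l	w1, some_var, x2	// some_var = w1, x2 clobbered
 */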

	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()), can be SP for
	 *       non-module code
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
#ifndef MODULE
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
#else
	adr_l	\dst, \sym
#endif
	mrs	\tmp, tpidr_el1
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
	mrs	\tmp, tpidr_el1
	ldr	\dst, [\dst, \tmp]
	.endm
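
/*
 * Sketch (the per-cpu symbol name is hypothetical): read this CPU's copy
 * of a per-cpu variable, using tpidr_el1 as the per-cpu offset:
 *
 *	ldr_this_cpu	x0, cpu_counter, x1	// x0 = this CPU's cpu_counter
 */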

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm
/*
 * read_ctr - read CTR_EL0. If the system has mismatched
 * cache line sizes, provide the system wide safe value
 * from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_LINE_SIZE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm


/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
#ifndef CONFIG_ARM64_VA_BITS_48
	ldr_l	\tmpreg, idmap_t0sz
	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
#endif
	.endm

/*
 * Macro to perform a data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 * 	op:		operation passed to dc instruction
 * 	domain:		domain used in dsb instruction
 * 	kaddr:		starting virtual address of the region
 * 	size:		size of the region
 * 	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.if	(\op == cvau || \op == cvac)
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.elseif	(\op == cvap)
alternative_if ARM64_HAS_DCPOP
	sys 3, c7, c12, 1, \kaddr	// dc cvap
alternative_else
	dc	cvac, \kaddr
alternative_endif
	.else
	dc	\op, \kaddr
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm
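
/*
 * Illustrative invocation (the register allocation is hypothetical): clean
 * and invalidate the buffer [x0, x0 + x1) to the point of coherency, then
 * complete the maintenance with a full-system dsb:
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */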

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	\tmpreg, \tmpreg, #8, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm
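
/*
 * Example call (a sketch; assumes x0 and x1 hold page-aligned destination
 * and source addresses, and that x2-x9 are free for scratch use):
 *
 *	copy_page	x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */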

/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type 	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)
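
/*
 * Typical use (sketch): publish a __pi_ alias so early boot code can call
 * the routine before the kernel virtual mapping is active, e.g. for a
 * routine named memcpy:
 *
 * ENTRY(memcpy)
 *	...
 * ENDPIPROC(memcpy)
 */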

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif
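
/*
 * Example (sketch; the symbol name is a stand-in): keep kprobes away from
 * a low-level handler that cannot safely take a breakpoint:
 *
 * NOKPROBE(el1_sync_handler)
 */
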
	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

	/*
	 * mov_q - move an immediate constant into a 64-bit register using
	 *         between 2 and 4 movz/movk instructions (depending on the
	 *         magnitude and sign of the operand)
	 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
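
/*
 * Illustrative use: load a 64-bit constant without a literal pool; the
 * number of movz/movk instructions emitted (2 to 4) depends on the value:
 *
 *	mov_q	x0, 0x123456789abcdef0	// expands to movz plus three movk
 */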

/*
 * Return the current thread_info.
 */
	.macro	get_thread_info, rd
	mrs	\rd, sp_el0
	.endm

/*
 * Errata workaround prior to TTBR0_EL1 update
 *
 * 	val:	TTBR value with new BADDR, preserved
 * 	tmp0:	temporary register, clobbered
 * 	tmp1:	other temporary register, clobbered
 */
	.macro	pre_ttbr0_update_workaround, val, tmp0, tmp1
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	mrs	\tmp0, ttbr0_el1
	mov	\tmp1, #FALKOR_RESERVED_ASID
	bfi	\tmp0, \tmp1, #48, #16		// reserved ASID + old BADDR
	msr	ttbr0_el1, \tmp0
	isb
	bfi	\tmp0, \val, #0, #48		// reserved ASID + new BADDR
	msr	ttbr0_el1, \tmp0
	isb
alternative_else_nop_endif
#endif
	.endm

/*
 * Errata workaround post TTBR0_EL1 update.
 */
	.macro	post_ttbr0_update_workaround
#ifdef CONFIG_CAVIUM_ERRATUM_27456
alternative_if ARM64_WORKAROUND_CAVIUM_27456
	ic	iallu
	dsb	nsh
	isb
alternative_else_nop_endif
#endif
	.endm

#endif	/* __ASM_ASSEMBLER_H */