/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif
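
/*
 * Note: with CONFIG_PARAVIRT the %cr2 register cannot be read
 * directly; the GET_CR2_INTO_RAX hook above leaves the value in %rax,
 * which is why that flavour of GET_CR2_INTO() has to copy %rax into
 * the requested register.
 */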

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS
 * SPACE because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
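
/*
 * With the canonical __START_KERNEL_map of 0xffffffff80000000 these
 * evaluate to the slots used by the page tables at the end of this
 * file: L4_START_KERNEL = 511 and L3_START_KERNEL = 510 (matching the
 * "= 511" / "= 510" arithmetic next to level3_kernel_pgt below).
 */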

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Compute the delta between the address I am compiled to run at and the
	 * address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
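	/*
	 * Example: a kernel linked to run at physical 0x1000000 but
	 * loaded by the bootloader at 0x3000000 leaves %rbp = 0x2000000,
	 * the delta by which every physical address in our page tables
	 * is off.
	 */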

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/*
	 * Is the address too large?
	 */
	leaq	_text(%rip), %rax
	shrq	$MAX_PHYSMEM_BITS, %rax
	jnz	bad_address

	/*
	 * Fixup the physical addresses in the page table
	 */
	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */
	leaq	_text(%rip), %rdi
	leaq	early_level4_pgt(%rip), %rbx

	movq	%rdi, %rax
	shrq	$PGDIR_SHIFT, %rax

	leaq	(4096 + _KERNPG_TABLE)(%rbx), %rdx
	movq	%rdx, 0(%rbx,%rax,8)
	movq	%rdx, 8(%rbx,%rax,8)
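	/*
	 * The same PUD page (the first page after early_level4_pgt) is
	 * installed in the PGD slot covering _text and in the slot after
	 * it, so the mapping keeps working when the kernel image
	 * straddles a PGD boundary; the two PUD entries written below
	 * serve the same purpose one level down.
	 */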

	addq	$4096, %rdx
	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)
	incl	%eax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)

	addq	$8192, %rbx
	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rdi
	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
	leaq	(_end - 1)(%rip), %rcx
	shrq	$PMD_SHIFT, %rcx
	subq	%rdi, %rcx
	incl	%ecx
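	/*
	 * At this point %rdi = PMD index of _text, %rax = the first PMD
	 * entry (physical address of _text plus __PAGE_KERNEL_LARGE_EXEC
	 * with the global bit masked off), and %ecx = the number of 2M
	 * pages needed to cover _text.._end; the loop below fills in
	 * that many PMD slots.
	 */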

1:
	andq	$(PTRS_PER_PMD - 1), %rdi
	movq	%rax, (%rbx,%rdi,8)
	incq	%rdi
	addq	$PMD_SIZE, %rax
	decl	%ecx
	jnz	1b

	/*
	 * Fixup the kernel text+data virtual addresses.  Note that when
	 * the kernel is relocated we might write invalid pmds;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testb	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
	jmp 1f
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
	movq	%rcx, %cr4
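	/*
	 * PAE is mandatory in long mode; PGE lets the kernel's global
	 * mappings survive the TLB flush implied by the %cr3 write below.
	 */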

	/* Set up the 4-level pagetables for the early boot stage. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:
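	/*
	 * The movq above loaded the label's absolute link-time (virtual)
	 * address, so the indirect jump moved %rip out of the identity
	 * mapping and into the kernel mapping just loaded into %cr3.
	 */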

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq stack_start(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses the
	 * init data section until the per-cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
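	/* wrmsr takes the 64-bit MSR value in %edx:%eax */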
	wrmsr	

	/* %rsi is a pointer to the real mode structure with interesting
	   info; pass it to C. */
	movq	%rsi, %rdi
	
	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an
	 * indirect jump.  In addition we need to ensure %cs is set, so
	 * we make this a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack. We just set up the stack here, then
 * call start_secondary().
 */
ENTRY(start_cpu0)
	movq stack_start(%rip),%rsp
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu0)
#endif

	/* SMP bootup changes these two */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)

	GLOBAL(stack_start)
	.quad  init_thread_union+THREAD_SIZE-8
	.word  0
	__FINITDATA

bad_address:
	jmp bad_address

	__INIT
ENTRY(early_idt_handler_array)
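	/*
	 * Each stub below is padded with 0xcc (int3) to
	 * EARLY_IDT_HANDLER_SIZE bytes, so the stub for vector i sits at
	 * early_idt_handler_array + i * EARLY_IDT_HANDLER_SIZE.
	 */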
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	cmpl $2,(%rsp)		# X86_TRAP_NMI
	je .Lis_nmi		# Ignore NMI

	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)
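	/*
	 * early_recursion_flag is bumped here and dropped again at the
	 * 20: exit path; once two early exceptions are already in flight
	 * we give up (the jz 1f above) and halt rather than recurse.
	 */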

	pushq %rax		# 64(%rsp)
	pushq %rcx		# 56(%rsp)
	pushq %rdx		# 48(%rsp)
	pushq %rsi		# 40(%rsp)
	pushq %rdi		# 32(%rsp)
	pushq %r8		# 24(%rsp)
	pushq %r9		# 16(%rsp)
	pushq %r10		#  8(%rsp)
	pushq %r11		#  0(%rsp)

	cmpl $__KERNEL_CS,96(%rsp)
	jne 11f

	cmpl $14,72(%rsp)	# Page fault?
	jnz 10f
	GET_CR2_INTO(%rdi)	# can clobber any volatile register if pv
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			# All good

10:
	leaq 88(%rsp),%rdi	# Pointer to %rip
	call early_fixup_exception
	andl %eax,%eax
	jnz 20f			# Found an exception entry

11:
#ifdef CONFIG_EARLY_PRINTK
	GET_CR2_INTO(%r9)	# can clobber any volatile register if pv
	movl 80(%rsp),%r8d	# error code
	movl 72(%rsp),%esi	# vector number
	movl 96(%rsp),%edx	# %cs
	movq 88(%rsp),%rcx	# %rip
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 40(%rsp),%rsi	# %rip again
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp 1b

20:	# Exception table entry found or page table generated
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rdi
	popq %rsi
	popq %rdx
	popq %rcx
	popq %rax
	decl early_recursion_flag(%rip)
.Lis_nmi:
	addq $16,%rsp		# drop vector number and error code
	INTERRUPT_RETURN
ENDPROC(early_idt_handler_common)

	__INITDATA

	.balign 4
early_recursion_flag:
	.long 0

#ifdef CONFIG_EARLY_PRINTK
early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
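
/*
 * For example, PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
 * below expands to 512 2M-page entries covering the first 1G of
 * physical memory.
 */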

	__INITDATA
NEXT_PAGE(early_level4_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
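
/*
 * Note: early_level4_pgt above carries only slot 511 (the kernel text
 * mapping); startup_64 fills in the identity-mapping entries at run
 * time, carving the PUD and PMD pages it needs out of
 * early_dynamic_pgts.
 */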

	.data

#ifndef CONFIG_XEN
NEXT_PAGE(init_level4_pgt)
	.fill	512,8,0
#else
NEXT_PAGE(init_level4_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000

#include "../../x86/xen/xen-head.S"
	
	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE