/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
#define GET_CR2_INTO_RCX movq %cr2, %rcx
#endif

/* we are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 *
 */

	.text
	.section .text.head
	.code64
	.globl startup_64
startup_64:

	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at we first fixup the physical addresses in our page
	 * tables and then reload them.
	 */

	/* Compute the delta between the address I am compiled to run at and the
	 * address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address

	/* Fixup the physical addresses in the page table
	 */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (258*8)(%rip)
	addq	%rbp, init_level4_pgt + (511*8)(%rip)

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/* Add an Identity mapping if I am above 1G */
	leaq	_text(%rip), %rdi
	andq	$PMD_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete

	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
ident_complete:

	/* Fixup the kernel text+data virtual addresses
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

#ifdef CONFIG_SMP
	addq	%rbp, trampoline_level4_pgt + 0(%rip)
	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
#endif
#ifdef CONFIG_ACPI_SLEEP
	addq	%rbp, wakeup_level4_pgt + 0(%rip)
	addq	%rbp, wakeup_level4_pgt + (511*8)(%rip)
#endif

	/* Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros. Better take a jmp than relying on empty space being
	 * filled with 0x90 (nop)
	 */
	jmp secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Enable PAE mode and PGE */
	xorq	%rax, %rax
	btsq	$5, %rax		/* CR4.PAE */
	btsq	$7, %rax		/* CR4.PGE */
	movq	%rax, %cr4

	/* Setup early boot stage 4 level pagetables. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_PM				1		/* protected mode */
#define CR0_MP				(1<<1)
#define CR0_ET				(1<<4)
#define CR0_NE				(1<<5)
#define CR0_WP				(1<<16)
#define CR0_AM				(1<<18)
#define CR0_PAGING 			(1<<31)
	movl $CR0_PM|CR0_MP|CR0_ET|CR0_NE|CR0_WP|CR0_AM|CR0_PAGING,%eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq init_rsp(%rip),%rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	cpu_gdt_descr(%rip)

	/* set up data segments. actually 0 would do too */
	movl $__KERNEL_DS,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* 
	 * Setup up a dummy PDA. this is just for some early bootup code
	 * that does in_interrupt() 
	 */ 
	movl	$MSR_GS_BASE,%ecx
	movq	$empty_zero_page,%rax
	movq    %rax,%rdx
	shrq	$32,%rdx
	wrmsr	

	/* esi is pointer to real mode structure with interesting info.
	   pass it to C */
	movl	%esi, %edi
	
	/* Finally jump to run C code and to be on real kernel address
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address, this is only possible as indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq

	/* SMP bootup changes these two */
#ifndef CONFIG_HOTPLUG_CPU
	.pushsection .init.data
#endif
	.align	8
	.globl	initial_code
initial_code:
	.quad	x86_64_start_kernel
#ifndef CONFIG_HOTPLUG_CPU
	.popsection
#endif
	.globl init_rsp
init_rsp:
	.quad  init_thread_union+THREAD_SIZE-8

bad_address:
	jmp bad_address

#ifdef CONFIG_EARLY_PRINTK
.macro early_idt_tramp first, last
	.ifgt \last-\first
	early_idt_tramp \first, \last-1
	.endif
	movl $\last,%esi
	jmp early_idt_handler
.endm

	.globl early_idt_handlers
early_idt_handlers:
	early_idt_tramp 0, 63
	early_idt_tramp 64, 127
	early_idt_tramp 128, 191
	early_idt_tramp 192, 255
#endif

ENTRY(early_idt_handler)
#ifdef CONFIG_EARLY_PRINTK
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)
	GET_CR2_INTO_RCX
	movq %rcx,%r9
	xorl %r8d,%r8d		# zero for error code
	movl %esi,%ecx		# get vector number
	# Test %ecx against mask of vectors that push error code.
	cmpl $31,%ecx
	ja 0f
	movl $1,%eax
	salq %cl,%rax
	testl $0x27d00,%eax
	je 0f
	popq %r8		# get error code
0:	movq 0(%rsp),%rcx	# get ip
	movq 8(%rsp),%rdx	# get cs
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	call dump_stack
#ifdef CONFIG_KALLSYMS	
	leaq early_idt_ripmsg(%rip),%rdi
	movq 8(%rsp),%rsi	# get rip again
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp 1b

#ifdef CONFIG_EARLY_PRINTK
early_recursion_flag:
	.long 0

early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */

.balign PAGE_SIZE

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)		\
	i = 0 ;					\
	.rept (COUNT) ;				\
	.quad	(START) + (i << 21) + (PERM) ;	\
	i = i + 1 ;				\
	.endr

	/*
	 * This default setting generates an ident mapping at address 0x100000
	 * and a mapping for the kernel that precisely maps virtual address
	 * 0xffffffff80000000 to physical address 0x000000. (always using
	 * 2Mbyte large pages provided by PAE mode)
	 */
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	257,8,0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	252,8,0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	510,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)

NEXT_PAGE(level2_kernel_pgt)
	/* 40MB kernel mapping. The kernel code cannot be bigger than that.
	   When you change this change KERNEL_TEXT_SIZE in page.h too. */
	/* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
	PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL, KERNEL_TEXT_SIZE/PMD_SIZE)
	/* Module mapping starts here */
	.fill	(PTRS_PER_PMD - (KERNEL_TEXT_SIZE/PMD_SIZE)),8,0

NEXT_PAGE(level2_spare_pgt)
	.fill   512,8,0

#undef PMDS
#undef NEXT_PAGE

	.data
	.align 16
	.globl cpu_gdt_descr
cpu_gdt_descr:
	.word	gdt_end-cpu_gdt_table-1
gdt:
	.quad	cpu_gdt_table
#ifdef CONFIG_SMP
	.rept	NR_CPUS-1
	.word	0
	.quad	0
	.endr
#endif

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000

/* We need valid kernel segments for data and code in long mode too
 * IRET will check the segment types  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout 
 */
		 		
	.section .data.page_aligned, "aw"
	.align PAGE_SIZE

/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */
	
ENTRY(cpu_gdt_table)
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x00cf9b000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9b000000ffff	/* __KERNEL_CS */
	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
	.quad	0x00cffb000000ffff	/* __USER32_CS */
	.quad	0x00cff3000000ffff	/* __USER_DS, __USER32_DS  */
	.quad	0x00affb000000ffff	/* __USER_CS */
	.quad	0x0			/* unused */
	.quad	0,0			/* TSS */
	.quad	0,0			/* LDT */
	.quad   0,0,0			/* three TLS descriptors */ 
	.quad	0x0000f40000000000	/* node/CPU stored in limit */
gdt_end:	
	/* asm/segment.h:GDT_ENTRIES must match this */	
	/* This should be a multiple of the cache line size */
	/* GDTs of other CPUs are now dynamically allocated */

	/* zero the remaining page */
	.fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0

	.section .bss, "aw", @nobits
	.align L1_CACHE_BYTES
ENTRY(idt_table)
	.skip 256 * 16

	.section .bss.page_aligned, "aw", @nobits
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.skip PAGE_SIZE