/*
 *  linux/boot/head.S
 *
 *  Copyright (C) 1991, 1992, 1993  Linus Torvalds
 */

/*
 *  head.S contains the 32-bit startup code.
 *
 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
 * the page directory will exist. The startup code will be overwritten by
 * the page directory. [According to comments etc elsewhere on a compressed
 * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
 *
 * Page 0 is deliberately kept safe, since System Management Mode code in
 * laptops may need to access the BIOS data stored there.  This is also
 * useful for future device drivers that access the BIOS via VM86 mode.
 */

/*
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
	.text

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/boot.h>
#include <asm/asm-offsets.h>

	.section ".text.head","ax",@progbits
ENTRY(startup_32)
	cld
	/*
	 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
	 * us to not reload segments
	 */
	testb	$(1<<6), BP_loadflags(%esi)
	jnz	1f

	cli
	movl	$__BOOT_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %fs
	movl	%eax, %gs
	movl	%eax, %ss
1:

/*
 * Calculate the delta between where we were compiled to run
 * at and where we were actually loaded at.  This can only be done
 * with a short local call on x86.  Nothing else will tell us what
 * address we are running at.  The reserved chunk of the real-mode
 * data at 0x1e4 (defined as a scratch field) is used as the stack
 * for this calculation.  Only 4 bytes are needed.
 */
	leal	(BP_scratch+4)(%esi), %esp
	call	1f
1:	popl	%ebp
	subl	$1b, %ebp

/*
 * %ebp contains the address we are loaded at by the boot loader and %ebx
 * contains the address where we should move the kernel image temporarily
 * for safe in-place decompression.
 */

#ifdef CONFIG_RELOCATABLE
	movl	%ebp, %ebx
	addl	$(CONFIG_PHYSICAL_ALIGN - 1), %ebx
	andl	$(~(CONFIG_PHYSICAL_ALIGN - 1)), %ebx
#else
	movl	$LOAD_PHYSICAL_ADDR, %ebx
#endif

	/* Target address to relocate to for decompression */
	addl	$z_extract_offset, %ebx

	/* Set up the stack */
	leal	boot_stack_end(%ebx), %esp

	/* Zero EFLAGS */
	pushl	$0
	popfl

/*
 * Copy the compressed kernel to the end of our buffer
 * where decompression in place becomes safe.
 */
	pushl	%esi
	leal	(_bss-4)(%ebp), %esi
	leal	(_bss-4)(%ebx), %edi
	movl	$(_bss - startup_32), %ecx
	shrl	$2, %ecx
	std
	rep	movsl
	cld
	popl	%esi

/*
 * Compute the kernel start address.
 */
#ifdef CONFIG_RELOCATABLE
	addl	$(CONFIG_PHYSICAL_ALIGN - 1), %ebp
	andl	$(~(CONFIG_PHYSICAL_ALIGN - 1)), %ebp
#else
	movl	$LOAD_PHYSICAL_ADDR, %ebp
#endif

/*
 * Jump to the relocated address.
 */
	leal	relocated(%ebx), %eax
	jmp	*%eax
ENDPROC(startup_32)

	.text
relocated:

/*
 * Clear BSS (stack is currently empty)
 */
	xorl	%eax, %eax
	leal	_bss(%ebx), %edi
	leal	_ebss(%ebx), %ecx
	subl	%edi, %ecx
	shrl	$2, %ecx
	rep	stosl

/*
 * Do the decompression, and jump to the new kernel..
 */
	leal	z_extract_offset_negative(%ebx), %ebp
				/* push arguments for decompress_kernel: */
	pushl	%ebp		/* output address */
	pushl	$z_input_len	/* input_len */
	leal	input_data(%ebx), %eax
	pushl	%eax		/* input_data */
	leal	boot_heap(%ebx), %eax
	pushl	%eax		/* heap area */
	pushl	%esi		/* real mode pointer */
	call	decompress_kernel
	addl	$20, %esp

#if CONFIG_RELOCATABLE
/*
 * Find the address of the relocations.
 */
	leal	z_output_len(%ebp), %edi

/*
 * Calculate the delta between where vmlinux was compiled to run
 * and where it was actually loaded.
 */
	movl	%ebp, %ebx
	subl	$LOAD_PHYSICAL_ADDR, %ebx
	jz	2f	/* Nothing to be done if loaded at compiled addr. */

/*
 * Process relocations.
 */
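/*
 * The loop below walks the relocation table that the build appends to the
 * end of the uncompressed image, which is why %edi starts at
 * z_output_len(%ebp) and moves backwards.  Each entry is the 32-bit kernel
 * virtual address of a word that needs fixing up; a zero entry terminates
 * the walk.  Subtracting __PAGE_OFFSET gives the physical address that
 * word was compiled for, adding the load delta in %ebx gives where it
 * actually sits now, and the same delta is added to the value stored there.
 */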
1:	subl	$4, %edi
	movl	(%edi), %ecx
	testl	%ecx, %ecx
	jz	2f
	addl	%ebx, -__PAGE_OFFSET(%ebx, %ecx)
	jmp	1b
2:
#endif

/*
 * Jump to the decompressed kernel.
 */
	xorl	%ebx, %ebx
	jmp	*%ebp

/*
 * Stack and heap for uncompression
 */
	.bss
	.balign 4
boot_heap:
	.fill BOOT_HEAP_SIZE, 1, 0
boot_stack:
	.fill BOOT_STACK_SIZE, 1, 0
boot_stack_end:
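/*
 * For reference: the five arguments pushed before the call to
 * decompress_kernel above follow the 32-bit cdecl convention (the last
 * push becomes the first parameter, and the addl $20, %esp afterwards
 * drops all five).  A sketch of the C entry point they imply, assuming
 * the decompressor's misc.c keeps the usual signature for this kernel
 * generation (names are illustrative; memptr is its unsigned-long
 * address type):
 *
 *	asmlinkage void decompress_kernel(void *rmode, memptr heap,
 *					  unsigned char *input_data,
 *					  unsigned long input_len,
 *					  unsigned char *output);
 *
 * boot_heap above is what gets passed in as that heap area, and
 * boot_stack_end is the stack pointer set up at the top of startup_32,
 * which is why both are reserved here in .bss.
 */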