/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/boot/head.S
 *
 *  Copyright (C) 1991, 1992, 1993  Linus Torvalds
 */

/*
 *  head.S contains the 32-bit startup code.
 *
 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
 * the page directory will exist. The startup code will be overwritten by
 * the page directory. [According to comments etc elsewhere on a compressed
 * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
 *
 * Page 0 is deliberately kept safe, since System Management Mode code in
 * laptops may need to access the BIOS data stored there.  This is also
 * useful for future device drivers that either access the BIOS via VM86
 * mode.
 */

/*
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
	.text

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/boot.h>
#include <asm/asm-offsets.h>
#include <asm/bootparam.h>

/*
 * The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X
 * relocation to get the symbol address in PIC.  When the compressed x86
 * kernel isn't built as PIC, the linker optimizes R_386_GOT32X
 * relocations to their fixed symbol addresses.  However, when the
 * compressed x86 kernel is loaded at a different address, it leads
 * to the following load failure:
 *
 *   Failed to allocate space for phdrs
 *
 * during the decompression stage.
 *
 * If the compressed x86 kernel is relocatable at run-time, it should be
 * compiled with -fPIE, instead of -fPIC, if possible and should be built as
 * Position Independent Executable (PIE) so that linker won't optimize
 * R_386_GOT32X relocation to its fixed symbol address.  Older
 * linkers generate R_386_32 relocations against locally defined symbols,
 * _bss, _ebss, _got and _egot, in PIE.  It isn't wrong, just less
 * optimal than R_386_RELATIVE.  But the x86 kernel fails to properly handle
 * R_386_32 relocations when relocating the kernel.  To generate
 * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as
 * hidden:
 */
	.hidden _bss
	.hidden _ebss
	.hidden _got
	.hidden _egot
63
	__HEAD
64
SYM_FUNC_START(startup_32)
65 66
	cld
	cli
67 68 69 70 71 72
	movl	$__BOOT_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %fs
	movl	%eax, %gs
	movl	%eax, %ss
R
Rusty Russell 已提交
73

74 75
/*
 * Calculate the delta between where we were compiled to run
76 77 78
 * at and where we were actually loaded at.  This can only be done
 * with a short local call on x86.  Nothing  else will tell us what
 * address we are running at.  The reserved chunk of the real-mode
79 80
 * data at 0x1e4 (defined as a scratch field) are used as the stack
 * for this calculation. Only 4 bytes are needed.
81
 */
82 83 84 85
	leal	(BP_scratch+4)(%esi), %esp
	call	1f
1:	popl	%ebp
	subl	$1b, %ebp
86

87 88
/*
 * %ebp contains the address we are loaded at by the boot loader and %ebx
89 90
 * contains the address where we should move the kernel image temporarily
 * for safe in-place decompression.
91
 */
92

93
#ifdef CONFIG_RELOCATABLE
94
	movl	%ebp, %ebx
95 96 97 98 99
	movl	BP_kernel_alignment(%esi), %eax
	decl	%eax
	addl    %eax, %ebx
	notl	%eax
	andl    %eax, %ebx
100 101
	cmpl	$LOAD_PHYSICAL_ADDR, %ebx
	jge	1f
102
#endif
103 104
	movl	$LOAD_PHYSICAL_ADDR, %ebx
1:
105

106
	/* Target address to relocate to for decompression */
107 108 109
	movl    BP_init_size(%esi), %eax
	subl    $_end, %eax
	addl    %eax, %ebx
110

111 112 113
	/* Set up the stack */
	leal	boot_stack_end(%ebx), %esp

114 115 116 117
	/* Zero EFLAGS */
	pushl	$0
	popfl

118 119
/*
 * Copy the compressed kernel to the end of our buffer
120 121
 * where decompression in place becomes safe.
 */
122
	pushl	%esi
123 124
	leal	(_bss-4)(%ebp), %esi
	leal	(_bss-4)(%ebx), %edi
125
	movl	$(_bss - startup_32), %ecx
126
	shrl	$2, %ecx
127
	std
128
	rep	movsl
129
	cld
130
	popl	%esi
131

L
Linus Torvalds 已提交
132
/*
133
 * Jump to the relocated address.
L
Linus Torvalds 已提交
134
 */
135
	leal	.Lrelocated(%ebx), %eax
136
	jmp	*%eax
137
SYM_FUNC_END(startup_32)

#ifdef CONFIG_EFI_STUB
/*
 * EFI entry point: called by the firmware/EFI boot path instead of the
 * real-mode code.  efi_main() returns the boot_params pointer in %eax;
 * we hand it to startup_32 in %esi and jump to the address the stub
 * recorded in boot_params->code32_start (plus the startup_32 offset).
 */
SYM_FUNC_START(efi32_stub_entry)
SYM_FUNC_START_ALIAS(efi_stub_entry)
	add	$0x4, %esp		/* discard the return address */
	call	efi_main
	movl	%eax, %esi		/* %esi = struct boot_params * */
	movl	BP_code32_start(%esi), %eax
	leal	startup_32(%eax), %eax
	jmp	*%eax
SYM_FUNC_END(efi32_stub_entry)
SYM_FUNC_END_ALIAS(efi_stub_entry)
#endif

152
	.text
J
Jiri Slaby 已提交
153
SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
154

L
Linus Torvalds 已提交
155
/*
156
 * Clear BSS (stack is currently empty)
L
Linus Torvalds 已提交
157
 */
158
	xorl	%eax, %eax
159
	leal	_bss(%ebx), %edi
160 161
	leal	_ebss(%ebx), %ecx
	subl	%edi, %ecx
162 163
	shrl	$2, %ecx
	rep	stosl
164

165 166 167 168 169 170 171 172 173 174 175 176 177
/*
 * Adjust our own GOT
 */
	leal	_got(%ebx), %edx
	leal	_egot(%ebx), %ecx
1:
	cmpl	%ecx, %edx
	jae	2f
	addl	%ebx, (%edx)
	addl	$4, %edx
	jmp	1b
2:

L
Linus Torvalds 已提交
178
/*
179
 * Do the extraction, and jump to the new kernel..
L
Linus Torvalds 已提交
180
 */
181
				/* push arguments for extract_kernel: */
182
	pushl	$z_output_len	/* decompressed length, end of relocs */
183

184 185 186
	leal	_end(%ebx), %eax
	subl    BP_init_size(%esi), %eax
	pushl	%eax		/* output address */
187

188
	pushl	$z_input_len	/* input_len */
189 190 191 192 193
	leal	input_data(%ebx), %eax
	pushl	%eax		/* input_data */
	leal	boot_heap(%ebx), %eax
	pushl	%eax		/* heap area */
	pushl	%esi		/* real mode pointer */
194
	call	extract_kernel	/* returns kernel location in %eax */
195
	addl	$24, %esp
L
Linus Torvalds 已提交
196 197

/*
198
 * Jump to the extracted kernel.
L
Linus Torvalds 已提交
199
 */
200
	xorl	%ebx, %ebx
201
	jmp	*%eax
J
Jiri Slaby 已提交
202
SYM_FUNC_END(.Lrelocated)

/*
 * Stack and heap for uncompression
 */
	.bss
	.balign 4
boot_heap:
	.fill BOOT_HEAP_SIZE, 1, 0
boot_stack:
	.fill BOOT_STACK_SIZE, 1, 0
boot_stack_end: