/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 * Don't define absolute symbols until and unless you know that the
 * symbol's value should remain constant even if the kernel image is
 * relocated at run time. Absolute symbols are not relocated. If a
 * symbol's value should change when the kernel is relocated, make the
 * symbol section-relative and put it inside the section definition.
 */
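
/*
 * For illustration only (foo, bar and .sec are hypothetical, not part
 * of this script):
 *
 *	foo = .;		absolute: keeps its link-time value
 *	.sec : { bar = .; }	section relative: relocated with .sec
 */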

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif
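
/*
 * Note on the aliasing above: on 32-bit, jiffies reads the low word of
 * jiffies_64 (they share an address and x86 is little-endian).  On
 * 64-bit, jiffies itself is defined in the vsyscall page below and
 * jiffies_64 aliases it.
 */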

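/*
 * With CONFIG_DEBUG_RODATA the 64-bit kernel maps .rodata read-only
 * using 2 MB pages, so pad it out to HPAGE_SIZE boundaries to keep
 * unrelated data off those large pages.
 */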
#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)

#define X64_ALIGN_DEBUG_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X64_ALIGN_DEBUG_RODATA_END				\
		. = ALIGN(HPAGE_SIZE);				\
		__end_rodata_hpage_align = .;

#else

#define X64_ALIGN_DEBUG_RODATA_BEGIN
#define X64_ALIGN_DEBUG_RODATA_END

#endif

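/*
 * PT_LOAD FLAGS() sets the ELF p_flags bitmask:
 * 4 = read, 2 = write, 1 = execute.
 */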
PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(7);          /* RWE */
#ifdef CONFIG_X86_64
	user PT_LOAD FLAGS(5);          /* R_E */
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}

SECTIONS
{
#ifdef CONFIG_X86_32
        . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
        phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
        . = __START_KERNEL;
        phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif

	/* Text and read-only data */
	.text :  AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		/* bootstrapping code */
		HEAD_TEXT
#ifdef CONFIG_X86_32
		. = ALIGN(PAGE_SIZE);
		*(.text.page_aligned)
#endif
		. = ALIGN(8);
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
		/* End of text section */
		_etext = .;
	} :text = 0x9090
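	/* the '= 0x9090' fill pads alignment gaps with 0x90, the x86 NOP opcode */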

	NOTES :text :note

	EXCEPTION_TABLE(16) :text = 0x9090

	X64_ALIGN_DEBUG_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X64_ALIGN_DEBUG_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

#ifdef CONFIG_X86_64

#define VSYSCALL_ADDR (-10*1024*1024)
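/*
 * i.e. the fixed vsyscall page at 0xffffffffff600000, 10 MB below the
 * top of the address space
 */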

#define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
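/*
 * The vsyscall sections below are linked at VSYSCALL_ADDR but stored
 * in the kernel image right after __vsyscall_0: VLOAD() gives a
 * section's load (physical) address for use in AT(), VVIRT() the
 * kernel-mapping alias of a vsyscall symbol.
 */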

	. = ALIGN(4096);
	__vsyscall_0 = .;

	. = VSYSCALL_ADDR;
	.vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
		*(.vsyscall_0)
	} :user

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
		*(.vsyscall_fn)
	}

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
		*(.vsyscall_gtod_data)
	}

	vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
	.vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
		*(.vsyscall_clock)
	}
	vsyscall_clock = VVIRT(.vsyscall_clock);


	.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
		*(.vsyscall_1)
	}
	.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
		*(.vsyscall_2)
	}

	.vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
		*(.vgetcpu_mode)
	}
	vgetcpu_mode = VVIRT(.vgetcpu_mode);

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.jiffies : AT(VLOAD(.jiffies)) {
		*(.jiffies)
	}
	jiffies = VVIRT(.jiffies);

	.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
		*(.vsyscall_3)
	}

	. = __vsyscall_0 + PAGE_SIZE;

#undef VSYSCALL_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

#endif /* CONFIG_X86_64 */

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
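	 * Zero-basing lets 64-bit per-cpu accesses use %gs-relative
	 * addressing directly, with the %gs base holding each CPU's
	 * per-cpu area address.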
	 */
	PERCPU_VADDR(0, :percpu)
#endif

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}
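	/* (entries are placed here by the cpu_dev_register() macro) */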

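	/*
	 * Paravirt and alternative-instruction patch tables, consumed by
	 * apply_paravirt() and apply_alternatives() at boot and freed
	 * with the rest of init memory.
	 */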
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}

	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	/*
	 * .exit.text is discarded at run time, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU(PAGE_SIZE)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
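	 * (entries are the addresses of LOCK prefixes, which the
	 * alternatives code patches out when running on a single CPU)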
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		__smp_locks_end = .;
		. = ALIGN(PAGE_SIZE);
	}

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss.page_aligned)
		*(.bss)
		. = ALIGN(4);
		__bss_stop = .;
	}

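	/*
	 * Early-boot brk allocator: RESERVE_BRK() entries end up in
	 * .brk_reservation and extend_brk() hands the space out; whatever
	 * is left unused is returned to the free pool after boot.
	 */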
	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}

	.end : AT(ADDR(.end) - LOAD_OFFSET) {
		_end = .;
	}

        STABS_DEBUG
        DWARF_DEBUG

	/* Sections to be discarded */
	DISCARDS
	/DISCARD/ : { *(.eh_frame) }
}

#ifdef CONFIG_X86_32
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
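/*
 * (e.g. head_64.S points the early GDT descriptor at
 * init_per_cpu__gdt_page before the real per-cpu areas are set up)
 */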

/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
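/*
 * The stack protector canary lives at a fixed offset (%gs:40) inside
 * irq_stack_union, so the union must sit at the very start of the
 * per-cpu area.
 */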
. = ASSERT((per_cpu__irq_stack_union == 0),
           "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
           "kexec control code size is too big");
#endif