/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated at
 * run time. Absolute symbols are not relocated. If a symbol's value should
 * change when the kernel is relocated, make the symbol section-relative
 * and put it inside the section definition.
 */

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif
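
/*
 * LOAD_OFFSET is the difference between the kernel's link-time virtual
 * addresses and its physical load addresses; the AT() directives below
 * subtract it so that each output section's load address (LMA) is the
 * physical one.
 */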

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif
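
/*
 * On 32-bit, jiffies aliases the low 32 bits of jiffies_64, which works
 * because x86 is little-endian.  On 64-bit it is the other way round:
 * jiffies is placed in the vsyscall area below and jiffies_64 aliases it.
 */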

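/*
 * FLAGS() takes ELF p_flags bits (PF_X = 1, PF_W = 2, PF_R = 4), so
 * FLAGS(5) marks a segment read+execute and FLAGS(7) read+write+execute.
 */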
PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(7);          /* RWE */
#ifdef CONFIG_X86_64
	user PT_LOAD FLAGS(7);          /* RWE */
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(7);        /* RWE */
#endif
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}

SECTIONS
{
#ifdef CONFIG_X86_32
        . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
        phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
        . = __START_KERNEL;
        phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif

	/* Text and read-only data */
	.text :  AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		/* bootstrapping code */
		HEAD_TEXT
#ifdef CONFIG_X86_32
		. = ALIGN(PAGE_SIZE);
		*(.text.page_aligned)
#endif
		. = ALIGN(8);
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
		/* End of text section */
		_etext = .;
	} :text = 0x9090
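	/*
	 * "= 0x9090" sets the output section fill pattern to 0x90 bytes
	 * (the x86 NOP opcode), so any padding is harmless if it is ever
	 * executed.
	 */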

	NOTES :text :note

	/* Exception table */
	. = ALIGN(16);
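	/*
	 * Each entry pairs the address of an instruction that may fault
	 * with the address of its fixup code; fixup_exception() searches
	 * this table when a fault arrives from kernel mode.
	 */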
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
		*(__ex_table)
		__stop___ex_table = .;
	} :text = 0x9090

	RO_DATA(PAGE_SIZE)

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
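		/*
		 * The initial task's stack must be THREAD_SIZE-aligned; on
		 * 32-bit, current_thread_info() recovers thread_info by
		 * masking the stack pointer with ~(THREAD_SIZE - 1).
		 */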
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

#ifdef CONFIG_X86_64

#define VSYSCALL_ADDR (-10*1024*1024)

#define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
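
/*
 * The vsyscall sections are linked at the fixed address VSYSCALL_ADDR
 * (-10 MB, i.e. 0xffffffffff600000), which is part of the user-space ABI.
 * VLOAD() converts such an address to the load address (LMA), so the page
 * is loaded contiguously with the rest of the image, and VVIRT() converts
 * it to the equivalent address in the normal kernel mapping.  The entry
 * points below sit at fixed 1024-byte slots within the page.
 */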

	. = ALIGN(4096);
	__vsyscall_0 = .;

	. = VSYSCALL_ADDR;
	.vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
		*(.vsyscall_0)
	} :user

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
		*(.vsyscall_fn)
	}

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
		*(.vsyscall_gtod_data)
	}

	vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
	.vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
		*(.vsyscall_clock)
	}
	vsyscall_clock = VVIRT(.vsyscall_clock);


	.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
		*(.vsyscall_1)
	}
	.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
		*(.vsyscall_2)
	}

	.vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
		*(.vgetcpu_mode)
	}
	vgetcpu_mode = VVIRT(.vgetcpu_mode);

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.jiffies : AT(VLOAD(.jiffies)) {
		*(.jiffies)
	}
	jiffies = VVIRT(.jiffies);

	.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
		*(.vsyscall_3)
	}

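	/*
	 * Return the location counter to the normal kernel mapping, just
	 * past the single page reserved for the vsyscall area.
	 */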
	. = __vsyscall_0 + PAGE_SIZE;

#undef VSYSCALL_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

#endif /* CONFIG_X86_64 */

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(0, :percpu)
#endif

	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		_sinittext = .;
		INIT_TEXT
		_einittext = .;
	}
#ifdef CONFIG_X86_64
	:init
#endif

	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
	}

	. = ALIGN(16);
	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
		__setup_start = .;
		*(.init.setup)
		__setup_end = .;
	}
	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
		__initcall_start = .;
		INITCALLS
		__initcall_end = .;
	}

	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
		__con_initcall_start = .;
		*(.con_initcall.init)
		__con_initcall_end = .;
	}

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

	SECURITY_INIT

	. = ALIGN(8);
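	/*
	 * Paravirt patch sites: apply_paravirt() rewrites these call sites
	 * at boot with code appropriate to the environment it runs in.
	 */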
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}

	. = ALIGN(8);
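	/*
	 * apply_alternatives() consults these records at boot to overwrite
	 * the original instructions with the CPU-appropriate replacement
	 * bytes stored in .altinstr_replacement below.
	 */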
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#ifdef CONFIG_BLK_DEV_INITRD
	. = ALIGN(PAGE_SIZE);
	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
		__initramfs_start = .;
		*(.init.ramfs)
		__initramfs_end = .;
	}
#endif

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
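	/*
	 * 32-bit and UP kernels use an ordinary, positively addressed
	 * per-cpu section here, in contrast to the zero-based
	 * PERCPU_VADDR() layout used for 64-bit SMP above.
	 */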
	PERCPU(PAGE_SIZE)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
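	/*
	 * Each entry points at a LOCK prefix byte, so the kernel can patch
	 * the prefixes out while only one CPU is online.
	 */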
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		__smp_locks_end = .;
		. = ALIGN(PAGE_SIZE);
	}

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss.page_aligned)
		*(.bss)
		. = ALIGN(4);
		__bss_stop = .;
	}

	. = ALIGN(PAGE_SIZE);
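	/*
	 * Early-boot brk allocator: extend_brk() hands out memory from this
	 * range (e.g. for initial page tables) before the real memory
	 * allocators are available.
	 */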
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}

	.end : AT(ADDR(.end) - LOAD_OFFSET) {
		_end = .;
	}

        STABS_DEBUG
        DWARF_DEBUG

	/* Sections to be discarded */
	DISCARDS
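	/*
	 * The kernel has no run-time user for .eh_frame unwind data, so it
	 * is dropped here rather than kept as dead weight in the image.
	 */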
	/DISCARD/ : { *(.eh_frame) }
}

#ifdef CONFIG_X86_32
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
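
/*
 * These absolute aliases let early boot code, which runs before the
 * per-cpu areas are set up, reach the boot CPU's copies directly;
 * head_64.S, for instance, references init_per_cpu__gdt_page and
 * init_per_cpu__irq_stack_union.
 */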

/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
. = ASSERT((per_cpu__irq_stack_union == 0),
           "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
           "kexec control code size is too big");
#endif