/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 * Don't define absolute symbols until and unless you know that the
 * symbol value should remain constant even if the kernel image is
 * relocated at run time. Absolute symbols are not relocated. If a
 * symbol value should change when the kernel is relocated, make the
 * symbol section-relative and put it inside the section definition.
 */
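
/*
 * Illustrative sketch of the rule above ("foo" and ".sect" are made-up
 * names, not symbols or sections used by this script):
 *
 *	foo = .;			absolute symbol, never relocated
 *
 *	.sect : { foo = .; ... }	defined inside a section:
 *					section-relative, moves with .sect
 */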

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif
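
/*
 * Editorial note on the aliasing above: on 32-bit, jiffies shares the
 * address of jiffies_64, so (little-endian) it reads the low 32 bits.
 * On 64-bit it is the other way around: jiffies itself is placed in the
 * vsyscall .jiffies section below, and jiffies_64 aliases it.
 */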

PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(7);          /* RWE */
#ifdef CONFIG_X86_64
	user PT_LOAD FLAGS(5);          /* R_E */
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}
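
/*
 * The FLAGS() values above are the standard ELF p_flags bits
 * (PF_X = 1, PF_W = 2, PF_R = 4), so FLAGS(5) means read+execute (R_E),
 * FLAGS(6) read+write (RW_), and FLAGS(7) read+write+execute (RWE).
 */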

SECTIONS
{
#ifdef CONFIG_X86_32
        . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
        phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
        . = __START_KERNEL;
        phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif
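
	/*
	 * Throughout this script, AT(ADDR(section) - LOAD_OFFSET) sets a
	 * section's load address (LMA) to its virtual address (VMA) minus
	 * the kernel mapping offset: the image is linked at the high
	 * kernel virtual addresses but loaded at the matching physical
	 * addresses.
	 */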

	/* Text and read-only data */
	.text :  AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		/* bootstrapping code */
		HEAD_TEXT
#ifdef CONFIG_X86_32
		. = ALIGN(PAGE_SIZE);
		*(.text.page_aligned)
#endif
		. = ALIGN(8);
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
		/* End of text section */
		_etext = .;
	} :text = 0x9090

	NOTES :text :note

	EXCEPTION_TABLE(16) :text = 0x9090

	RO_DATA(PAGE_SIZE)

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32-bit has nosave data before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

#ifdef CONFIG_X86_64

#define VSYSCALL_ADDR (-10*1024*1024)

#define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
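
	/*
	 * Once the location counter is moved to VSYSCALL_ADDR below,
	 * ADDR() of a .vsyscall_* section is its user-visible fixmap
	 * address. VLOAD() translates that back to the section's load
	 * address within the kernel image, and VVIRT() to its address in
	 * the normal kernel mapping (the view through which the kernel
	 * itself writes, e.g., vsyscall_gtod_data).
	 */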

	. = ALIGN(4096);
	__vsyscall_0 = .;

	. = VSYSCALL_ADDR;
	.vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
		*(.vsyscall_0)
	} :user

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
		*(.vsyscall_fn)
	}

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
		*(.vsyscall_gtod_data)
	}

	vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
	.vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
		*(.vsyscall_clock)
	}
	vsyscall_clock = VVIRT(.vsyscall_clock);


	.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
		*(.vsyscall_1)
	}
	.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
		*(.vsyscall_2)
	}

	.vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
		*(.vgetcpu_mode)
	}
	vgetcpu_mode = VVIRT(.vgetcpu_mode);

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.jiffies : AT(VLOAD(.jiffies)) {
		*(.jiffies)
	}
	jiffies = VVIRT(.jiffies);

	.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
		*(.vsyscall_3)
	}

	. = __vsyscall_0 + PAGE_SIZE;

#undef VSYSCALL_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

#endif /* CONFIG_X86_64 */

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(0, :percpu)
#endif
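
	/*
	 * Editorial note: with zero-based per-cpu symbols, a symbol's
	 * link-time value is directly its offset from the per-cpu base,
	 * so the running CPU's copy is reached with a %gs-relative
	 * access plus that offset.
	 */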

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}

	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}
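
	/*
	 * Rough sketch of how the two sections above are consumed (see
	 * apply_alternatives()): .altinstructions holds struct alt_instr
	 * records that pair an original instruction site with a CPU
	 * feature flag and a replacement sequence kept in
	 * .altinstr_replacement; matching sites are patched at boot.
	 */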

	/*
	 * .exit.text is discarded at runtime, not at link time, to deal
	 * with references from .altinstructions and .eh_frame.
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU(PAGE_SIZE)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		__smp_locks_end = .;
		. = ALIGN(PAGE_SIZE);
	}

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss.page_aligned)
		*(.bss)
		. = ALIGN(4);
		__bss_stop = .;
	}

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}
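
	/*
	 * Entries in .brk_reservation typically come from the
	 * RESERVE_BRK() macro in <asm/setup.h>; extend_brk() hands out
	 * this space during early boot, before the memory allocators
	 * are up.
	 */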

	.end : AT(ADDR(.end) - LOAD_OFFSET) {
		_end = .;
	}

        STABS_DEBUG
        DWARF_DEBUG

	/* Sections to be discarded */
	DISCARDS
	/DISCARD/ : { *(.eh_frame) }
}


#ifdef CONFIG_X86_32
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
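
/*
 * For example, INIT_PER_CPU(gdt_page) expands to:
 *
 *	init_per_cpu__gdt_page = per_cpu__gdt_page + __per_cpu_load;
 *
 * giving early boot code an address that is valid before the per-cpu
 * areas have been set up.
 */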

/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
. = ASSERT((per_cpu__irq_stack_union == 0),
           "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
           "kexec control code size is too big");
#endif