/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long vmalloc_end = VMALLOC_END_INIT;
EXPORT_SYMBOL(vmalloc_end);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

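	/* flush the whole page, including all tail pages of a compound page */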
	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void
dma_mark_clean(void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page(pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}

inline void
ia64_set_rbs_bot (void)
{
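	/* hard stack limit, rounded down to a 16-byte multiple */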
	unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
		if (vma) {
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

void
free_initmem (void)
{
	unsigned long addr, eaddr;

	addr = (unsigned long) ia64_imva(__init_begin);
	eaddr = (unsigned long) ia64_imva(__init_end);
	while (addr < eaddr) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		++totalram_pages;
		addr += PAGE_SIZE;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
	       (__init_end - __init_begin) >> 10);
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	struct page *page;
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		page = virt_to_page(start);
		ClearPageReserved(page);
		init_page_count(page);
		free_page(start);
		++totalram_pages;
	}
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}

static void __init
setup_gate (void)
{
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only page to enable
	 * privilege-promotion via "epc":
	 */
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

void __devinit
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void __devinit tlb_init (void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
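	/*
	 * Example: with 16KB pages (PAGE_SHIFT == 14) and 8-byte PTEs
	 * (pte_bits == 3), each of the three page-table levels resolves
	 * 14 - 3 = 11 bits, so mapped_space_bits = 3*11 + 14 = 47.
	 */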
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

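	/*
	 * unimpl_va_mask has a 1 for every unimplemented virtual-address
	 * bit; OR-ing in the region-number bits (61-63) and taking ffz()
	 * of the complement gives the number of implemented bits.
	 */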
	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
	 * which must fit into "vmlpt_bits - pte_bits" slots. Second half of
	 * the test makes sure that our mapped space doesn't overlap the
	 * unimplemented hole in the middle of the region.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");


	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
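/*
 * Walk the kernel page tables that map vmem_map, starting at entry
 * node_start_pfn + i, and return the node-relative index of the next
 * entry that is actually mapped, skipping over any holes.
 */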
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);

	stop_address = (unsigned long) &vmem_map[
		pgdat->node_start_pfn + pgdat->node_spanned_pages];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

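/*
 * Build the kernel page tables that map the vmem_map entries for the
 * physical range [start, end), allocating any missing intermediate
 * levels from node-local boot memory.
 */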
int __init
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int __meminit
virtual_memmap_init (u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY);
	return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

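/*
 * A pfn is valid if its struct page in vmem_map is mapped: probe the
 * first byte of the entry (and the last byte, when the entry straddles
 * a page boundary) with __get_user(), which fails gracefully on holes.
 */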
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init
find_largest_hole (u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

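/*
 * Record the usable memory range [start, start + len) in the early
 * node map used later for zone sizing.
 */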
int __init
register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
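	/* clamp either end of the range away from the crash-kernel area */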
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		add_active_range(nid, __pa(start) >> PAGE_SHIFT,
			__pa(end) >> PAGE_SHIFT);
	return 0;
}

static int __init
count_reserved_pages (u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}

int
find_max_min_low_pfn (unsigned long start, unsigned long end, void *arg)
{
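	/* round to page boundaries on FLATMEM, to ia64 granules otherwise */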
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	pg_data_t *pgdat;
	int i;
	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;

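
	/* each page-table level must fill exactly one page */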
	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	if (!mem_map)
		BUG();
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, _stext, _end - _stext);

	for_each_online_pgdat(pgdat)
		if (pgdat->bdata->node_bootmem_map)
			totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize =  (unsigned long) _etext - (unsigned long) _stext;
	datasize =  (unsigned long) _edata - (unsigned long) _etext;
	initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);


	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();

#ifdef CONFIG_IA32_SUPPORT
	ia32_mem_init();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

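	/* ia64 puts all hot-added memory into ZONE_NORMAL */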
	zone = pgdat->node_zones + ZONE_NORMAL;
	ret = __add_pages(zone, start_pfn, nr_pages);

	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__,  ret);

	return ret;
}
#endif

/*
 * Even when CONFIG_IA32_SUPPORT is not enabled it is
 * useful to have the Linux/x86 domain registered to
 * avoid an attempted module load when emulators call
 * personality(PER_LINUX32). This saves several milliseconds
 * on each such call.
 */
static struct exec_domain ia32_exec_domain;

static int __init
per_linux32_init(void)
{
	ia32_exec_domain.name = "Linux/x86";
	ia32_exec_domain.handler = NULL;
	ia32_exec_domain.pers_low = PER_LINUX32;
	ia32_exec_domain.pers_high = PER_LINUX32;
	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
	register_exec_domain(&ia32_exec_domain);

	return 0;
}

__initcall(per_linux32_init);