/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif

phys_addr_t memstart_addr = ~0;
phys_addr_t kernstart_addr;

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %luk freed\n",
		((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif
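
/*
 * Both functions above use the same idiom to hand boot-time pages back
 * to the buddy allocator.  A minimal sketch (the helper name is
 * hypothetical, not part of this file): clear PG_reserved, reset the
 * page refcount that reserved pages do not maintain, then free the page
 * and account for it.  free_initmem() additionally poisons each page
 * first so any late use of init code or data is caught.
 *
 *	static void free_boot_page(unsigned long addr)
 *	{
 *		ClearPageReserved(virt_to_page(addr));
 *		init_page_count(virt_to_page(addr));
 *		free_page(addr);
 *		totalram_pages++;
 *	}
 */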

#ifdef CONFIG_PROC_KCORE
static struct kcore_list kcore_vmem;

static int __init setup_kcore(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base, size;
		struct kcore_list *kcore_mem;

		base = lmb.memory.region[i].base;
		size = lmb.memory.region[i].size;

		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
		if (!kcore_mem)
			panic("%s: kmalloc failed\n", __func__);

		kclist_add(kcore_mem, __va(base), size);
	}

	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);

	return 0;
}
module_init(setup_kcore);
#endif
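
/*
 * Usage note: the regions registered above appear through /proc/kcore
 * as an ELF core image of the running kernel.  A typical (illustrative)
 * consumer is a debugger pointed at the live system:
 *
 *	gdb /boot/vmlinux /proc/kcore
 */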

static void zero_ctor(struct kmem_cache *cache, void *addr)
{
	memset(addr, 0, kmem_cache_size(cache));
}

static const unsigned int pgtable_cache_size[2] = {
	PGD_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
#ifdef CONFIG_PPC_64K_PAGES
	"pgd_cache", "pmd_cache",
#else
	"pgd_cache", "pud_pmd_cache",
#endif /* CONFIG_PPC_64K_PAGES */
};

#ifdef CONFIG_HUGETLB_PAGE
/* Hugepages need an extra cache per hugepagesize, initialized in
 * hugetlbpage.c.  We can't put it into the tables above, because
 * HPAGE_SHIFT is not a compile time constant. */
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+MMU_PAGE_COUNT];
#else
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif

void pgtable_cache_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
		int size = pgtable_cache_size[i];
		const char *name = pgtable_cache_name[i];

		pr_debug("Allocating page table cache %s (#%d) "
			"for size: %08x...\n", name, i, size);
		pgtable_cache[i] = kmem_cache_create(name,
						     size, size,
						     SLAB_PANIC,
						     zero_ctor);
	}
}
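
/*
 * Usage sketch, not part of this file: the pgalloc layer is expected to
 * carve page tables out of these caches, along the lines of (cache
 * index simplified; see <asm/pgalloc.h> for the real accessors):
 *
 *	pgd_t *pgd = kmem_cache_alloc(pgtable_cache[0], GFP_KERNEL);
 *
 * Objects arrive zero-filled because zero_ctor() ran when their slab
 * page was set up, and SLAB_PANIC turns a failed cache creation into an
 * immediate boot-time panic rather than a latent NULL dereference.
 */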

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
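
/*
 * Worked example with illustrative numbers: if sizeof(struct page) is
 * 64 and PAGES_PER_SECTION is 4096, an address 64 * 5000 bytes past
 * vmemmap corresponds to pfn 5000; masking with PAGE_SECTION_MASK,
 * i.e. ~(PAGES_PER_SECTION - 1), rounds that down to pfn 4096, the
 * first page of the enclosing section.
 */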

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}

int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long nr_pages, int node)
{
	unsigned long mode_rw;
	unsigned long start = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	for (; start < end; start += page_size) {
		int mapped;
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		pr_debug("vmemmap %08lx allocated at %p, physical %08lx.\n",
			start, p, __pa(p));

		mapped = htab_bolt_mapping(start, start + page_size,
					__pa(p), mode_rw, mmu_vmemmap_psize,
					mmu_kernel_ssize);
		BUG_ON(mapped < 0);
	}

	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
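
/*
 * Caller sketch for context (simplified from generic mm code,
 * mm/sparse-vmemmap.c): sparse_mem_map_populate() invokes
 * vmemmap_populate() once per memory section during sparse_init():
 *
 *	struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
 *	if (vmemmap_populate(map, PAGES_PER_SECTION, nid))
 *		return NULL;
 *	return map;
 */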