/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/system.h>

#include "mmu_decl.h"

#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET))
#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
#endif
#endif
#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE

phys_addr_t total_memory;
phys_addr_t total_lowmem;

phys_addr_t memstart_addr = (phys_addr_t)~0ull;
EXPORT_SYMBOL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL(kernstart_addr);
phys_addr_t lowmem_end_addr;

int boot_mapsize;
#ifdef CONFIG_PPC_PMAC
unsigned long agp_special_page;
EXPORT_SYMBOL(agp_special_page);
#endif

void MMU_init(void);

/* XXX should be in current.h  -- paulus */
extern struct task_struct *current_set[NR_CPUS];

/*
 * this tells the system to map all of ram with the segregs
 * (i.e. page tables) instead of the bats.
 * -- Cort
 */
int __map_without_bats;
int __map_without_ltlbs;

/* max amount of low RAM to map in */
unsigned long __max_low_memory = MAX_LOW_MEM;

/*
 * address of the limit of what is accessible with initial MMU setup -
 * 256MB usually, but only 16MB on 601.
 */
phys_addr_t __initial_memory_limit_addr = (phys_addr_t)0x10000000;

/*
 * Check for command-line options that affect what MMU_init will do.
 *
 * Parses the (already captured) kernel command line and sets the
 * __map_without_* flags consumed later by mapin_ram() and friends.
 */
void MMU_setup(void)
{
	/* Check for nobats option (used in mapin_ram). */
	if (strstr(cmd_line, "nobats")) {
		__map_without_bats = 1;
	}

	if (strstr(cmd_line, "noltlbs")) {
		__map_without_ltlbs = 1;
	}
#ifdef CONFIG_DEBUG_PAGEALLOC
	/* DEBUG_PAGEALLOC needs per-page protections, which large
	 * BAT/TLB mappings would defeat, so force small-page mapping. */
	__map_without_bats = 1;
	__map_without_ltlbs = 1;
#endif
}

/*
 * MMU_init sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions,
 * and sets up the page tables and the MMU hardware ready to go.
 */
void __init MMU_init(void)
{
	if (ppc_md.progress)
		ppc_md.progress("MMU:enter", 0x111);

	/* 601 can only access 16MB at the moment */
	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
		__initial_memory_limit_addr = 0x01000000;
	/* 8xx can only access 8MB at the moment */
	if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
		__initial_memory_limit_addr = 0x00800000;

	/* parse args from command line */
	MMU_setup();

	/* 32-bit early setup only handles one contiguous region; drop
	 * any additional LMB memory regions and re-derive the totals. */
	if (lmb.memory.cnt > 1) {
		lmb.memory.cnt = 1;
		lmb_analyze();
		printk(KERN_WARNING "Only using first contiguous memory region");
	}

	total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr;
	lowmem_end_addr = memstart_addr + total_lowmem;

#ifdef CONFIG_FSL_BOOKE
	/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
	 * entries, so we need to adjust lowmem to match the amount we can map
	 * in the fixed entries */
	adjust_total_lowmem();
#endif /* CONFIG_FSL_BOOKE */

	/* Clamp lowmem to what we can actually map; without HIGHMEM any
	 * memory beyond that limit is unusable, so tell LMB to drop it. */
	if (total_lowmem > __max_low_memory) {
		total_lowmem = __max_low_memory;
		lowmem_end_addr = memstart_addr + total_lowmem;
#ifndef CONFIG_HIGHMEM
		total_memory = total_lowmem;
		lmb_enforce_memory_limit(lowmem_end_addr);
		lmb_analyze();
#endif /* CONFIG_HIGHMEM */
	}

	/* Initialize the MMU hardware */
	if (ppc_md.progress)
		ppc_md.progress("MMU:hw init", 0x300);
	MMU_init_hw();

	/* Map in all of RAM starting at KERNELBASE */
	if (ppc_md.progress)
		ppc_md.progress("MMU:mapin", 0x301);
	mapin_ram();

	/* Initialize early top-down ioremap allocator */
	ioremap_bot = IOREMAP_TOP;

	/* Map in I/O resources */
	if (ppc_md.progress)
		ppc_md.progress("MMU:setio", 0x302);

	if (ppc_md.progress)
		ppc_md.progress("MMU:exit", 0x211);

	/* From now on, btext is no longer BAT mapped if it was at all */
#ifdef CONFIG_BOOTX_TEXT
	btext_unmap();
#endif
}

/*
 * Allocate one zeroed-capable page for early page tables.
 * This is only called until mem_init is done.
 *
 * Returns a kernel virtual address; after bootmem is initialized the
 * page comes from bootmem, before that it is carved directly out of
 * LMB below the limit the initial MMU mapping can reach.
 */
void __init *early_get_page(void)
{
	void *p;

	if (init_bootmem_done) {
		p = alloc_bootmem_pages(PAGE_SIZE);
	} else {
		p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
					__initial_memory_limit_addr));
	}
	return p;
}

/*
 * Free up now-unused memory.
 *
 * Releases the pages in [start, end) (kernel virtual addresses) back
 * to the page allocator and accounts them in totalram_pages, printing
 * how many kilobytes of @name were freed.
 */
static void free_sec(unsigned long start, unsigned long end, const char *name)
{
	unsigned long cnt = 0;

	while (start < end) {
		/* Pages were marked reserved at boot; undo that and reset
		 * the refcount so free_page() actually releases them. */
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		cnt++;
		start += PAGE_SIZE;
	}
	if (cnt) {
		printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
		totalram_pages += cnt;
	}
}

/*
 * Release the __init text/data section back to the page allocator
 * once boot is complete; early progress reporting is disabled because
 * the code backing ppc_md.progress may itself have been __init.
 */
void free_initmem(void)
{
	printk ("Freeing unused kernel memory:");
	free_sec((unsigned long)(&__init_begin),
		 (unsigned long)(&__init_end), "init");
 	printk("\n");
	ppc_md.progress = NULL;
}

#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Free the memory occupied by the initial ramdisk once it is no
 * longer needed. @start/@end are kernel virtual addresses.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		/* Clear the boot-time reservation and reset the refcount
		 * so free_page() returns the page to the allocator. */
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

#ifdef CONFIG_PROC_KCORE
static struct kcore_list kcore_vmem;

/*
 * Register physical memory regions and the vmalloc area with
 * /proc/kcore so they can be inspected as an ELF core image.
 */
static int __init setup_kcore(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;
		unsigned long size;
		struct kcore_list *kcore_mem;

		base = lmb.memory.region[i].base;
		size = lmb.memory.region[i].size;

		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
		if (!kcore_mem)
			panic("%s: kmalloc failed\n", __func__);

		/* must stay under 32 bits */
		if ( 0xfffffffful - (unsigned long)__va(base) < size) {
			size = 0xfffffffful - (unsigned long)(__va(base));
			printk(KERN_DEBUG "setup_kcore: restrict size=%lx\n",
						size);
		}

		kclist_add(kcore_mem, __va(base), size);
	}

	kclist_add(&kcore_vmem, (void *)VMALLOC_START,
		VMALLOC_END-VMALLOC_START);

	return 0;
}
module_init(setup_kcore);
#endif