/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

/* Per-CPU mmu_gather used to batch TLB teardown (munmap/exit paths). */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/* The kernel's reference page table; cleared and populated in paging_init(). */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

#ifdef CONFIG_SUPERH32
/*
 * Handle trivial transitions between cached and uncached
 * segments, making use of the 1:1 mapping relationship in
 * 512MB lowmem.
 *
 * This is the offset of the uncached section from its cached alias
 * (adding it to a cached P1 address yields its uncached P2 twin).
 * Default value only valid in 29 bit mode, in 32bit mode will be
 * overridden in pmb_init.
 */
unsigned long cached_to_uncached = P2SEG - P1SEG;
#endif
#ifdef CONFIG_MMU
/*
 * Install a single kernel PTE mapping virtual address 'addr' to
 * physical address 'phys' with protection 'prot', allocating any
 * missing intermediate page-table levels, then flush the TLB entry
 * for the new mapping under the current ASID.
 *
 * Used by __set_fixmap() to wire up fixmap entries at boot.
 */
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		/*
		 * pud is NULL here, so pud_ERROR(*pud) would oops on the
		 * very failure we are trying to report -- log it instead.
		 */
		printk(KERN_ERR "%s: cannot allocate pud for addr 0x%08lx\n",
		       __func__, addr);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		/* Likewise, pmd_ERROR(*pmd) would dereference NULL. */
		printk(KERN_ERR "%s: cannot allocate pmd for addr 0x%08lx\n",
		       __func__, addr);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		/* Refuse to clobber an already-present mapping. */
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	/* Make the new translation visible for the current ASID. */
	flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap mapping
 * across a context switch, we don't presently do this, but this could be done
 * in a similar fashion as to the wired TLB interface that sh64 uses (by way
 * of the memory mapped UTLB configuration) -- this unfortunately forces us to
 * give up a TLB entry for each mapping we want to preserve. While this may be
 * viable for a small number of fixmaps, it's not particularly useful for
 * everything and needs to be carefully evaluated. (ie, we may want this for
 * the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
 * in at __set_fixmap() time to determine the appropriate behavior to follow.
 *
 *					 -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	/* An index past the end of the fixmap table is a hard bug. */
	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	/* Map the fixmap slot's fixed virtual address to 'phys'. */
	set_pte_phys(__fix_to_virt(idx), phys, prot);
}

/*
 * Pre-populate pte tables for the kernel virtual range [start, end)
 * in 'pgd_base', allocating each missing pte page from bootmem.  The
 * actual pte entries are filled in later (e.g. by __set_fixmap()).
 */
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx;
	unsigned long vaddr;

	/* Round to PMD granularity: one pte table covers PMD_SIZE. */
	vaddr = start & PMD_MASK;
	end = (end + PMD_SIZE - 1) & PMD_MASK;
	pgd_idx = pgd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		BUG_ON(pgd_none(*pgd));
		pud = pud_offset(pgd, 0);
		BUG_ON(pud_none(*pud));
		pmd = pmd_offset(pud, 0);

		/* Only allocate where no pte table exists yet. */
		if (!pmd_present(*pmd)) {
			pte_t *pte_table;
			pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmd, pte_table);
		}

		vaddr += PMD_SIZE;
	}
}
#endif	/* CONFIG_MMU */
/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr;
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	page_table_range_init(vaddr, 0, swapper_pg_dir);

	kmap_coherent_init();

	/* Compute per-zone pfn limits across all online nodes. */
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		/* Everything here lives in ZONE_NORMAL only. */
		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);

#ifdef CONFIG_SUPERH32
	/* Set up the uncached fixmap */
	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
#endif
}

/* /proc/kcore descriptors for the direct-mapped and vmalloc regions. */
static struct kcore_list kcore_mem, kcore_vmalloc;

/* Set to 1 by mem_init() once bootmem has been released to the buddy
 * allocator. */
int after_bootmem = 0;

/*
 * Release all bootmem pages to the buddy allocator, register the
 * /proc/kcore entries, print the boot-time memory summary and run
 * the remaining late MM initialization (cache, vDSO).
 */
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	num_physpages = 0;
	high_memory = NULL;

	/* Hand each node's bootmem pages over to the page allocator. */
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		/* Track the highest directly-mapped address across nodes. */
		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	/* Bootmem is gone; normal allocators are usable from here on. */
	after_bootmem = 1;

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Expose lowmem and the vmalloc range through /proc/kcore. */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	p3_cache_init();

	/* Initialize the vDSO */
	vsyscall_init();
}

/*
 * Give the pages of the discarded .init sections back to the page
 * allocator: unreserve each page in [__init_begin, __init_end),
 * reset its refcount and free it.
 */
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
	        (unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Return the initrd image's pages to the page allocator once the
 * initrd is no longer needed.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = virt_to_page(addr);

		/* Unreserve, reset the refcount, then free the page. */
		ClearPageReserved(page);
		init_page_count(page);
		free_page(addr);
		totalram_pages++;
	}

	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif
#if THREAD_SHIFT < PAGE_SHIFT
/*
 * When THREAD_SIZE is smaller than a page, thread stacks cannot come
 * from the page allocator, so back thread_info with a dedicated slab
 * cache instead.
 */
static struct kmem_cache *thread_info_cache;

/* Allocate a thread_info; returns NULL on allocation failure. */
struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	/* Zero the whole stack so stack-usage accounting starts clean. */
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

/* Release a thread_info obtained from alloc_thread_info(). */
void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

/* Create the thread_info slab cache; cache creation failure is fatal. */
void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(pgdat->node_zones + ZONE_NORMAL, start_pfn, nr_pages);
	if (unlikely(ret))
316
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
317 318 319 320 321

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

322
#ifdef CONFIG_NUMA
323 324 325 326 327 328 329
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
330
#endif /* CONFIG_MEMORY_HOTPLUG */