/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>

/*
 * Kernel master page table; zeroed and installed as the MMU's TTB by
 * paging_init(), since the kernel itself is mapped through P1.
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Default memory setup: hand the statically configured physical
 * memory range straight to memblock.
 */
void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

/*
 * Weak stub so that platforms without any special memory setup need
 * not provide their own plat_mem_setup().
 */
void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
/*
 * Walk the kernel page tables down to the pte covering @addr,
 * allocating the intermediate pud/pmd levels as needed.
 *
 * Returns the pte pointer, or NULL if the pgd entry is bad or an
 * intermediate table could not be allocated.
 */
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

/*
 * Install a pte mapping @addr to @phys with protection @prot and
 * flush the local TLB entry.  Entries whose @prot carries _PAGE_WIRED
 * are additionally wired down in the TLB.
 *
 * NOTE(review): __get_pte_phys() can return NULL; this path assumes
 * it always succeeds for fixmap addresses -- confirm.
 */
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

/*
 * Tear down the mapping at @addr: unwire the TLB entry first when
 * @prot carries _PAGE_WIRED, then clear the pte and flush the local
 * TLB entry.
 */
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

/*
 * Map the fixmap slot @idx to physical address @phys with protection
 * @prot.  An out-of-range index is a fatal programming error.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long vaddr;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	vaddr = __fix_to_virt(idx);
	set_pte_phys(vaddr, phys, prot);
}
113

114 115 116 117 118 119 120 121 122 123 124 125
void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = alloc_bootmem_pages(PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

/*
 * Return the kernel pte table for @pmd, bootmem-allocating and
 * populating one first if the pmd entry is empty.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	pte_t *new_pte;

	if (!pmd_none(*pmd))
		return pte_offset_kernel(pmd, 0);

	new_pte = alloc_bootmem_pages(PAGE_SIZE);
	pmd_populate_kernel(&init_mm, pmd, new_pte);
	BUG_ON(new_pte != pte_offset_kernel(pmd, 0));

	return pte_offset_kernel(pmd, 0);
}

/*
 * Arch hook invoked by page_table_range_init() for each pte table it
 * touches; sh needs no fixups here, so the pte is passed through
 * unchanged (@pmd, @vaddr and @lastpte are deliberately unused).
 */
static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

158 159 160 161 162 163
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
164
	pte_t *pte = NULL;
165
	int i, j, k;
166 167
	unsigned long vaddr;

168 169 170 171 172 173 174 175 176
	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
177 178
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
179 180
			pmd += k;
#endif
181
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
182 183
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
184 185 186
				vaddr += PMD_SIZE;
			}
			k = 0;
187
		}
188
		j = 0;
189 190
	}
}
191
#endif	/* CONFIG_MMU */
L
Linus Torvalds 已提交
192

P
Paul Mundt 已提交
193 194 195 196 197 198 199 200 201 202
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
Y
Yinghai Lu 已提交
203
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
P
Paul Mundt 已提交
204 205 206
				SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
Y
Yinghai Lu 已提交
207 208
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					SMP_CACHE_BYTES, memblock_end_of_DRAM());
P
Paul Mundt 已提交
209 210 211 212 213 214 215 216 217 218 219 220 221
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

/*
 * Register every memblock memory region as an active pfn range, bring
 * node 0 online, run the platform setup hook, mark each region
 * present for its node and initialise sparsemem.
 */
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
			memblock_region_memory_end_pfn(reg));
	}
	sparse_init();
}

/*
 * Reserve memory that must never reach the page allocator: the kernel
 * image (from the zero page offset up to _end), the pages below
 * CONFIG_ZERO_PAGE_OFFSET if any, plus the initrd and any crash
 * kernel region.
 */
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and Reserve the bootmem bitmap. We do
	 * this in two steps (first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

L
Linus Torvalds 已提交
280 281
void __init paging_init(void)
{
282
	unsigned long max_zone_pfns[MAX_NR_ZONES];
283
	unsigned long vaddr, end;
L
Linus Torvalds 已提交
284

P
Paul Mundt 已提交
285 286 287 288
	sh_mv.mv_mem_init();

	early_reserve_mem();

289 290 291 292 293 294 295
	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

Y
Yinghai Lu 已提交
296
	memblock_enforce_memory_limit(memory_limit);
297
	memblock_allow_resize();
P
Paul Mundt 已提交
298

Y
Yinghai Lu 已提交
299
	memblock_dump_all();
P
Paul Mundt 已提交
300 301 302 303

	/*
	 * Determine low and high memory ranges:
	 */
Y
Yinghai Lu 已提交
304
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
P
Paul Mundt 已提交
305 306 307 308 309
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
Y
Yinghai Lu 已提交
310
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());
P
Paul Mundt 已提交
311 312 313 314 315 316

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

317 318 319 320
	/* We don't need to map the kernel through the TLB, as
	 * it is permanatly mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
L
Linus Torvalds 已提交
321

322 323 324 325
	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

326 327
	/*
	 * Populate the relevant portions of swapper_pg_dir so that
328
	 * we can use the fixmap entries without calling kmalloc.
329 330 331
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
332 333
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);
334 335

	kmap_coherent_init();
336

337
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
R
Rob Herring 已提交
338
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
339
	free_area_init_nodes(max_zone_pfns);
L
Linus Torvalds 已提交
340 341
}

/* Non-zero once mem_init() has completed. */
unsigned int mem_init_done = 0;

/*
 * Final memory initialisation: compute high_memory across all online
 * nodes, release bootmem pages to the page allocator, bring up the
 * CPU caches and the zero page, then report the virtual memory
 * layout.
 */
void __init mem_init(void)
{
	pg_data_t *pgdat;

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	free_all_bootmem();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	mem_init_print_info(NULL);
	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

/* Release the memory occupied by __init code and data. */
void free_initmem(void)
{
	free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
/* Release the initrd image pages back to the page allocator. */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Hot-add @size bytes of memory at physical address @start to node
 * @nid.  Everything lands in ZONE_NORMAL, the only zone on sh.
 * Returns 0 on success or the negative error from __add_pages().
 */
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
	if (unlikely(ret))
		/* pr_warn rather than bare printk: explicit log level,
		 * consistent with arch_remove_memory() below. */
		pr_warn("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}

#ifdef CONFIG_NUMA
/* All hot-added memory is currently attributed to node 0. */
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * Hot-remove @size bytes of memory at physical address @start,
 * tearing down the corresponding struct pages.  Returns 0 on success
 * or the negative error from __remove_pages().
 */
int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
	if (unlikely(ret))
		pr_warn("%s: Failed, __remove_pages() == %d\n", __func__,
			ret);

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */