/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/lmb.h>
#include <asm/sections.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

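/* Set when do_init_bootmem() and mem_init() respectively have completed */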
int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;

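/*
 * Return 1 if the given page frame number is backed by system RAM:
 * 32-bit simply compares against high_memory, 64-bit walks the LMB list.
 */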
int page_is_ram(unsigned long pfn)
{
	unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64	/* XXX for now */
	return paddr < __pa(high_memory);
#else
	int i;
	for (i=0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
			(paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
#endif
}

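/*
 * Choose page protections for a mapping of physical memory (e.g. /dev/mem):
 * let the platform override if it wishes, otherwise map anything that is
 * not RAM as guarded and non-cacheable.
 */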
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

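/*
 * Hot-add a memory range: map it in the kernel linear mapping and hand
 * the new pages over to the memory hotplug core.
 */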
int __devinit arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(zone, start_pfn, nr_pages);
}

/*
 * First pass at this code will check to determine if the remove
 * request is within the RMO.  Do not allow removal within the RMO.
 */
int __devinit remove_memory(u64 start, u64 size)
{
	struct zone *zone;
	unsigned long start_pfn, end_pfn, nr_pages;

	start_pfn = start >> PAGE_SHIFT;
	nr_pages = size >> PAGE_SHIFT;
	end_pfn = start_pfn + nr_pages;

	printk("%s(): Attempting to remove memoy in range "
			"%lx to %lx\n", __func__, start, start+size);
	/*
	 * check for range within RMO
	 */
	zone = page_zone(pfn_to_page(start_pfn));

	printk("%s(): memory will be removed from "
			"the %s zone\n", __func__, zone->name);

	/*
	 * not handling removing memory ranges that
	 * overlap multiple zones yet
	 */
	if (end_pfn > (zone->zone_start_pfn + zone->spanned_pages))
		goto overlap;

	/* make sure it is NOT in RMO */
	if ((start < lmb.rmo_size) || ((start+size) < lmb.rmo_size)) {
		printk("%s(): range to be removed must NOT be in RMO!\n",
			__func__);
		goto in_rmo;
	}

	return __remove_pages(zone, start_pfn, nr_pages);

overlap:
	printk("%s(): memory range to be removed overlaps "
		"multiple zones!!!\n", __func__);
in_rmo:
	return -1;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	unsigned long highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		unsigned long flags;
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%ld pages of HIGHMEM\n", highmem);
#endif
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

	/* Add active regions with valid PFNs */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT);
#else
	free_bootmem_with_active_regions(0, max_pfn);
#endif

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i));

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long lmb_next_region_start_pfn,
		      lmb_region_max_pfn;
	int i;

	for (i = 0; i < lmb.memory.cnt - 1; i++) {
		lmb_region_max_pfn =
			(lmb.memory.region[i].base >> PAGE_SHIFT) +
			(lmb.memory.region[i].size >> PAGE_SHIFT);
		lmb_next_region_start_pfn =
			lmb.memory.region[i+1].base >> PAGE_SHIFT;

		if (lmb_region_max_pfn < lmb_next_region_start_pfn)
			register_nosave_region(lmb_region_max_pfn,
					       lmb_next_region_start_pfn);
	}

	return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_HIGHMEM
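	/* Map something at PKMAP_BASE and KMAP_FIX_BEGIN so their page
	 * tables get allocated, then cache the PTE pointers that the
	 * highmem kmap code uses. */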
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
			(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
	kmap_pte = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
			(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN),
			 KMAP_FIX_BEGIN);
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = lmb.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
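	/* Hand highmem page frames straight to the buddy allocator */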
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);

			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

	mem_init_done = 1;
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

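/*
 * Make the instruction cache coherent with the data cache for the given
 * page, using a kernel mapping appropriate to the platform (kmap on BookE,
 * the linear mapping on 8xx/64-bit, or the physical address otherwise).
 */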
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif

}
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 * 
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;
#endif
	unsigned long pfn = pte_pfn(pte);

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
		/* On 8xx, cache control instructions (particularly
		 * "dcbst" from flush_dcache_icache) fault as write
		 * operation if there is an unpopulated TLB entry
		 * for the address in question. To workaround that,
		 * we invalidate the TLB here, thus avoiding dcbst
		 * misbehaviour.
		 */
		_tlbie(address);
#endif
		if (!PageReserved(page)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm) {
				__flush_dcache_icache((void *) address);
			} else
				flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		}
	}

#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */
	if (current->thread.regs == NULL)
		return;
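	/* 0x400 is an instruction storage interrupt, 0x300 a data storage
	 * interrupt; anything else did not come from a page fault. */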
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}