/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

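/* Walk the kernel page tables down to the pte that maps @vaddr. */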
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

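/*
 * Return 1 if @pfn is backed by system RAM: 32-bit simply compares
 * against max_pfn, 64-bit consults the memblock memory regions.
 */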
int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		if (paddr >= reg->base && paddr < (reg->base + reg->size))
			return 1;
	return 0;
#endif
}

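/*
 * Page protection for user mappings of physical memory (e.g. /dev/mem):
 * defer to the platform hook if one is registered, otherwise map
 * anything that is not RAM as uncached.
 */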
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

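/*
 * Memory hot-add: map the new range into the kernel linear mapping,
 * then hand its pages to the core mm via __add_pages().
 */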
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size);
	if (rc) {
		pr_warning(
			"Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones +
		zone_for_memory(nid, start, size, 0, for_device);

	return __add_pages(nid, zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
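/*
 * Memory hot-remove: return the pages to the core mm, then tear down
 * the kernel linear mapping for the range.
 */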
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_memory_resource() needs to make sure there are no holes in a
 * given memory range.  PPC64 does not maintain the memory layout in
 * /proc/iomem.  Instead it maintains it in memblock.memory structures.
 * Walk through the memory regions, find holes and callback for
 * contiguous regions.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);

#ifndef CONFIG_NEED_MULTIPLE_NODES
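/*
 * Non-NUMA initialisation: everything lives in one node, so just set
 * the pfn limits and build the sparsemem section map.
 */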
void __init initmem_init(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

static bool zone_limits_final;

static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
	[0 ... MAX_NR_ZONES - 1] = ~0UL
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
	int i;

	if (WARN_ON(zone_limits_final))
		return;

	for (i = zone; i >= 0; i--) {
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
	}
}

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
	enum zone_type top_zone = ZONE_NORMAL;
	int i;

#ifdef CONFIG_HIGHMEM
	top_zone = ZONE_HIGHMEM;
#endif

	for (i = top_zone; i >= 0; i--) {
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	}

	return -EPERM;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();
	enum zone_type top_zone;

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
	top_zone = ZONE_HIGHMEM;
	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#else
	top_zone = ZONE_NORMAL;
#endif

	limit_zone_pfn(top_zone, top_of_ram >> PAGE_SHIFT);
	zone_limits_final = true;
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

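/*
 * Final memory bring-up: initialise swiotlb if configured, release
 * bootmem to the buddy allocator, free highmem and print the layout.
 */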
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	free_all_bootmem();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

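/* Poison and free the kernel's init sections once boot is complete. */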
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

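/*
 * Flush the data cache for this page and invalidate the icache copy,
 * using a temporary kmap when the page may live in highmem.
 */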
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

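/* Sync the icache after writing instructions into a user page. */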
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 * 
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long access = 0, trap;

	if (radix_enabled())
		return;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */