/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
#define TOP_ZONE ZONE_HIGHMEM

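/* Walk the kernel page tables down to the PTE mapping a kernel virtual address. */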
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#else
#define TOP_ZONE ZONE_NORMAL
#endif

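/*
 * Return 1 if the page frame is backed by system RAM.  On PPC32 every
 * pfn below max_pfn is RAM; on PPC64 we check the memblock regions.
 */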
int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		if (paddr >= reg->base && paddr < (reg->base + reg->size))
			return 1;
	return 0;
#endif
}

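/*
 * Page protection for /dev/mem style mappings: defer to the platform
 * hook if there is one, otherwise map anything that isn't RAM uncached.
 */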
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

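/*
 * Weak default stubs: subarchitectures that support memory hotplug
 * override these to update the linear mapping.
 */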
int __weak create_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

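/*
 * Hotplug entry point: grow the hash page table if needed, map the new
 * range in the linear mapping, then hand the pages to the core mm.
 */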
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size);
	if (rc) {
		pr_warning("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			   start, start + size, rc);
		return -EFAULT;
	}

	return __add_pages(nid, start_pfn, nr_pages, want_memblock);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
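/* Inverse of arch_add_memory(): tear down the pages, then the mappings. */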
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct vmem_altmap *altmap;
	struct page *page;
	int ret;

	/*
	 * If we have an altmap then we need to skip over any reserved PFNs
	 * when querying the zone.
	 */
	page = pfn_to_page(start_pfn);
	altmap = to_vmem_altmap((unsigned long) page);
	if (altmap)
		page += vmem_altmap_offset(altmap);

	ret = __remove_pages(page_zone(page), start_pfn, nr_pages);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in memblock.memory structures.  Walk through the
 * memory regions, find holes, and invoke the callback for each contiguous region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);

#ifndef CONFIG_NEED_MULTIPLE_NODES
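/* Flat (non-NUMA) setup: derive pfn limits from memblock and init sparsemem. */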
void __init initmem_init(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

static bool zone_limits_final;

/*
 * The memory zones past TOP_ZONE are managed by generic mm code.
 * These should be set to zero since that's what every other
 * architecture does.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
	[0            ... TOP_ZONE        ] = ~0UL,
	[TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
	int i;

	if (WARN_ON(zone_limits_final))
		return;

	for (i = zone; i >= 0; i--) {
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
	}
}
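
/*
 * Example (illustrative only): a platform whose devices can only DMA to
 * the low 2GB of the address space could call, before paging_init():
 *
 *	limit_zone_pfn(ZONE_NORMAL, 1UL << (31 - PAGE_SHIFT));
 */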

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
	int i;

	for (i = TOP_ZONE; i >= 0; i--) {
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	}

	return -EPERM;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_kernel_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#endif
	limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
	zone_limits_final = true;
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

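/*
 * Final memory init: release boot memory to the buddy allocator, free
 * highmem pages, and report the resulting layout.
 */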
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	free_all_bootmem();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

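/* Free __init text/data, first making it non-executable. */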
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

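/*
 * Flush the data cache and invalidate the icache for a page, using a
 * temporary mapping when the page may live in highmem.
 */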
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

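/* Flush the icache for a sub-range of a user page via a temporary kmap. */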
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 * 
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long access, trap;

	if (radix_enabled())
		return;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */

	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
	switch (trap) {
	case 0x300:
		access = 0UL;
		break;
	case 0x400:
		access = _PAGE_EXEC;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */