/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
#define TOP_ZONE ZONE_HIGHMEM

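/*
 * Walk the kernel page tables (pgd -> pud -> pmd -> pte) to the pte that
 * maps a kernel virtual address.  Only used for the PKMAP and fixmap
 * areas below, which are mapped via the kernel page tables.
 */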
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#else
#define TOP_ZONE ZONE_NORMAL
#endif

int page_is_ram(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}

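/*
 * Used when mapping physical memory into user space (e.g. /dev/mem) to
 * choose the page protection: the platform hook gets first say, RAM keeps
 * the caller's protection, and anything else is mapped non-cached.
 */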
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

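/*
 * Weak fallbacks: platforms that can map hot-added memory (e.g. book3s64
 * hash and radix) provide their own create/remove_section_mapping();
 * everyone else gets -ENODEV here.
 */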
int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}

	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page;
	int ret;

	/*
	 * If we have an altmap then we need to skip over any reserved PFNs
	 * when querying the zone.
	 */
	page = pfn_to_page(start_pfn);
	if (altmap)
		page += vmem_altmap_offset(altmap);

	ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a
 * given memory range.  PPC64 does not maintain the memory layout in
 * /proc/iomem.  Instead it maintains it in memblock.memory structures.
 * Walk through the memory regions, find holes and invoke the callback
 * for each contiguous region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
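
/*
 * A minimal usage sketch; count_ram_pages and nr_ram are hypothetical and
 * not part of this file.  Returning non-zero from the callback stops the
 * walk early:
 *
 *	static int count_ram_pages(unsigned long start_pfn,
 *				   unsigned long nr_pages, void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long nr_ram = 0;
 *	walk_system_ram_range(0, max_pfn, &nr_ram, count_ram_pages);
 */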

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

static bool zone_limits_final;

/*
 * The memory zones past TOP_ZONE are managed by generic mm code.
 * These should be set to zero since that's what every other
 * architecture does.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
	[0            ... TOP_ZONE        ] = ~0UL,
	[TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
	int i;

	if (WARN_ON(zone_limits_final))
		return;

	for (i = zone; i >= 0; i--) {
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
	}
}

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
	int i;

	for (i = TOP_ZONE; i >= 0; i--) {
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	}

	return -EPERM;
}
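
/*
 * Worked example with hypothetical limits: if limit_zone_pfn(ZONE_DMA, ...)
 * capped ZONE_DMA at pfn 0x80000 (2GB with 4K pages) while the higher zones
 * extend beyond that, dma_pfn_limit_to_zone(0x80000) returns ZONE_DMA; a
 * limit at or above every zone's ceiling returns TOP_ZONE.
 */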

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_kernel_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#endif
	limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
	zone_limits_final = true;
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	free_all_bootmem();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions; do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 * 
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long access, trap;

	if (radix_enabled())
		return;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */

	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
	switch (trap) {
	case 0x300:		/* data storage interrupt (DSI) */
		access = 0UL;
		break;
	case 0x400:		/* instruction storage interrupt (ISI) */
		access = _PAGE_EXEC;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */