/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

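/* RAM limit imposed by the mem= command line option; 0 means no limit */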
unsigned long long memory_limit;
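/* Set once free_initmem() has released the kernel's init sections */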
bool init_mem_is_free;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
#define TOP_ZONE ZONE_HIGHMEM

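/* Walk the kernel page tables to the PTE that maps a kernel virtual address */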
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#else
#define TOP_ZONE ZONE_NORMAL
#endif

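/* A pfn is RAM if it falls inside one of the memblock memory regions */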
int page_is_ram(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
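/* Resolve which NUMA node a hot-added physical address belongs to */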
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

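/*
 * Weak default stubs returning -ENODEV; MMU-specific code overrides these
 * to create/remove the linear mapping for hotplugged memory.
 */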
int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

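/*
 * Memory hotplug add: create the linear mapping for the new range, flush
 * the d-cache for it, then hand the pages over to the core mm.
 */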
int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	flush_inval_dcache_range(start, start + size);

	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
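/*
 * Memory hotplug remove: release the pages from the core mm, then tear
 * down the linear mapping for the range.
 */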
int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page;
	int ret;

	/*
	 * If we have an altmap then we need to skip over any reserved PFNs
	 * when querying the zone.
	 */
	page = pfn_to_page(start_pfn);
	if (altmap)
		page += vmem_altmap_offset(altmap);

	ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_inval_dcache_range(start, start + size);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a
 * given memory range.  PPC64 does not maintain the memory layout in
 * /proc/iomem.  Instead it maintains it in memblock.memory structures.
 * Walk through the memory regions, find holes, and invoke the callback
 * for each contiguous region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);

#ifndef CONFIG_NEED_MULTIPLE_NODES
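/* Flat memory model: set the pfn limits and place all memory on node 0 */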
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

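/* Record the present sections for node 0 and initialize sparsemem */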
void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* Mark the holes between memblock regions as nosave so hibernation skips them */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

static bool zone_limits_final;

/*
 * The memory zones past TOP_ZONE are managed by generic mm code.
 * These should be set to zero since that's what every other
 * architecture does.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
	[0            ... TOP_ZONE        ] = ~0UL,
	[TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
	int i;

	if (WARN_ON(zone_limits_final))
		return;

	for (i = zone; i >= 0; i--) {
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
	}
}

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
	int i;

	for (i = TOP_ZONE; i >= 0; i--) {
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	}

	return -EPERM;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#endif
	limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
	zone_limits_final = true;
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

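/* Release boot memory to the buddy allocator and report the memory layout */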
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the CPU
	 * bring-up path; do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

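/* Free the kernel's init sections, poisoning them to catch stale references */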
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

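/* Make the i-cache coherent with the d-cache for a page, handling hugepages and highmem */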
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* Neither 8xx nor 64-bit needs kmap here since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

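/* Flush the i-cache for part of a user page; kmap() handles a possible highmem page */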
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 * 
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long trap;
	bool is_exec;

	if (radix_enabled()) {
		prefetch((void *)address);
		return;
	}

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */

	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
	switch (trap) {
	case 0x300:
		is_exec = false;
		break;
	case 0x400:
		is_exec = true;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, is_exec, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */