// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>
#include <linux/dma-direct.h>
#include <linux/kprobes.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>
#include <asm/kasan.h>

#include <mm/mmu_decl.h>

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;
bool init_mem_is_free;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
#endif

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
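
/*
 * Usage sketch (a hypothetical driver mmap hook, not part of this file):
 * a character driver exposing physical memory would apply this helper
 * before remap_pfn_range(), so that non-RAM pages get mapped non-cached:
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 *						 vma->vm_end - vma->vm_start,
 *						 vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */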

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
				       unsigned long chunk)
{
	unsigned long i;

	for (i = start; i < stop; i += chunk) {
		flush_dcache_range(i, min(stop, i + chunk));
		cond_resched();
	}
}
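
/*
 * Example (a sketch; the only in-file caller is arch_remove_memory() below):
 * flushing a whole hot-removed section in FLUSH_CHUNK_SIZE pieces bounds the
 * time spent in each flush_dcache_range() call, and the cond_resched()
 * between chunks keeps long flushes preemptible:
 *
 *	flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);
 */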

int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid,
				    params->pgprot);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}

	return __add_pages(nid, start_pfn, nr_pages, params);
}

void __ref arch_remove_memory(int nid, u64 start, u64 size,
			      struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	__remove_pages(start_pfn, nr_pages, altmap);

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);

	ret = remove_section_mapping(start, start + size);
	WARN_ON_ONCE(ret);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
		pr_warn("Hash collision while resizing HPT\n");
}
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
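
/*
 * Worked example (hypothetical layout): with memblock regions [0, 1G) and
 * [2G, 3G), the loop above sees that the end pfn of the first region is
 * below the base pfn of the second, and registers the [1G, 2G) hole as a
 * nosave region so hibernation does not try to save or restore it.
 */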
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We setup ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
 * generic DMA mapping code.  32-bit only devices (if not handled by an IOMMU
 * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
 * ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

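	/*
	 * Worked example (assuming 4K pages, so PAGE_SHIFT == 12): with
	 * zone_dma_bits == 31, 1UL << (31 - 12) == 0x80000 pfns, i.e. the
	 * first 2GB, so ZONE_DMA below ends at whichever is lower, that
	 * pfn or max_low_pfn.
	 */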
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]	= min(max_low_pfn,
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, no need to reset it
	 * back to top-down.
	 */
	memblock_set_bottom_up(true);
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);

	kasan_late_init();

	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	if (ioremap_bot != IOREMAP_TOP)
		pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
			ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}

/**
 * flush_coherent_icache() - if a CPU has a coherent icache, flush it
 * @addr: The base address to use (can be any valid address, the whole cache will be flushed)
 * Return true if the cache was flushed, false otherwise
 */
static inline bool flush_coherent_icache(unsigned long addr)
{
	/*
	 * For a snooping icache, we still need a dummy icbi to purge all the
	 * prefetched instructions from the ifetch buffers. We also need a sync
	 * before the icbi to order the actual stores to memory that might
	 * have modified instructions with the icbi.
	 */
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
		mb(); /* sync */
		allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
		icbi((void *)addr);
		prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
		mb(); /* sync */
		isync();
		return true;
	}

	return false;
}

/**
 * invalidate_icache_range() - Flush the icache by issuing icbi across an address range
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
static void invalidate_icache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_icache_shift();
	unsigned long bytes = l1_icache_bytes();
	char *addr = (char *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		icbi(addr);

	mb(); /* sync */
	isync();
}
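
/*
 * Worked example of the rounding above (assuming 32-byte icache lines):
 * for start = 0x1005 and stop = 0x1040, addr rounds down to 0x1000 and
 * size = 0x40 + 31 = 0x5f, so size >> 5 = 2 icbi's cover 0x1000..0x103f,
 * i.e. every cache line the [start, stop) range touches.
 */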

/**
 * flush_icache_range: Write any modified data cache blocks out to memory
 * and invalidate the corresponding blocks in the instruction cache
 *
 * Generic code will call this after writing memory, before executing from it.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
void flush_icache_range(unsigned long start, unsigned long stop)
{
	if (flush_coherent_icache(start))
		return;

	clean_dcache_range(start, stop);

	if (IS_ENABLED(CONFIG_44x)) {
		/*
		 * Flash invalidate on 44x because we are passed kmapped
		 * addresses and this doesn't work for userspace pages due to
		 * the virtually tagged icache.
		 */
		iccci((void *)start);
		mb(); /* sync */
		isync();
	} else
		invalidate_icache_range(start, stop);
}
EXPORT_SYMBOL(flush_icache_range);
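
/*
 * Usage sketch (hypothetical caller, e.g. code patching): after storing a
 * new instruction, the range must be flushed before it may be executed:
 *
 *	u32 *p = instr_addr;			// hypothetical target
 *	*p = PPC_INST_NOP;			// assumed opcode macro
 *	flush_icache_range((unsigned long)p, (unsigned long)(p + 1));
 */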

#if !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
/**
 * flush_dcache_icache_phys() - Flush a page by its physical address
 * @physaddr: the physical address of the page
 */
static void flush_dcache_icache_phys(unsigned long physaddr)
{
	unsigned long bytes = l1_dcache_bytes();
	unsigned long nb = PAGE_SIZE / bytes;
	unsigned long addr = physaddr & PAGE_MASK;
	unsigned long msr, msr0;
	unsigned long loop1 = addr, loop2 = addr;

	msr0 = mfmsr();
	msr = msr0 & ~MSR_DR;
	/*
	 * This must remain as ASM to prevent potential memory accesses
	 * while the data MMU is disabled
	 */
	asm volatile(
		"   mtctr %2;\n"
		"   mtmsr %3;\n"
		"   isync;\n"
		"0: dcbst   0, %0;\n"
		"   addi    %0, %0, %4;\n"
		"   bdnz    0b;\n"
		"   sync;\n"
		"   mtctr %2;\n"
		"1: icbi    0, %1;\n"
		"   addi    %1, %1, %4;\n"
		"   bdnz    1b;\n"
		"   sync;\n"
		"   mtmsr %5;\n"
		"   isync;\n"
		: "+&r" (loop1), "+&r" (loop2)
		: "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
		: "ctr", "memory");
}
NOKPROBE_SYMBOL(flush_dcache_icache_phys)
#endif // !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
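
/*
 * For reference, a C-level sketch of what the asm above does (it cannot be
 * written as real C, since no data accesses may occur while MSR_DR is
 * cleared):
 *
 *	mtmsr(msr); isync();			// data translation off
 *	for each line in the page: dcbst(line);	// clean dcache to memory
 *	sync();
 *	for each line in the page: icbi(line);	// invalidate icache line
 *	sync();
 *	mtmsr(msr0); isync();			// restore translation
 */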

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		unsigned long addr = page_to_pfn(page) << PAGE_SHIFT;

		if (flush_coherent_icache(addr))
			return;
		flush_dcache_icache_phys(addr);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

/**
 * __flush_dcache_icache(): Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 * @p: the address of the page to flush
 */
void __flush_dcache_icache(void *p)
{
	unsigned long addr = (unsigned long)p;

	if (flush_coherent_icache(addr))
		return;

	clean_dcache_range(addr, addr + PAGE_SIZE);

	/*
	 * We don't flush the icache on 44x. Those have a virtual icache and we
	 * don't have access to the virtual address here (it's not the page
	 * vaddr but where it's mapped in user space). The flushing of the
	 * icache on these is handled elsewhere, when a change in the address
	 * space occurs, before returning to user space.
	 */

	if (cpu_has_feature(MMU_FTR_TYPE_44x))
		return;

	invalidate_icache_range(addr, addr + PAGE_SIZE);
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);
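
/*
 * The net effect is one "System RAM" entry per memblock region in
 * /proc/iomem, e.g. (hypothetical single-region layout):
 *
 *	00000000-7fffffff : System RAM
 */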

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);