// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>
#include <linux/dma-direct.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>
#include <asm/kasan.h>

#include <mm/mmu_decl.h>

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;
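/* Set once free_initmem() has released the init sections back to the page allocator */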
bool init_mem_is_free;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

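/* Walk the kernel page tables (pgd -> pud -> pmd) down to the PTE mapping @vaddr */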
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

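/*
 * Choose the protection for mappings of physical memory (e.g. /dev/mem):
 * cacheable for real RAM, non-cached otherwise, unless the platform hook
 * overrides the decision.
 */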
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to @chunk bytes.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
				       unsigned long chunk)
{
	unsigned long i;

	for (i = start; i < stop; i += chunk) {
		flush_dcache_range(i, min(stop, i + chunk));
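		/* Yield the CPU between chunks so very large flushes don't hog it */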
		cond_resched();
	}
}

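/*
 * Memory hotplug: create the linear mapping for the new range, then hand
 * the pages over to the core mm via __add_pages().
 */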
int __ref arch_add_memory(int nid, u64 start, u64 size,
			struct mhp_restrictions *restrictions)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}

	return __add_pages(nid, start_pfn, nr_pages, restrictions);
}

void __ref arch_remove_memory(int nid, u64 start, u64 size,
			     struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	__remove_pages(start_pfn, nr_pages, altmap);

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);

	ret = remove_section_mapping(start, start + size);
	WARN_ON_ONCE(ret);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
		pr_warn("Hash collision while resizing HPT\n");
}
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark the RAM holes (pages that don't exist) as nosave for hibernation */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
 * generic DMA mapping code.  32-bit only devices (if not handled by an IOMMU
 * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
 * ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]	= min(max_low_pfn,
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, no need to reset it
	 * back to top-down.
	 */
	memblock_set_bottom_up(true);
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);

	kasan_late_init();

	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions; do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	if (ioremap_bot != IOREMAP_TOP)
		pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
			ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
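	/* Any further ppc_md.progress() calls go through plain printk from now on */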
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}

/**
 * flush_coherent_icache() - if a CPU has a coherent icache, flush it
 * @addr: The base address to use (can be any valid address, the whole cache will be flushed)
 * Return true if the cache was flushed, false otherwise
 */
static inline bool flush_coherent_icache(unsigned long addr)
{
	/*
	 * For a snooping icache, we still need a dummy icbi to purge all the
	 * prefetched instructions from the ifetch buffers. We also need a sync
	 * before the icbi to order the actual stores to memory that might
	 * have modified instructions with the icbi.
	 */
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
		mb(); /* sync */
		icbi((void *)addr);
		mb(); /* sync */
		isync();
		return true;
	}

	return false;
}

/**
 * invalidate_icache_range() - Flush the icache by issuing icbi across an address range
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
static void invalidate_icache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_icache_shift();
	unsigned long bytes = l1_icache_bytes();
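	/* Round the start down to a cache-line boundary and pad the size so the final line is covered */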
	char *addr = (char *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		icbi(addr);

	mb(); /* sync */
	isync();
}

/**
 * flush_icache_range: Write any modified data cache blocks out to memory
 * and invalidate the corresponding blocks in the instruction cache
 *
 * Generic code will call this after writing memory, before executing from it.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
void flush_icache_range(unsigned long start, unsigned long stop)
{
	if (flush_coherent_icache(start))
		return;

	clean_dcache_range(start, stop);

	if (IS_ENABLED(CONFIG_44x)) {
		/*
		 * Flash invalidate on 44x because we are passed kmapped
		 * addresses and this doesn't work for userspace pages due to
		 * the virtually tagged icache.
		 */
		iccci((void *)start);
		mb(); /* sync */
		isync();
	} else
		invalidate_icache_range(start, stop);
}
EXPORT_SYMBOL(flush_icache_range);
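/*
 * Typical usage (sketch): after writing instructions to memory, flush the
 * range before executing it, e.g.
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */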

#if !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
/**
 * flush_dcache_icache_phys() - Flush a page by its physical address
 * @physaddr: the physical address of the page
 */
static void flush_dcache_icache_phys(unsigned long physaddr)
{
	unsigned long bytes = l1_dcache_bytes();
	unsigned long nb = PAGE_SIZE / bytes;
	unsigned long addr = physaddr & PAGE_MASK;
	unsigned long msr, msr0;
	unsigned long loop1 = addr, loop2 = addr;

	msr0 = mfmsr();
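	/* Clear MSR_DR so the accesses in the asm below bypass data address translation */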
	msr = msr0 & ~MSR_DR;
	/*
	 * This must remain as ASM to prevent potential memory accesses
	 * while the data MMU is disabled
	 */
	asm volatile(
		"   mtctr %2;\n"
		"   mtmsr %3;\n"
		"   isync;\n"
		"0: dcbst   0, %0;\n"
		"   addi    %0, %0, %4;\n"
		"   bdnz    0b;\n"
		"   sync;\n"
		"   mtctr %2;\n"
		"1: icbi    0, %1;\n"
		"   addi    %1, %1, %4;\n"
		"   bdnz    1b;\n"
		"   sync;\n"
		"   mtmsr %5;\n"
		"   isync;\n"
		: "+&r" (loop1), "+&r" (loop2)
		: "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
		: "ctr", "memory");
}
#endif // !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* Neither 8xx nor 64-bit has highmem, so there is no need to kmap */
	__flush_dcache_icache(page_address(page));
#else
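	/* The page may have no permanent kernel mapping: map it temporarily or flush by physical address */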
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		unsigned long addr = page_to_pfn(page) << PAGE_SHIFT;

		if (flush_coherent_icache(addr))
			return;
		flush_dcache_icache_phys(addr);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

/**
 * __flush_dcache_icache(): Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 * @p: the address of the page to flush
 */
void __flush_dcache_icache(void *p)
{
	unsigned long addr = (unsigned long)p;

	if (flush_coherent_icache(addr))
		return;

	clean_dcache_range(addr, addr + PAGE_SIZE);

	/*
	 * We don't flush the icache on 44x. Those have a virtual icache and we
	 * don't have access to the virtual address here (it's not the page
	 * vaddr but where it's mapped in user space). The flushing of the
	 * icache on these is handled elsewhere, when a change in the address
	 * space occurs, before returning to user space.
	 */

	if (cpu_has_feature(MMU_FTR_TYPE_44x))
		return;

	invalidate_icache_range(addr, addr + PAGE_SIZE);
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

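/*
 * Flush a range of a (possibly highmem) user page the kernel has just
 * written, e.g. when inserting breakpoints into user code: map the page,
 * flush through the kernel alias, then unmap.
 */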
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);