/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

int init_bootmem_done;
int mem_init_done;
unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

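/* Walk the kernel page tables (pgd -> pud -> pmd) to the pte mapping vaddr */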
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		if (paddr >= reg->base && paddr < (reg->base + reg->size))
			return 1;
	return 0;
#endif
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
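
/*
 * Illustrative sketch (not built): how an mmap() handler would apply
 * the prot fixup above before remapping physical pages, in the style
 * of /dev/mem.  "example_mmap" is a made-up name.
 */
#if 0
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size, vma->vm_page_prot);

	/* non-RAM pfns come back non-cached thanks to page_is_ram() */
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}
#endif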

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	if (create_section_mapping(start, start + size))
		return -EINVAL;

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones +
		zone_for_memory(nid, start, size, 0);

	return __add_pages(nid, zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a
 * given memory range.  PPC64 does not maintain the memory layout in
 * /proc/iomem.  Instead it maintains it in memblock.memory structures.
 * Walk through the memory regions, find holes and callback for
 * contiguous regions.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
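
/*
 * Illustrative sketch (not built): a walk_system_ram_range() caller
 * that totals the RAM pages in a range.  The callback receives
 * (start_pfn, nr_pages, arg) per contiguous region; returning
 * non-zero stops the walk.  The helper names are made up.
 */
#if 0
static int count_ram_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	*(unsigned long *)arg += nr_pages;
	return 0;
}

static unsigned long count_ram_pages(unsigned long start_pfn,
				     unsigned long nr_pages)
{
	unsigned long total = 0;

	walk_system_ram_range(start_pfn, nr_pages, &total, count_ram_cb);
	return total;
}
#endif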

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	struct memblock_region *reg;
	int boot_mapsize;

	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);
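	/*
	 * Worked example (illustrative): 512MB of lowmem with 4KB pages
	 * is 131072 pages; at one bit per page the bitmap is 16KB, so
	 * bootmem_bootmap_pages() returns 4 pages here.
	 */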

	start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

	/* reserve the sections we're already using */
	for_each_memblock(reserved, reg) {
		unsigned long top = reg->base + reg->size - 1;
		if (top < lowmem_end_addr)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		else if (reg->base < lowmem_end_addr) {
			unsigned long trunc_size = lowmem_end_addr - reg->base;
			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
		}
	}
#else
	free_bootmem_with_active_regions(0, max_pfn);

	/* reserve the sections we're already using */
	for_each_memblock(reserved, reg)
		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
#endif
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
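
/*
 * Worked example (illustrative): with RAM at 0-1GB and 2-3GB and 4KB
 * pages, the loop above registers pfns [0x40000, 0x80000) -- the
 * 1GB-2GB hole -- as nosave, so hibernation skips pages that don't
 * exist.
 */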
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

static bool zone_limits_final;

static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
	[0 ... MAX_NR_ZONES - 1] = ~0UL
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
	int i;

	if (WARN_ON(zone_limits_final))
		return;

	for (i = zone; i >= 0; i--) {
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
	}
}

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
	enum zone_type top_zone = ZONE_NORMAL;
	int i;

#ifdef CONFIG_HIGHMEM
	top_zone = ZONE_HIGHMEM;
#endif

	for (i = top_zone; i >= 0; i--) {
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	}

	return -EPERM;
}
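
/*
 * Illustrative sketch (not built): a platform with a 31-bit DMA limit
 * would cap ZONE_DMA before paging_init(), and could later map a
 * device's DMA mask back to the least restrictive usable zone:
 *
 *	limit_zone_pfn(ZONE_DMA, 1UL << (31 - PAGE_SHIFT));
 *	...
 *	zone = dma_pfn_limit_to_zone(dma_mask >> PAGE_SHIFT);
 */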

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();
	enum zone_type top_zone;

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
	top_zone = ZONE_HIGHMEM;
	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#else
	top_zone = ZONE_NORMAL;
#endif

	limit_zone_pfn(top_zone, top_of_ram >> PAGE_SHIFT);
	zone_limits_final = true;
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

static void __init register_page_bootmem_info(void)
{
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	register_page_bootmem_info();
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	free_all_bootmem();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

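		/* Give each non-reserved highmem page back to the buddy allocator */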
		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */

	mem_init_done = 1;
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
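
/*
 * Illustrative sketch (not built): the typical caller pattern.  Code
 * that writes to a page through the kernel mapping calls
 * flush_dcache_page() so a later user-space execute mapping gets the
 * deferred i-cache flush.  "example_write_and_mark" is a made-up name.
 */
#if 0
static void example_write_and_mark(struct page *page, const void *src,
				   size_t len)
{
	void *addr = kmap_atomic(page);

	memcpy(addr, src, len);
	kunmap_atomic(addr);
	flush_dcache_page(page);	/* clears PG_arch_1: icache not clean */
}
#endif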

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#ifdef CONFIG_BOOKE
	{
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	}
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* No need to kmap: neither 8xx nor 64-bit supports highmem */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long access = 0, trap;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */
	if (current->thread.regs == NULL)
		return;
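	/* 0x400 is an instruction storage exception, 0x300 a data storage one */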
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	if (page_is_rtas_user_buf(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */