/*
 *  linux/arch/arm/mm/nommu.c
 *
 * ARM uCLinux supporting functions.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/mach/arch.h>

#include "mm.h"

/*
 * Reserve the exception vector page with memblock so it is never handed
 * out by the allocator.
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Register the exception vector page.  On some architectures the
	 * vectors live in DRAM at physical address 0; if that page were
	 * left allocatable, alloc_page() would appear to fail because the
	 * page it returns, while valid, has address "0".
	 */
	memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
}

31 32
void __init sanity_check_meminfo(void)
{
33 34
	phys_addr_t end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]);
	high_memory = __va(end - 1) + 1;
35 36
}

37 38 39 40
/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
41
void __init paging_init(struct machine_desc *mdesc)
42
{
43
	early_trap_init((void *)CONFIG_VECTORS_BASE);
44
	bootmem_init();
45 46
}

/*
 * We don't need to do anything here for nommu machines: there is no
 * identity mapping to restore before rebooting.
 */
void setup_mm_for_reboot(void)
{
}

54 55
void flush_dcache_page(struct page *page)
{
56
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
57
}
58
EXPORT_SYMBOL(flush_dcache_page);
59

60 61 62 63 64 65 66 67 68
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC)
		__cpuc_coherent_user_range(uaddr, uaddr + len);
}

69 70
void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
				size_t size, unsigned int mtype)
71 72 73 74 75
{
	if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
		return NULL;
	return (void __iomem *) (offset + (pfn << PAGE_SHIFT));
}
76
EXPORT_SYMBOL(__arm_ioremap_pfn);
77

78 79 80 81 82 83
void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
			   size_t size, unsigned int mtype, void *caller)
{
	return __arm_ioremap_pfn(pfn, offset, size, mtype);
}

84 85
void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
			    unsigned int mtype)
86 87 88
{
	return (void __iomem *)phys_addr;
}
89
EXPORT_SYMBOL(__arm_ioremap);
90

91 92
void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, unsigned int, void *);

93 94
void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
				   unsigned int mtype, void *caller)
95 96 97 98
{
	return __arm_ioremap(phys_addr, size, mtype);
}

/* Platform-overridable iounmap hook; not consulted by the nommu stub. */
void (*arch_iounmap)(volatile void __iomem *);

/*
 * Nothing to undo: __arm_ioremap() created no mapping, so unmapping is
 * a no-op on nommu.
 */
void __arm_iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(__arm_iounmap);