/*
 *  linux/arch/arm/mm/nommu.c
 *
 * ARM uCLinux supporting functions.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/kernel.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/mach/arch.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/procinfo.h>

#include "mm.h"

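/* Base address of the exception vectors, reserved in arm_mm_memblock_reserve() */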
unsigned long vectors_base;

#ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info;
#endif

#ifdef CONFIG_CPU_CP15
#ifdef CONFIG_CPU_HIGH_VECTOR
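/*
 * High vectors: set the V bit so exceptions are taken at 0xffff0000,
 * and report that address as the vectors base.
 */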
static unsigned long __init setup_vectors_base(void)
{
	unsigned long reg = get_cr();

	set_cr(reg | CR_V);
	return 0xffff0000;
}
#else /* CONFIG_CPU_HIGH_VECTOR */
/* Write exception base address to VBAR */
static inline void set_vbar(unsigned long val)
{
	asm("mcr p15, 0, %0, c12, c0, 0" : : "r" (val) : "cc");
}

/*
 * Security extensions, bits[7:4], permitted values,
 * 0b0000 - not implemented, 0b0001/0b0010 - implemented
 */
static inline bool security_extensions_enabled(void)
{
	/* Check CPUID Identification Scheme before ID_PFR1 read */
	if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
		return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
	return false;
}

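/*
 * Low vectors: clear the V bit and, when the Security Extensions
 * provide VBAR, program the chosen base into it (optionally remapped
 * to RAM at CONFIG_DRAM_BASE).
 */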
static unsigned long __init setup_vectors_base(void)
{
	unsigned long base = 0, reg = get_cr();

	set_cr(reg & ~CR_V);
	if (security_extensions_enabled()) {
		if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM))
			base = CONFIG_DRAM_BASE;
		set_vbar(base);
	} else if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) {
		if (CONFIG_DRAM_BASE != 0)
			pr_err("Security extensions not enabled, vectors cannot be remapped to RAM, vectors base will be 0x00000000\n");
	}

	return base;
}
#endif /* CONFIG_CPU_HIGH_VECTOR */
#endif /* CONFIG_CPU_CP15 */

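/*
 * Reserve the memory the kernel must never hand out: on !V7-M parts
 * this is the exception vector page and the page following it.
 */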
void __init arm_mm_memblock_reserve(void)
{
#ifndef CONFIG_CPU_V7M
	vectors_base = IS_ENABLED(CONFIG_CPU_CP15) ? setup_vectors_base() : 0;
	/*
	 * Reserve the exception vector page.  On platforms where DRAM
	 * starts at the vector base, alloc_page() could otherwise hand
	 * out the page at physical address 0, which callers would
	 * mistake for an allocation failure even though it is not NULL.
	 */
	memblock_reserve(vectors_base, 2 * PAGE_SIZE);
#else /* ifndef CONFIG_CPU_V7M */
	/*
	 * There is no dedicated vector page on V7-M. So nothing needs to be
	 * reserved here.
	 */
#endif
}

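/*
 * Without an MMU all of DRAM is lowmem: clamp memory to what the MPU
 * can map and point high_memory at the end of DRAM.
 */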
void __init adjust_lowmem_bounds(void)
{
	phys_addr_t end;
	adjust_lowmem_bounds_mpu();
	end = memblock_end_of_DRAM();
	high_memory = __va(end - 1) + 1;
	memblock_set_current_limit(end);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(const struct machine_desc *mdesc)
{
	early_trap_init((void *)vectors_base);
	mpu_setup();
	bootmem_init();
}

/*
 * We don't need to do anything here for nommu machines.
 */
void setup_mm_for_reboot(void)
{
}

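/*
 * With a single flat mapping there are no cache aliases to consider:
 * flushing the kernel view of the page is sufficient.
 */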
void flush_dcache_page(struct page *page)
{
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_dcache_page);

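/* Same requirement for pages modified directly by the kernel */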
void flush_kernel_dcache_page(struct page *page)
{
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

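/*
 * Used when writing into another task's pages (e.g. ptrace): after the
 * copy, make the I- and D-caches coherent if the mapping is executable.
 */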
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC)
		__cpuc_coherent_user_range(uaddr, uaddr + len);
}

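/*
 * No MMU, so "remapping" I/O is an identity translation: reject PFNs
 * beyond the 32-bit physical address space and otherwise return the
 * physical address itself.
 */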
void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
				size_t size, unsigned int mtype)
{
	if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
		return NULL;
	return (void __iomem *) (offset + (pfn << PAGE_SHIFT));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

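/* Every mapping type collapses to the same identity mapping here */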
void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
				   unsigned int mtype, void *caller)
{
	return (void __iomem *)phys_addr;
}

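/* Hook allowing platforms to substitute their own ioremap implementation */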
void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

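/* ioremap_cache() is an alias of ioremap_cached(); both use MT_DEVICE_CACHED */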
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
	__alias(ioremap_cached);

void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
EXPORT_SYMBOL(ioremap_cached);

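/* Write-combining requests likewise fall through to the identity map */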
void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

#ifdef CONFIG_PCI

#include <asm/mach/map.h>

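/* PCI config space must be mapped strongly ordered, hence MT_UNCACHED */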
void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

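/* Write-back memremap() is likewise a 1:1 translation */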
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (void *)phys_addr;
}

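/* ioremap() created no mapping, so there is nothing to tear down */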
void __iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(__iounmap);

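/* Platform hook mirroring arch_ioremap_caller */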
void (*arch_iounmap)(volatile void __iomem *);

void iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(iounmap);