/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
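
/*
 * Typical use from a driver (a minimal sketch, not code from this file;
 * FOO_PHYS_BASE and FOO_REG_STATUS are made-up example constants):
 *
 *	void __iomem *regs = ioremap(FOO_PHYS_BASE, SZ_4K);
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + FOO_REG_STATUS);
 *	...
 *	iounmap(regs);
 *
 * On ARM these end up in __arm_ioremap()/__arm_iounmap() below, via the
 * arch_ioremap_caller/arch_iounmap hooks.
 */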
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include "mm.h"

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
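
/*
 * Usage sketch (illustrative only; the addresses are made up): map a single
 * page of a device at a virtual address the caller has already reserved.
 *
 *	err = ioremap_page(virt, 0x10000000, get_mem_type(MT_DEVICE));
 *	if (err)
 *		return err;
 */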

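/*
 * "kvm" here means "kernel virtual mapping", nothing to do with the KVM
 * hypervisor.  Copy the vmalloc/ioremap PGD entries from init_mm into @mm,
 * retrying until the sequence counter is stable, so that this mm does not
 * keep using section mappings that iounmap() has already torn down.
 */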
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
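		/*
		 * Supersection descriptors carry physical address bits
		 * [35:32] in bits [23:20] (the extended base address).
		 */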
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
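
/*
 * Usage sketch (the PFN is a made-up example): on a !LPAE kernel a physical
 * address above 4GiB does not fit in an unsigned long, so such a region is
 * mapped by PFN instead.  High mappings like this must be supersection
 * aligned (see __arm_ioremap_pfn_caller() above).
 *
 *	regs = __arm_ioremap_pfn(0x101000, 0, SZ_16M, MT_DEVICE);
 */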

void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;
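
/*
 * Platform code may override this hook from an early init function when
 * certain physical ranges need special handling.  A minimal sketch, with
 * made-up my_soc_* names:
 *
 *	static void __iomem *my_soc_ioremap_caller(unsigned long phys,
 *			size_t size, unsigned int mtype, void *caller)
 *	{
 *		if (phys == MY_SOC_STATIC_IO_BASE)
 *			return MY_SOC_STATIC_IO_VADDR;
 *		return __arm_ioremap_caller(phys, size, mtype, caller);
 *	}
 *
 *	arch_ioremap_caller = my_soc_ioremap_caller;
 */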

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return arch_ioremap_caller(phys_addr, size, mtype,
		__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY;
	else
		mtype = MT_MEMORY_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
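
/*
 * Usage sketch (hypothetical SRAM address and size): platform PM code maps
 * on-chip SRAM executable, copies a routine into it with fncpy(), then runs
 * it while SDRAM is unavailable.
 *
 *	sram = __arm_ioremap_exec(MY_SRAM_PHYS, SZ_8K, false);
 *	suspend_fn = fncpy(sram, &my_sram_suspend, SZ_8K);
 *	suspend_fn();
 */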

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if (vm->addr > addr)
			break;
		if (!(vm->flags & VM_IOREMAP))
			continue;
		/* If this is a static mapping we must leave it alone */
		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
			read_unlock(&vmlist_lock);
			return;
		}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if ((vm->addr == addr) &&
		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
			unmap_area_sections((unsigned long)vm->addr, vm->size);
			break;
		}
#endif
	}
	read_unlock(&vmlist_lock);

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void __arm_iounmap(volatile void __iomem *io_addr)
{
	arch_iounmap(io_addr);
}
EXPORT_SYMBOL(__arm_iounmap);