/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
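/*
 * Example (illustrative only; the physical base address and register
 * offsets below are made up, not from any real platform):
 *
 *	void __iomem *regs = __arm_ioremap(0x40000000, SZ_4K, MT_DEVICE);
 *	if (regs) {
 *		u32 id = readl(regs);
 *		writel(1, regs + 0x4);
 *		__iounmap(regs);
 *	}
 */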
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000

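/*
 * Fill in [addr, end) one PTE at a time with the given memory type.
 * The range must be currently unmapped; finding a present PTE here
 * means the kernel page tables are inconsistent, hence the BUG() below.
 */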
static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
			  unsigned long phys_addr, const struct mem_type *type)
{
	pgprot_t prot = __pgprot(type->prot_pte);
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;

	do {
		if (!pte_none(*pte))
			goto bad;

		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
		phys_addr += PAGE_SIZE;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;

 bad:
	printk(KERN_CRIT "remap_area_pte: page already exists\n");
	BUG();
}

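/*
 * Walk the PMD level for [addr, end), allocating PMD tables as needed
 * and handing each piece, split at pmd_addr_end() boundaries, down to
 * remap_area_pte().
 */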
static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
				 unsigned long end, unsigned long phys_addr,
				 const struct mem_type *type)
{
	unsigned long next;
	pmd_t *pmd;
	int ret = 0;

	pmd = pmd_alloc(&init_mm, pgd, addr);
	if (!pmd)
		return -ENOMEM;

	do {
		next = pmd_addr_end(addr, end);
		ret = remap_area_pte(pmd, addr, next, phys_addr, type);
		if (ret)
			return ret;
		phys_addr += next - addr;
	} while (pmd++, addr = next, addr != end);
	return ret;
}

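/*
 * Top-level page-table walker: split [start, start + size) at PGD
 * boundaries and populate each chunk via remap_area_pmd().
 */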
static int remap_area_pages(unsigned long start, unsigned long pfn,
			    size_t size, const struct mem_type *type)
{
	unsigned long addr = start;
	unsigned long next, end = start + size;
	unsigned long phys_addr = __pfn_to_phys(pfn);
	pgd_t *pgd;
	int err = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_area_pmd(pgd, addr, next, phys_addr, type);
		if (err)
			break;
		phys_addr += next - addr;
	} while (pgd++, addr = next, addr != end);

	return err;
}

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return remap_area_pages(virt, __phys_to_pfn(phys), PAGE_SIZE, mtype);
}
EXPORT_SYMBOL(ioremap_page);

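/*
 * Bring the vmalloc-area PGD entries of @mm up to date with init_mm.
 * The copy is retried until init_mm.context.kvm_seq is stable, in case
 * another ioremap()/iounmap() modifies the kernel tables mid-copy.
 */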
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

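/*
 * Map the region with 1MB sections.  Each Linux PGD entry on ARM
 * covers two hardware first-level descriptors (2MB), which is why the
 * loop below writes pmd[0] and pmd[1] before advancing by PGDIR_SIZE.
 */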
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}

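/*
 * Map the region with 16MB supersections.  The hardware requires a
 * supersection descriptor to be replicated in 16 consecutive
 * first-level entries, hence the inner loop writing the same value
 * into eight PGD slots of two PMDs each.  Bits [35:32] of the
 * physical address live in bits [23:20] of the descriptor, which is
 * what the pfn shift below extracts.
 */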
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

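	/*
	 * Pick the largest mapping granule the region allows: 16MB
	 * supersections for physical addresses at or above 4GB
	 * (pfn >= 0x100000) on v6+/XSC3 parts with the extended page
	 * table format, 1MB sections for suitably aligned regions,
	 * and individual pages otherwise.  Section mappings are only
	 * attempted on UP; see the comment above unmap_area_sections().
	 */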
#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, type);

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
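
/*
 * Example (illustrative only; pfn and offset are made up): map the page
 * containing physical address 0x50001004 and get a pointer to that
 * exact byte:
 *
 *	void __iomem *p = __arm_ioremap_pfn(0x50001, 0x4, 4, MT_DEVICE);
 */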

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here b/c we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);