/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
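/*
 * Illustrative driver-side usage (a sketch only; the physical base
 * address and register offsets below are made up):
 *
 *	void __iomem *regs = ioremap(0xd0000000, SZ_4K);
 *	if (regs) {
 *		u32 id = readl(regs);
 *		writel(id | 0x1, regs + 0x4);
 *		iounmap(regs);
 *	}
 */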
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"

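/*
 * Establish a kernel mapping for a single page of I/O memory: virtual
 * address 'virt' is mapped to physical address 'phys' using the page
 * protection bits of the supplied memory type.
 */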
int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

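/*
 * Bring a task's view of the kernel mappings up to date: copy the page
 * directory entries covering the vmalloc region from init_mm into 'mm',
 * retrying until the kernel mapping sequence number (kvm_seq) sampled
 * before the copy still matches init_mm afterwards.
 */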
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, if (for example) an interrupt that references the newly
 * ioremap'd region comes in on one of those other CPUs, it will still see
 * the _old_ mapping.
 *
 * Note that get_vm_area_caller() allocates a 4K guard page, so we need to
 * mask the size back to a 1MB boundary or we will overflow in the loop
 * below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

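/*
 * Map [virt, virt + size) to the physical memory starting at 'pfn' using
 * 1MB section entries.  Entries are written in pairs because a Linux PMD
 * spans two hardware sections on the classic ARM 2-level page tables.
 */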
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

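/*
 * Map [virt, virt + size) using 16MB supersection entries.  Each
 * supersection descriptor must be replicated into the 16 consecutive
 * page table entries it covers (eight loop iterations writing two
 * entries each), and physical address bits [35:32] are carried in
 * descriptor bits [23:20], which is what allows mapping above 4GB.
 */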
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

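/*
 * Core of the ARM ioremap implementation.  A static (boot-time) mapping
 * of a compatible memory type is reused if it already covers the
 * requested physical range; mapping system RAM is refused.  Otherwise a
 * new VM area is allocated and mapped with supersections or sections
 * where CPU, configuration and alignment permit, falling back to
 * individual page mappings.
 */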
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
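	/*
	 * Supersections can be used only when the I/O domain is domain 0
	 * (supersection entries always run in domain 0), the CPU supports
	 * them (ARMv6+ with the extended page tables enabled via CR_XP, or
	 * XScale3), the physical address lies at or above 4GB, and the
	 * physical address, size and virtual address are all 16MB aligned.
	 * Plain sections instead require 2MB (PMD) alignment of all three.
	 */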
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

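/*
 * Split a physical address into a page frame number and an in-page
 * offset, reject zero-sized or wrapping requests, and hand the mapping
 * off to __arm_ioremap_pfn_caller().
 */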
void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory, for example when reprogramming source
 * clocks that would otherwise affect normal memory. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY;
	else
		mtype = MT_MEMORY_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}

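/*
 * Tear down a mapping created by __arm_ioremap() and friends.  Static
 * mappings set up at boot are left untouched; section-based mappings are
 * unmapped by hand first, since the vmalloc code does not know about
 * them, and the VM area is then released with vunmap().
 */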
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if (vm->addr > addr)
			break;
		if (!(vm->flags & VM_IOREMAP))
			continue;
		/* If this is a static mapping we must leave it alone */
		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
			read_unlock(&vmlist_lock);
			return;
		}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if ((vm->addr == addr) &&
		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
			unmap_area_sections((unsigned long)vm->addr, vm->size);
			break;
		}
#endif
	}
	read_unlock(&vmlist_lock);

	vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);