/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	prot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long) addr,
			       (unsigned long) addr + size, phys_addr, prot)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);
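
/*
 * Illustrative use, a minimal sketch rather than code from this file:
 * drivers normally reach __ioremap() through ioremap(), which on i386
 * of this era is an <asm/io.h> wrapper for __ioremap(offset, size, 0).
 * The MY_DEV_* names below are hypothetical placeholders:
 *
 *	void __iomem *regs = ioremap(MY_DEV_PHYS_BASE, MY_DEV_REG_LEN);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + MY_DEV_CTRL_OFFSET);
 *	iounmap(regs);
 */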

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);

	if (!p)
		return p;

	/* Guaranteed to be >= phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory) - 1) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
EXPORT_SYMBOL(ioremap_nocache);
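
/*
 * Illustrative use (a sketch; pdev and STATUS_REG are hypothetical):
 * uncached mappings are what a driver wants for control/status
 * registers, where every access must really reach the device:
 *
 *	void __iomem *csr = ioremap_nocache(pci_resource_start(pdev, 0),
 *					    pci_resource_len(pdev, 0));
 *	if (!csr)
 *		return -ENOMEM;
 *	status = readl(csr + STATUS_REG);
 *	iounmap(csr);
 */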

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space.   So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	if (p->phys_addr < virt_to_phys(high_memory) - 1) {
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 get_vm_area_size(p) >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

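/*
 * With 32-bit non-PAE paging, each swapper_pg_dir entry maps 4MB
 * (indexed by bits 31..22 of the address) and is backed by one
 * page-sized table of 1024 ptes (indexed by bits 21..12). The boot
 * fixmaps all live within one such 4MB slot, so the single static
 * bm_pte[] table above is enough to back them; the helpers below do
 * the corresponding index arithmetic.
 */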
static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				   unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
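	/*
	 * Fixmap slots grow downwards in virtual address space, so
	 * nesting level n claims the NR_FIX_BTMAPS slots starting at
	 * index FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*n.
	 */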
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
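
/*
 * Illustrative boot-time use (a sketch; table_phys, table_len and
 * table_copy are hypothetical): early_ioremap()/early_iounmap() serve
 * callers that run before paging_init(), e.g. code that needs to peek
 * at a firmware table whose physical address it already knows:
 *
 *	void *map = early_ioremap(table_phys, table_len);
 *	if (map) {
 *		memcpy(&table_copy, map, table_len);
 *		early_iounmap(map, table_len);
 *	}
 *
 * Mappings must be released in LIFO order, since each nested call
 * claims the next lower group of NR_FIX_BTMAPS fixmap slots.
 */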

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}