/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_X86_64
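/*
 * On 64-bit, kernel text lives in the high __START_KERNEL_map mapping
 * while all of RAM is also mapped at PAGE_OFFSET; __phys_addr() folds
 * either virtual address back to its physical address.
 */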

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

#endif

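/*
 * page_is_ram() scans the BIOS-provided e820 table linearly, so callers
 * iterating over many pfns pay that walk on every call.
 */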
int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		/*
		 * Sanity check: Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area, which is the
		 * PCI BIOS area.
		 */
		if (addr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    end < (BIOS_END >> PAGE_SHIFT))
			continue;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
			       pgprot_t prot)
{
	unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
	int err, level;

	/* No change for pages after the last mapping */
	if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
		return 0;

	npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	vaddr = (unsigned long) __va(phys_addr);

	/*
	 * If there is no identity map for this address,
	 * change_page_attr_addr is unnecessary
	 */
	if (!lookup_address(vaddr, &level))
		return 0;

	/*
	 * Must use an address here and not struct page because the
	 * phys addr can be in a hole between nodes and not have a
	 * memmap entry.
	 */
	err = change_page_attr_addr(vaddr, npages, prot);

	if (!err)
		global_flush_tlb();

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

#ifdef CONFIG_X86_32
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}
#endif

	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
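	/*
	 * Example (illustrative address only): a request for phys
	 * 0xfebc0004 with size 8 keeps offset == 4, maps the single
	 * page at 0xfebc0000 and returns mapping + 4 below.
	 */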

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}

	if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
}
EXPORT_SYMBOL(ioremap_nocache);
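
/*
 * A minimal usage sketch for a PCI driver (hypothetical BAR and register
 * offset, for illustration only):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);
 *	iounmap(regs);
 */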

/**
 * iounmap - Free a IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space.   So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

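/*
 * These helpers assume the two-level (non-PAE) 32-bit page tables used
 * at early boot: each pgd entry maps 4MB, bits 31..22 index the pgd
 * (hence the >> 22 and the 1023 mask), bits 21..12 index the pte page.
 */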
static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				   unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}


int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

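/*
 * Boot-time usage sketch (illustrative range, e.g. peeking at a BIOS
 * area before the real ioremap() is usable):
 *
 *	void *p = early_ioremap(0xf0000, 0x100);
 *	...
 *	early_iounmap(p, 0x100);
 */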
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
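	/*
	 * Each nesting level gets its own window of NR_FIX_BTMAPS slots,
	 * carved downwards from FIX_BTMAP_BEGIN in the fixmap area.
	 */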
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */