/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
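	/*
	 * Worked example of the alignment fixup above (illustrative values):
	 * for phys_addr = 0x1fff0042 and size = 0x20, last_addr is 0x1fff0061,
	 * so offset becomes 0x042, phys_addr becomes 0x1fff0000 and, with 4K
	 * pages, size becomes PAGE_ALIGN(0x1fff0062) - 0x1fff0000 = 0x1000:
	 * one page is mapped and the caller gets back vaddr + 0x042.
	 */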

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			goto err_free_memtype;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
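/*
 * Illustrative usage sketch, not part of this file: a PCI driver would
 * typically map a MMIO BAR and access it through the mmio helpers; pdev,
 * BAR 0 and the 0x04 register offset below are hypothetical.
 *
 *	void __iomem *regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *					     pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x04);	(hypothetical control register)
 *	iounmap(regs);
 */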

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
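/*
 * Illustrative sketch (hypothetical framebuffer BAR): write combining is
 * typically used for framebuffer-style apertures, where streaming writes
 * dominate and ordering of individual stores does not matter.
 *
 *	info->screen_base = ioremap_wc(pci_resource_start(pdev, 1),
 *				       pci_resource_len(pdev, 1));
 */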

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
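/*
 * Illustrative sketch (fw_table_phys/fw_table_len are hypothetical):
 * ioremap_cache() suits memory that behaves like RAM, e.g. a firmware
 * table, since the write-back mapping allows normal cached reads.
 *
 *	void __iomem *tbl = ioremap_cache(fw_table_phys, fw_table_len);
 */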

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space.   So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}
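/*
 * Illustrative pairing sketch: the /dev/mem read path uses these two
 * helpers around a user copy, roughly like this (error handling elided;
 * p, buf and sz are hypothetical local variables):
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	remaining = copy_to_user(buf, ptr, sz);
 *	unxlate_dev_mem_ptr(p, ptr);
 */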

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   phys_addr_t phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

void __init fixup_early_ioremap(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i]) {
			WARN_ON(1);
			break;
		}
	}

	early_ioremap_init();
}

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
		count);
	printk(KERN_WARNING
		"please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) no slot found\n",
			 (u64)phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
		       (u64)phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
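/*
 * Illustrative boot-time sketch (table_phys, table_len and hdr are
 * hypothetical): map a firmware table before the normal ioremap machinery
 * is available, copy out what is needed, then release the slot so one of
 * the FIX_BTMAPS_SLOTS becomes free again.
 *
 *	void __iomem *p = early_ioremap(table_phys, table_len);
 *	if (p) {
 *		memcpy_fromio(&hdr, p, sizeof(hdr));
 *		early_iounmap(p, table_len);
 *	}
 */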

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) no slot found\n",
			 addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size mismatch, mapped %08lx\n",
			 addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}