/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

static inline int phys_addr_valid(resource_size_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	return !(addr >> boot_cpu_data.x86_phys_bits);
#else
	return 1;
#endif
}

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
		x += phys_base;
	} else {
		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
		x -= PAGE_OFFSET;
		VIRTUAL_BUG_ON(!phys_addr_valid(x));
	}
	return x;
}
EXPORT_SYMBOL(__phys_addr);

bool __virt_addr_valid(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		if (x >= KERNEL_IMAGE_SIZE)
			return false;
		x += phys_base;
	} else {
		if (x < PAGE_OFFSET)
			return false;
		x -= PAGE_OFFSET;
		if (!phys_addr_valid(x))
			return false;
	}

	return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#else

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
	/* VMALLOC_* aren't constants  */
	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
	if (x < PAGE_OFFSET)
		return false;
	if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
		return false;
	if (x >= FIXADDR_START)
		return false;
	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#endif
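
/*
 * Note: __pa() and virt_addr_valid() in asm/page.h are defined in terms
 * of __phys_addr() and __virt_addr_valid() (on 32-bit, __pa() uses the
 * out-of-line __phys_addr() only with CONFIG_DEBUG_VIRTUAL), which is
 * why both helpers are exported above.
 */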

int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;
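
	/*
	 * Scan the E820 map. Region bounds are rounded inward (start up,
	 * end down) to page boundaries, so a page that is only partially
	 * covered by a RAM region is not reported as RAM.
	 */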
	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fall back to certain memory types for certain
		 * requested types:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug(
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Until we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
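
/*
 * Illustrative use (the BAR index and register offset below are made
 * up): a PCI driver typically maps a BAR and talks to it through the
 * mmio helpers:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);
 *	iounmap(regs);
 */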

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
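
/*
 * Illustrative use (names made up): write-combining suits large,
 * write-mostly apertures such as a framebuffer:
 *
 *	fb = ioremap_wc(fb_phys_base, fb_len);
 *
 * Note that with PAT disabled this quietly degrades to
 * ioremap_nocache().
 */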

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
					unsigned long size)
{
	unsigned long flags;
	void __iomem *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
	if (err < 0)
		return NULL;

	ret = __ioremap_caller(phys_addr, size, flags,
			       __builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
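
/*
 * Illustrative use (hypothetical): take the caching bits from an
 * existing protection value instead of naming a memory type directly:
 *
 *	prot_val = pgprot_val(vma->vm_page_prot) & _PAGE_CACHE_MASK;
 *	p = ioremap_prot(phys, size, prot_val);
 */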

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space.   So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}
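
/*
 * Sketch of the intended pairing, as used by drivers/char/mem.c for
 * /dev/mem accesses:
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	...copy to or from ptr...
 *	unxlate_dev_mem_ptr(p, ptr);
 */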

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				   unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
		count);
	printk(KERN_WARNING
		"please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx): no free slot found\n",
			 phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
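
/*
 * Illustrative boot-time use (names made up): peek at a firmware table
 * before the normal ioremap() machinery is up:
 *
 *	tbl = early_ioremap(tbl_phys, tbl_len);
 *	...parse the table through tbl...
 *	early_iounmap(tbl, tbl_len);
 */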

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx): slot not found\n",
			 addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]: inconsistent size, expected %08lx\n",
			 addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}