ioremap.c 17.7 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

9
#include <linux/bootmem.h>
L
Linus Torvalds 已提交
10
#include <linux/init.h>
11
#include <linux/io.h>
T
Thomas Gleixner 已提交
12 13 14
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
P
Pekka Paalanen 已提交
15
#include <linux/mmiotrace.h>
T
Thomas Gleixner 已提交
16

L
Linus Torvalds 已提交
17
#include <asm/cacheflush.h>
T
Thomas Gleixner 已提交
18 19
#include <asm/e820.h>
#include <asm/fixmap.h>
L
Linus Torvalds 已提交
20
#include <asm/pgtable.h>
T
Thomas Gleixner 已提交
21
#include <asm/tlbflush.h>
22
#include <asm/pgalloc.h>
23
#include <asm/pat.h>
L
Linus Torvalds 已提交
24

25
/*
 * Check whether a physical address fits within this CPU's physical
 * address width.  Returns non-zero when the address is representable.
 */
static inline int phys_addr_valid(resource_size_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	/* Valid iff no bits above x86_phys_bits are set. */
	return !(addr >> boot_cpu_data.x86_phys_bits);
#else
	/* A 32-bit phys addr can never exceed the architectural limit. */
	return 1;
#endif
}

34 35
#ifdef CONFIG_X86_64

J
Jiri Slaby 已提交
36
unsigned long __phys_addr(unsigned long x)
37
{
J
Jiri Slaby 已提交
38 39 40 41 42 43 44
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
		x += phys_base;
	} else {
		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
		x -= PAGE_OFFSET;
45
		VIRTUAL_BUG_ON(!phys_addr_valid(x));
J
Jiri Slaby 已提交
46 47
	}
	return x;
48
}
J
Jiri Slaby 已提交
49
EXPORT_SYMBOL(__phys_addr);
50

51 52 53 54 55 56 57 58 59 60 61
/*
 * Non-fatal variant of the __phys_addr() range checks (64-bit): report
 * whether @x is a valid kernel virtual address backed by a valid pfn,
 * instead of BUGging on a bad one.
 */
bool __virt_addr_valid(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		/* Kernel image mapping. */
		x -= __START_KERNEL_map;
		if (x >= KERNEL_IMAGE_SIZE)
			return false;
		x += phys_base;
	} else {
		/* Direct mapping of RAM. */
		if (x < PAGE_OFFSET)
			return false;
		x -= PAGE_OFFSET;
		if (!phys_addr_valid(x))
			return false;
	}

	return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

70 71
#else

J
Jiri Slaby 已提交
72
#ifdef CONFIG_DEBUG_VIRTUAL
J
Jiri Slaby 已提交
73 74
unsigned long __phys_addr(unsigned long x)
{
75
	/* VMALLOC_* aren't constants  */
76
	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
77
	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
J
Jiri Slaby 已提交
78 79 80
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
J
Jiri Slaby 已提交
81
#endif
J
Jiri Slaby 已提交
82

83 84 85 86
bool __virt_addr_valid(unsigned long x)
{
	if (x < PAGE_OFFSET)
		return false;
87
	if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
88
		return false;
89 90
	if (x >= FIXADDR_START)
		return false;
91 92 93 94
	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

T
Thomas Gleixner 已提交
95 96
#endif

97 98
int page_is_ram(unsigned long pagenr)
{
99
	resource_size_t addr, end;
100 101
	int i;

102 103 104 105 106 107 108 109
	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

A
Arjan van de Ven 已提交
110 111 112 113 114 115 116
	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;
117

118 119 120 121 122 123 124 125
	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
126 127


128 129 130 131 132 133
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

134 135 136 137
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
138 139
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
140
{
141
	unsigned long nrpages = size >> PAGE_SHIFT;
142
	int err;
143

144 145
	switch (prot_val) {
	case _PAGE_CACHE_UC:
146
	default:
147
		err = _set_memory_uc(vaddr, nrpages);
148
		break;
149 150 151
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
152
	case _PAGE_CACHE_WB:
153
		err = _set_memory_wb(vaddr, nrpages);
154 155
		break;
	}
156 157 158 159

	return err;
}

L
Linus Torvalds 已提交
160 161 162 163 164 165 166 167 168
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
169 170
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
L
Linus Torvalds 已提交
171
{
172 173
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
174 175
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
176
	struct vm_struct *area;
177
	unsigned long new_prot_val;
178
	pgprot_t prot;
179
	int retval;
P
Pekka Paalanen 已提交
180
	void __iomem *ret_addr;
L
Linus Torvalds 已提交
181 182 183 184 185 186

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

187
	if (!phys_addr_valid(phys_addr)) {
188
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
189
		       (unsigned long long)phys_addr);
190 191 192 193
		WARN_ON_ONCE(1);
		return NULL;
	}

L
Linus Torvalds 已提交
194 195 196
	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
197
	if (is_ISA_range(phys_addr, last_addr))
T
Thomas Gleixner 已提交
198
		return (__force void __iomem *)phys_to_virt(phys_addr);
L
Linus Torvalds 已提交
199

200 201 202 203
	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
204 205
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");
206

L
Linus Torvalds 已提交
207 208 209
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
A
Andres Salomon 已提交
210 211 212
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {
213

214 215 216
		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
T
Thomas Gleixner 已提交
217
			return NULL;
218
		WARN_ON_ONCE(is_ram);
L
Linus Torvalds 已提交
219 220
	}

221 222 223 224 225 226 227
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

A
Andi Kleen 已提交
228
	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
229 230
						prot_val, &new_prot_val);
	if (retval) {
231
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
232 233 234 235
		return NULL;
	}

	if (prot_val != new_prot_val) {
236 237 238
		/*
		 * Do not fallback to certain memory types with certain
		 * requested type:
239 240
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
241
		 * - request is write-combine, return cannot be write-back
242
		 */
243
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
244 245 246
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
247
		     new_prot_val == _PAGE_CACHE_WB)) {
248
			pr_debug(
249
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
250 251
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
252
				prot_val, new_prot_val);
253 254 255 256 257 258
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

259 260
	switch (prot_val) {
	case _PAGE_CACHE_UC:
261
	default:
262
		prot = PAGE_KERNEL_IO_NOCACHE;
263
		break;
264
	case _PAGE_CACHE_UC_MINUS:
265
		prot = PAGE_KERNEL_IO_UC_MINUS;
266
		break;
267
	case _PAGE_CACHE_WC:
268
		prot = PAGE_KERNEL_IO_WC;
269
		break;
270
	case _PAGE_CACHE_WB:
271
		prot = PAGE_KERNEL_IO;
272 273
		break;
	}
274

L
Linus Torvalds 已提交
275 276 277
	/*
	 * Ok, go for it..
	 */
278
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
L
Linus Torvalds 已提交
279 280 281
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
T
Thomas Gleixner 已提交
282
	vaddr = (unsigned long) area->addr;
283 284

	if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
285
		free_memtype(phys_addr, phys_addr + size);
286
		free_vm_area(area);
L
Linus Torvalds 已提交
287 288
		return NULL;
	}
289

290
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
291
		free_memtype(phys_addr, phys_addr + size);
292
		free_vm_area(area);
293 294 295
		return NULL;
	}

P
Pekka Paalanen 已提交
296
	ret_addr = (void __iomem *) (vaddr + offset);
297
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
P
Pekka Paalanen 已提交
298 299

	return ret_addr;
L
Linus Torvalds 已提交
300 301 302 303 304 305 306 307 308 309 310
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
311
 * address.
L
Linus Torvalds 已提交
312 313 314
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
315
 * the PCI bus. Note that there are other caches and buffers on many
L
Linus Torvalds 已提交
316 317 318 319
 * busses. In particular driver authors should read up on PCI writes
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
320
 *
L
Linus Torvalds 已提交
321 322
 * Must be freed with iounmap.
 */
323
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
L
Linus Torvalds 已提交
324
{
325 326
	/*
	 * Ideally, this should be:
327
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
328 329 330 331 332 333 334
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
335
				__builtin_return_address(0));
L
Linus Torvalds 已提交
336
}
337
EXPORT_SYMBOL(ioremap_nocache);
L
Linus Torvalds 已提交
338

339 340 341 342 343 344 345 346 347 348
/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
349
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
350
{
351
	if (pat_enabled)
352 353
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
354 355 356 357 358
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

359
/*
 * Map bus memory into CPU space with write-back caching.
 * Must be freed with iounmap.
 */
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

366 367 368 369
/*
 * Map a physical range letting the PAT code pick the memory type.
 * Used for /dev/mem style accesses where the caller has no preference.
 */
static void __iomem *ioremap_default(resource_size_t phys_addr,
					unsigned long size)
{
	unsigned long flags;
	void __iomem *ret;
	int err;

	/*
	 * Passing -1 asks reserve_memtype() to choose:
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
	if (err < 0)
		return NULL;

	ret = __ioremap_caller(phys_addr, size, flags,
			       __builtin_return_address(0));

	/* __ioremap_caller() re-reserved the range; drop our reservation. */
	free_memtype(phys_addr, phys_addr + size);
	return ret;
}

389 390 391 392 393 394 395 396
/*
 * Map bus memory using the cache attribute bits supplied in @prot_val
 * (only the _PAGE_CACHE_MASK bits are honoured).
 */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

397 398 399 400 401 402
/**
 * iounmap - Free a IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
L
Linus Torvalds 已提交
403 404
void iounmap(volatile void __iomem *addr)
{
405
	struct vm_struct *p, *o;
A
Andrew Morton 已提交
406 407

	if ((void __force *)addr <= high_memory)
L
Linus Torvalds 已提交
408 409 410 411 412 413 414
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space.   So handle that here.
	 */
415 416
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
L
Linus Torvalds 已提交
417 418
		return;

419 420
	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);
421

P
Pekka Paalanen 已提交
422 423
	mmiotrace_iounmap(addr);

424 425 426 427 428 429 430
	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
431
		if (p->addr == (void __force *)addr)
432 433 434 435 436
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
437
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
A
Andrew Morton 已提交
438
		dump_stack();
439
		return;
L
Linus Torvalds 已提交
440 441
	}

442 443
	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

444
	/* Finally remove it */
445
	o = remove_vm_area((void __force *)addr);
446
	BUG_ON(p != o || o == NULL);
447
	kfree(p);
L
Linus Torvalds 已提交
448
}
449
EXPORT_SYMBOL(iounmap);
L
Linus Torvalds 已提交
450

451 452 453 454 455 456 457 458 459 460 461 462 463
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

I
Ingo Molnar 已提交
464
	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
465 466 467 468 469 470 471 472 473 474 475 476 477 478 479
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

/*
 * Undo xlate_dev_mem_ptr(): RAM pages came from __va() and need no
 * teardown; anything else was ioremapped and must be unmapped.
 */
void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}

480
static int __initdata early_ioremap_debug;
I
Ingo Molnar 已提交
481 482 483 484 485

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

486
	return 0;
I
Ingo Molnar 已提交
487
}
488
early_param("early_ioremap_debug", early_ioremap_debug_setup);
I
Ingo Molnar 已提交
489

490
static __initdata int after_paging_init;
491
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
492

493
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
494
{
495 496 497
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
498 499 500 501
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
502 503
}

504
/* Index into the static boot-fixmap pte page for @addr. */
static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

509 510
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

511
void __init early_ioremap_init(void)
512
{
513
	pmd_t *pmd;
514
	int i;
515

I
Ingo Molnar 已提交
516
	if (early_ioremap_debug)
I
Ingo Molnar 已提交
517
		printk(KERN_INFO "early_ioremap_init()\n");
I
Ingo Molnar 已提交
518

519
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
520
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
521

522
	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
523 524
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);
525

526
	/*
527
	 * The boot-ioremap range spans multiple pmds, for which
528 529
	 * we are not prepared:
	 */
530
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
531
		WARN_ON(1);
532 533
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
534
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
535
			fix_to_virt(FIX_BTMAP_BEGIN));
536
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
537
			fix_to_virt(FIX_BTMAP_END));
538 539 540 541

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
542
	}
543 544
}

545
/* Called once paging is up: switch fixmap helpers to the real path. */
void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

550
/*
 * Install (or, with empty @flags, clear) a boot-time fixmap pte for
 * slot @idx pointing at physical address @phys.
 */
static void __init __early_set_fixmap(enum fixed_addresses idx,
				   unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* A zero pgprot means "tear the mapping down". */
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

569
/* Route to the real fixmap code once paging is initialized. */
static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

578
/* Counterpart of early_set_fixmap(): remove the mapping for @idx. */
static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

586
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
587
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
588

589 590
static int __init check_early_ioremap_leak(void)
{
591 592 593 594 595 596 597 598
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
599
		return 0;
600
	WARN(1, KERN_WARNING
601
	       "Debug warning: early ioremap leak of %d areas detected.\n",
602
		count);
603
	printk(KERN_WARNING
604
		"please boot with early_ioremap_debug and report the dmesg.\n");
605 606 607 608 609

	return 1;
}
late_initcall(check_early_ioremap_leak);

610 611
/*
 * Boot-time ioremap: map @size bytes at @phys_addr through one of the
 * fixed FIX_BTMAP slots with protection @prot.  Only usable while
 * system_state == SYSTEM_BOOTING; pair with early_iounmap().
 *
 * Returns the (offset-adjusted) virtual address, or NULL when no slot
 * is free, the request wraps/is empty, or it exceeds NR_FIX_BTMAPS pages.
 */
static void __init __iomem *
__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	/* Find a free boot-time mapping slot. */
	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		/* Fixed typo: message previously said "early_iomap". */
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) not found slot\n",
			 phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	/* Remember the requested size for the consistency check at unmap. */
	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

J
Jeremy Fitzhardinge 已提交
683
/* Remap an IO device */
684
void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
J
Jeremy Fitzhardinge 已提交
685 686 687 688 689
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
690
void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
J
Jeremy Fitzhardinge 已提交
691 692 693 694
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}

695
/*
 * Tear down a mapping created by early_ioremap()/early_memremap().
 * @addr and @size must exactly match the values used at map time.
 */
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	/* Locate the slot this address was handed out from. */
	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n",
			 addr, size);
		WARN_ON(1);
		return;
	}

	/* The unmap size must match what was originally mapped. */
	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
			 addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	/* Clear every fixmap pte the mapping occupied. */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}