/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

#endif
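
/*
 * Illustrative note (not part of the original file): on 64-bit, __pa() and
 * virt_to_phys() are expected to funnel through __phys_addr(), so both
 * kinds of kernel virtual address translate correctly, e.g.:
 *
 *	phys = __pa(_text);			kernel text, __START_KERNEL_map based
 *	phys = __pa(page_address(page));	direct mapping, PAGE_OFFSET based
 */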

int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		/*
		 * Sanity check: Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area, which is the
		 * PCI BIOS area.
		 */
		if (addr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    end < (BIOS_END >> PAGE_SHIFT))
			continue;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
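
/*
 * Usage note (illustrative, not part of the original file): callers pass a
 * page frame number rather than a physical address; __ioremap() below walks
 * its target range one pfn at a time, e.g.:
 *
 *	if (page_is_ram(phys_addr >> PAGE_SHIFT))
 *		return NULL;	(refuse to remap normal RAM)
 */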

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
			       pgprot_t prot)
{
	unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
	int err, level;

	/* No change for pages after the last mapping */
	if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
		return 0;

	npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	vaddr = (unsigned long) __va(phys_addr);

	/*
	 * If there is no identity map for this address,
	 * change_page_attr_addr is unnecessary
	 */
	if (!lookup_address(vaddr, &level))
		return 0;

	/*
	 * Must use an address here and not struct page because the
	 * phys addr can be in a hole between nodes and not have a
	 * memmap entry.
	 */
	err = change_page_attr_addr(vaddr, npages, prot);

	if (!err)
		global_flush_tlb();

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (offset = phys_addr >> PAGE_SHIFT; offset < max_pfn_mapped &&
	     (offset << PAGE_SHIFT) < last_addr; offset++) {
		if (page_is_ram(offset))
			return NULL;
	}

	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}

	if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
}
EXPORT_SYMBOL(ioremap_nocache);
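
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * PCI driver maps a BAR with ioremap_nocache(), talks to it through the
 * mmio helpers, and releases the mapping with iounmap(). CTRL and STATUS
 * are hypothetical register offsets:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL);
 *	status = readl(regs + STATUS);
 *	iounmap(regs);
 */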

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space.   So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				   unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}


int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(early_ioremap_nested < 0);

	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
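
/*
 * Example (illustrative sketch, not part of the original file): boot code
 * that must inspect firmware data before the normal ioremap() machinery is
 * available uses the temporary fixmap slots managed above; table_phys and
 * table_len are hypothetical:
 *
 *	void *map = early_ioremap(table_phys, table_len);
 *
 *	if (map) {
 *		memcpy(&table, map, sizeof(table));
 *		early_iounmap(map, table_len);
 *	}
 */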

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */