/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
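
/*
 * Example (illustrative sketch, not part of this file): the PAT code keeps
 * the kernel's direct mapping consistent with a new ioremap alias of the
 * same physical range along the lines of:
 *
 *	if (ioremap_change_attr((unsigned long)__va(phys_addr), size,
 *				_PAGE_CACHE_MODE_WC))
 *		goto err;
 *
 * Leaving the direct map WB while an alias is WC/UC would be exactly the
 * cache attribute conflict described above.
 */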

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to smaller pages (toward 4KB)
 * when the mapping range is covered by a non-WB MTRR type.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	int ram_region;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped.
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 */
	/* First check if the whole region can be identified as RAM or not */
	ram_region = region_is_ram(phys_addr, size);
	if (ram_region > 0) {
		WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
				(unsigned long int)phys_addr,
				(unsigned long int)last_addr);
		return NULL;
	}

	/* If the region could not be identified (-1), check page by page */
	if (ram_region < 0) {
		pfn      = phys_addr >> PAGE_SHIFT;
		last_pfn = last_addr >> PAGE_SHIFT;
		if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1)
			return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}
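
/*
 * Example (illustrative; the address is hypothetical): the page-alignment
 * fixup above is invisible to callers. Mapping 8 bytes at 0xfed0f010 rounds
 * phys_addr down to 0xfed0f000, maps one full page, and returns the page's
 * vaddr + 0x10:
 *
 *	void __iomem *regs = ioremap_nocache(0xfed0f010, 8);
 */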

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Until we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need strong UC, or can
	 * already be converted over to it, can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
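
/*
 * Example usage (a minimal sketch of a hypothetical PCI driver; "pdev" and
 * MYDEV_CTRL are illustrative names, not part of this file):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + MYDEV_CTRL);
 *	...
 *	iounmap(regs);
 *
 * Accesses must go through the mmio helpers, never through plain pointer
 * dereferences.
 */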

/**
 * ioremap_uc     -   map bus memory into CPU space as strongly uncachable
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC.  This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(ioremap_uc);
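
/*
 * Example (illustrative): ioremap_uc() is for ranges where even the UC-
 * default of ioremap_nocache() is too weak, e.g. a BAR whose mapping must
 * not be weakened to WC by an overlapping MTRR:
 *
 *	mmio = ioremap_uc(bar_start, bar_len);
 *
 * where bar_start/bar_len are assumed names for the BAR's physical range.
 */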

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
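
/*
 * Example (hypothetical framebuffer driver; the snippet is illustrative):
 * linear framebuffers are the classic write-combining user, since write
 * bursts matter and read caching does not:
 *
 *	info->screen_base = ioremap_wc(info->fix.smem_start,
 *				       info->fix.smem_len);
 */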

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
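
/*
 * Example (illustrative): ioremap_cache() maps with full write-back
 * caching, so it is only suitable for memory-like targets such as ACPI
 * tables, never for device registers with read side effects:
 *
 *	void *tbl = (void __force *)ioremap_cache(table_phys, table_len);
 *
 * table_phys/table_len are assumed names, not part of this file.
 */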

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
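
/*
 * Example (illustrative): ioremap_prot() takes raw protection bits and
 * converts them back into a cache mode for __ioremap_caller(). The generic
 * access_process_vm() path for VM_IO mappings uses it roughly like:
 *
 *	maddr = ioremap_prot(phys_addr, PAGE_SIZE,
 *			     pgprot_val(vma->vm_page_prot));
 */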

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
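
/*
 * Example (sketch): every successful ioremap_*() must be balanced by
 * exactly one iounmap() of the returned pointer, per the comment above;
 * concurrent double unmaps of the same pointer are the caller's bug:
 *
 *	regs = ioremap_nocache(phys, len);
 *	if (regs) {
 *		...
 *		iounmap(regs);
 *	}
 */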

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return cpu_has_gbpages;
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return cpu_has_pse;
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	unsigned long vaddr;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
	/* Only add the offset on success and return NULL if the ioremap() failed: */
	if (vaddr)
		vaddr += offset;

	return (void *)vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}
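
/*
 * Example (illustrative): the /dev/mem read/write path brackets each page
 * access with this pair, so non-RAM pages get a short-lived cached mapping
 * while RAM pages go straight through __va():
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	remaining = copy_to_user(buf, ptr, sz);
 *	unxlate_dev_mem_ptr(p, ptr);
 */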

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
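
/*
 * Example (illustrative): with the BTMAP pte page wired up above, early
 * boot code can use the generic early_ioremap()/early_iounmap() helpers
 * before the normal ioremap() machinery works, e.g. to inspect a firmware
 * table (table_phys/len are assumed names):
 *
 *	void __iomem *map = early_ioremap(table_phys, len);
 *	...
 *	early_iounmap(map, len);
 */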

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}