/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
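/*
 * For example (illustrative numbers, not taken from this file): a request
 * for phys_addr 0xfebc1004 with size 0x30 yields offset 0x004, a
 * page-aligned mapping of 0xfebc1000-0xfebc1fff, and a returned pointer
 * equal to the mapped page's virtual address plus 0x004.
 */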
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	int ram_region;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	/* First check if the whole region can be identified as RAM or not */
	ram_region = region_is_ram(phys_addr, size);
	if (ram_region > 0) {
		WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
				(unsigned long int)phys_addr,
				(unsigned long int)last_addr);
		return NULL;
	}

	/* If the region could not be identified (-1), check page by page */
	if (ram_region < 0) {
		pfn      = phys_addr >> PAGE_SHIFT;
		last_pfn = last_addr >> PAGE_SHIFT;
		if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1)
			return NULL;
	}
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
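
/*
 * Typical usage (an illustrative sketch, not code from this file;
 * MYDEV_BAR_PHYS and MYDEV_BAR_LEN are hypothetical device constants):
 *
 *	void __iomem *regs = ioremap_nocache(MYDEV_BAR_PHYS, MYDEV_BAR_LEN);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x04);
 *	iounmap(regs);
 */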

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
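
/*
 * Example (an illustrative sketch; FB_BASE and FB_LEN are hypothetical):
 * a frame buffer is a typical write-combined mapping, letting the CPU
 * batch streaming pixel writes in its write buffers:
 *
 *	void __iomem *fb = ioremap_wc(FB_BASE, FB_LEN);
 *
 * Note that when PAT is disabled this falls back to ioremap_nocache().
 */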

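/**
 * ioremap_cache - map bus memory into CPU space with write-back caching
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * Must be freed with iounmap.
 */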
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

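/**
 * ioremap_prot - map bus memory into CPU space with the given protection
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 * @prot_val: raw page protection value, translated via pgprot2cachemode()
 *
 * Must be freed with iounmap.
 */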
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}