pgtable_64.c
/*
 *  This file contains ioremap and related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>

#include "mmu_decl.h"

/* Some sanity checking */
#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

unsigned long ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PPC_MMU_NOHASH
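/*
 * Allocate and zero a page table of the given size during early boot:
 * use bootmem once it is up, otherwise fall back to memblock.
 */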
static void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	if (init_bootmem_done)
		pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
	else
		pt = __va(memblock_alloc_base(size, size,
					 __pa(MAX_DMA_ADDRESS)));
	memset(pt, 0, size);

	return pt;
}
#endif /* CONFIG_PPC_MMU_NOHASH */

/*
 * map_kernel_page is currently only called by __ioremap.  It adds an
 * entry to the ioremap page table and an entry to the HPT, possibly
 * bolting it.
 */
int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
							  __pgprot(flags)));
	} else {
#ifdef CONFIG_PPC_MMU_NOHASH
		/* Warning! This will blow up if bootmem is not initialized,
		 * which our ppc64 code is keen to do.  We'll need to fix it
		 * and/or be more careful.
		 */
		pgdp = pgd_offset_k(ea);
#ifdef PUD_TABLE_SIZE
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
#endif /* PUD_TABLE_SIZE */
		pudp = pud_offset(pgdp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
							  __pgprot(flags)));
#else /* CONFIG_PPC_MMU_NOHASH */
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 *
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping IO "
			       "memory at %016lx !\n", pa);
			return -ENOMEM;
		}
#endif /* !CONFIG_PPC_MMU_NOHASH */
	}
	return 0;
}
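
/*
 * Example (a minimal sketch, not code from this file): establishing a
 * single cache-inhibited, guarded kernel mapping for one page of MMIO
 * space, roughly what __ioremap_at() below does for each page.  Here
 * "ea" and "pa" stand for a page-aligned virtual/physical address pair
 * chosen by the caller.
 *
 *	if (map_kernel_page((unsigned long)ea, pa,
 *			    pgprot_val(PAGE_KERNEL) | _PAGE_NO_CACHE |
 *			    _PAGE_GUARDED))
 *		return NULL;
 */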


/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & _PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                  for an IO mapping. This is used for mappings that
 *                  are manipulated manually, like partial unmapping of
 *                  PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.  Once the vmalloc area is
	 * available, we use it.  Before that, we map using addresses
	 * going up from ioremap_bot.  vmalloc will use the addresses
	 * from ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}
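
/*
 * The wrappers below differ only in the page flags they request; each
 * passes its caller's return address down so the resulting vmalloc
 * area is attributed to the real caller (e.g. in /proc/vmallocinfo).
 */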

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
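
/*
 * Typical driver usage (a sketch; "res", CTRL_REG and CTRL_ENABLE are
 * hypothetical names, not defined in this file):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	out_be32(regs + CTRL_REG, CTRL_ENABLE);
 *	...
 *	iounmap(regs);
 */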

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			     unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_RW)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	flags &= ~(_PAGE_USER | _PAGE_EXEC);

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}


/*
 * Unmap an IO region and remove it from the vmalloc'd list.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!mem_init_done)
		return;
	
	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifdef CONFIG_PPC_64K_PAGES
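/*
 * With CONFIG_PPC_64K_PAGES a PTE page table is much smaller than a
 * 64K page, so one page is carved into PTE_FRAG_NR fragments of
 * PTE_FRAG_SIZE bytes each.  mm->context.pte_frag points at the next
 * free fragment and the struct page's reference count tracks how many
 * fragments are still live, so the backing page is only freed when the
 * last fragment is released.
 */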
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_REPEAT | __GFP_ZERO);
	if (!page)
		return NULL;

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find context.pte_frag already set, return the allocated
	 * page with a single fragment count; otherwise publish the
	 * remaining fragments for later allocations.
	 */
	if (likely(!mm->context.pte_frag)) {
		atomic_set(&page->_count, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	if (!kernel)
		pgtable_page_ctor(page);

	return (pte_t *)ret;
}

pte_t *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}

void page_table_free(struct mm_struct *mm, unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);
	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#ifdef CONFIG_SMP
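/*
 * On SMP the free is routed through tlb_remove_table(), which defers
 * it until it is safe with respect to concurrent lockless page table
 * walkers.
 */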
static void page_table_free_rcu(void *table)
{
	struct page *page = virt_to_page(table);
	if (put_page_testzero(page)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
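	/* Encode the index shift in the low bits of the table pointer so
	 * __tlb_remove_table() can recover it when the batch is flushed.
	 */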
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift)
		/* PTE page needs special handling */
		page_table_free_rcu(table);
	else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		struct page *page = virt_to_page(table);
		if (put_page_testzero(page)) {
			pgtable_page_dtor(page);
			free_hot_cold_page(page, 0);
		}
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif
#endif /* CONFIG_PPC_64K_PAGES */