#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/*  asm/pgtable.h:  Defines and functions used to work
 *                        with Sparc page tables.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h>

#include <linux/spinlock.h>
#include <linux/swap.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>


struct vm_area_struct;
struct page;

extern void load_mmu(void);
extern unsigned long calc_highpages(void);

#define pte_ERROR(e)   __builtin_trap()
#define pmd_ERROR(e)   __builtin_trap()
#define pgd_ERROR(e)   __builtin_trap()

#define PMD_SHIFT		22
#define PMD_SIZE        	(1UL << PMD_SHIFT)
#define PMD_MASK        	(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr) 	(((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT     	SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE      	SRMMU_PGDIR_SIZE
#define PGDIR_MASK      	SRMMU_PGDIR_MASK
#define PTRS_PER_PTE    	1024
#define PTRS_PER_PMD    	SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD    	SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD	PAGE_OFFSET / SRMMU_PGDIR_SIZE
#define FIRST_USER_ADDRESS	0
#define PTE_SIZE		(PTRS_PER_PTE*4)

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory */
extern pgd_t swapper_pg_dir[1024];

extern void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/*         xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

extern int num_contexts;

/* The first physical page can be anywhere; the following is needed so that
 * va-->pa and vice versa conversions work properly without a performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t * __bad_pagetable(void);
extern pte_t __bad_page(void);
extern unsigned long empty_zero_page;

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the MMU and the CPU are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
	return value;
}

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

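/* An SRMMU pte/ptd stores the physical address shifted right by four bits
 * (see mk_pte_phys() below), so bits 35:32 of the physical address land in
 * the top nibble of the entry.  A nonzero top nibble therefore means the
 * entry points at device (I/O) space rather than ordinary RAM.
 */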
static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	if (srmmu_device_memory(pgd_val(pgd))) {
		return ~0;
	} else {
		unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
		return (unsigned long)__nocache_va(v << 4);
	}
}

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	int i;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
		set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}

static inline int pgd_none(pgd_t pgd)
{
	return !(pgd_val(pgd) & 0xFFFFFFF);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pgd_present(pgd_t pgd)
{
	return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pte((pte_t *)pgdp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

/*
 * The following only work if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
	return pte_val(pte) & SRMMU_FILE;
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define pte_mkspecial(pte)    (pte)

#define pfn_pte(pfn, prot)		mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just copy the PTE
		 * over directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	prot &= ~__pgprot(SRMMU_CACHE);
	return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page_vaddr(*dir) +
		((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address);

/*
 * This shortcut works on sun4m (and sun4d) because the nocache area is static.
 */
#define pte_offset_map(d, a)		pte_offset_kernel(d,a)
#define pte_unmap(pte)		do{}while(0)
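
/* Example walk using the helpers above (a sketch; "vaddr" is illustrative
 * and the pgd_none()/pmd_none() checks a real caller would do are omitted):
 *
 *	pgd_t *pgd = pgd_offset_k(vaddr);
 *	pmd_t *pmd = pmd_offset(pgd, vaddr);
 *	pte_t *pte = pte_offset_kernel(pmd, vaddr);
 */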

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT     0x1
#define FAULT_CODE_WRITE    0x2
#define FAULT_CODE_USER     0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
                      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
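
/* Example round trip (a sketch; "type" and "offset" are illustrative values
 * assumed to fit in SRMMU_SWP_TYPE_MASK and SRMMU_SWP_OFF_MASK):
 *
 *	swp_entry_t ent = __swp_entry(type, offset);
 *	pte_t pte = __swp_entry_to_pte(ent);
 *
 * after which __swp_type(__pte_to_swp_entry(pte)) yields type again and
 * __swp_offset(__pte_to_swp_entry(pte)) yields offset again.
 */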

/* file-offset-in-pte helpers */
static inline unsigned long pte_to_pgoff(pte_t pte)
{
	return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
}

static inline pte_t pgoff_to_pte(unsigned long pgoff)
{
	return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
}

/*
 * This is made a constant because mm/fremap.c required a constant.
 */
#define PTE_FILE_MAX_BITS 24

/*
 * MMU context management: each hardware context number in use is tracked
 * on the free or used list below together with the mm that owns it.
 */
struct ctx_list {
	struct ctx_list *next;
	struct ctx_list *prev;
	unsigned int ctx_number;
	struct mm_struct *ctx_mm;
};

extern struct ctx_list *ctx_list_pool;  /* Dynamically allocated */
extern struct ctx_list ctx_free;        /* Head of free list */
extern struct ctx_list ctx_used;        /* Head of used contexts list */

#define NO_CONTEXT     -1

static inline void remove_from_ctx_list(struct ctx_list *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
	entry->next = head;
	(entry->prev = head->prev)->next = entry;
	head->prev = entry;
}
#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)

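/* __get_phys() and __get_iospace() probe the MMU for the pte that currently
 * maps a virtual address and recover, respectively, the physical address and
 * the 4-bit I/O space number from it (sun4m and sun4d only).
 */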
static inline unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)

extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
			   unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
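
/* Example caller side (a sketch; the I/O space number and physical address
 * below are made up): a driver mapping one page of device registers packs
 * the space into the pfn before calling io_remap_pfn_range():
 *
 *	unsigned long pfn = MK_IOSPACE_PFN(0xf, paddr >> PAGE_SHIFT);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
 *			   pgprot_noncached(vma->vm_page_prot));
 */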

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			  \
	}								  \
	__changed;							  \
})

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */