#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/*  asm/pgtable.h:  Defines and functions used to work
 *                        with Sparc page tables.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h>

#include <linux/spinlock.h>
#include <linux/swap.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>

struct vm_area_struct;
struct page;

extern void load_mmu(void);
extern unsigned long calc_highpages(void);

#define pte_ERROR(e)   __builtin_trap()
#define pmd_ERROR(e)   __builtin_trap()
#define pgd_ERROR(e)   __builtin_trap()

#define PMD_SHIFT		22
#define PMD_SIZE        	(1UL << PMD_SHIFT)
#define PMD_MASK        	(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr) 	(((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT     	SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE      	SRMMU_PGDIR_SIZE
#define PGDIR_MASK      	SRMMU_PGDIR_MASK
#define PTRS_PER_PTE    	1024
#define PTRS_PER_PMD    	SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD    	SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD	(PAGE_OFFSET / SRMMU_PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0
#define PTE_SIZE		(PTRS_PER_PTE*4)

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init_mm.
 * srmmu.c will assign the real one (which is dynamically sized) */
#define swapper_pg_dir NULL

extern void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/*         xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
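/* Note that private writable mappings (__P010/__P011/__P110/__P111)
 * resolve to PAGE_COPY rather than a writable protection: the first
 * write faults and the fault handler performs the copy-on-write.
 * Shared writable mappings go straight to PAGE_SHARED.
 */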

/* First physical page can be anywhere, the following is needed so that
 * va-->pa and vice versa conversions work properly without performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;
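/* A sketch of what this buys us (the actual __pa()/__va() macros live
 * in asm/page.h):
 *
 *	__pa(vaddr) == (unsigned long)(vaddr) - PAGE_OFFSET + phys_base
 *	__va(paddr) == (void *)((unsigned long)(paddr) - phys_base + PAGE_OFFSET)
 *
 * i.e. a single add/subtract with no table lookup.
 */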

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page;

#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the MMU and the CPU are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
	return value;
}
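/* For illustration, contrast with a plain store, which can lose a
 * ref/mod bit that the MMU sets between our read and write of the pte:
 *
 *	pte_val(*ptep) = newval;			<- racy
 *	srmmu_swap((unsigned long *)ptep, newval);	<- atomic
 */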

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm, addr, ptep, pteval)	set_pte(ptep, pteval)

static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}
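/* Rationale: an SRMMU entry stores the physical address shifted right
 * by 4, so bits 31:28 of the value are physical address bits 35:32.
 * Main memory lives in physical space 0; a nonzero top nibble means
 * the entry points into one of the I/O spaces.
 */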

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}
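/* The shift above follows from the pa >> 4 packing:
 * pfn = pa >> PAGE_SHIFT = (pa >> 4) >> (PAGE_SHIFT - 4).
 */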

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	if (srmmu_device_memory(pgd_val(pgd))) {
		return ~0;
	} else {
		unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
		return (unsigned long)__nocache_va(v << 4);
	}
}

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	int i;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
		set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}
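/* Why the loop: Linux's 1024-entry pte page is assembled from hardware
 * tables of SRMMU_REAL_PTRS_PER_PTE (64) entries each, so every pmd
 * entry carries 1024/64 = 16 hardware table pointers in pmdv[], and
 * all of them have to be zapped together.
 */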

static inline int pgd_none(pgd_t pgd)
{
	return !(pgd_val(pgd) & 0xFFFFFFF);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pgd_present(pgd_t pgd)
{
	return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pte((pte_t *)pgdp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

/*
 * The following only work if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
	return pte_val(pte) & SRMMU_FILE;
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define pte_mkspecial(pte)    (pte)

#define pfn_pte(pfn, prot)		mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just copy the pte over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
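/* All three builders pack the physical address as pa >> 4; mk_pte_io()
 * additionally places the 4-bit I/O space number in pte bits 31:28
 * (physical address bits 35:32).  E.g. a page at physical 0x12345000
 * in space 0 becomes __pte(0x01234500 | pgprot_val(pgprot)).
 */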

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	pgprot_val(prot) &= ~SRMMU_CACHE;
	return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page_vaddr(*dir) +
		((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address);

/*
 * This shortcut works on sun4m (and sun4d) because the nocache area is static.
 */
#define pte_offset_map(d, a)		pte_offset_kernel(d, a)
#define pte_unmap(pte)			do { } while (0)
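/* A typical three-level walk then looks like this (illustration only,
 * the usual locking rules apply):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	...
 *	pte_unmap(pte);
 */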

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT     0x1
#define FAULT_CODE_WRITE    0x2
#define FAULT_CODE_USER     0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
                      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
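/* Round trip, for illustration:
 *
 *	swp_entry_t e = __swp_entry(type, offset);
 *
 * gives back __swp_type(e) == type and __swp_offset(e) == offset,
 * provided both values fit within their SRMMU_SWP_*_MASK.
 */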

/* file-offset-in-pte helpers */
static inline unsigned long pte_to_pgoff(pte_t pte)
{
	return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
}

static inline pte_t pgoff_to_pte(unsigned long pgoff)
{
	return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
}

/*
 * This must be a compile-time constant because mm/fremap.c requires one.
 */
#define PTE_FILE_MAX_BITS 24
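/* Sanity check on the encoding: pgoff_to_pte() shifts the offset up by
 * SRMMU_PTE_FILE_SHIFT in a 32-bit pte, so a 24-bit offset presumes
 * SRMMU_PTE_FILE_SHIFT <= 32 - PTE_FILE_MAX_BITS = 8, leaving the low
 * bits for SRMMU_FILE and friends.
 */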

static inline unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}
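/* Note: the pte stores pa >> 4, so __get_phys() recovers the physical
 * page bits that fit in a 32-bit long; the I/O space nibble (physical
 * bits 35:32) is shifted out and must be fetched via __get_iospace().
 */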

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))

/*
 * For both sparc32 and sparc64, the pfn in io_remap_pfn_range() carries
 * <iospace> in its high 4 bits.  These macros/functions put it there or
 * get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)
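/* Worked example, assuming 32-bit longs: a device page at physical
 * 0x512345000 (space 5, pfn 0x12345) is encoded as
 * MK_IOSPACE_PFN(5, 0x12345) == 0x50012345, and GET_IOSPACE()/GET_PFN()
 * recover 5 and 0x12345 from it.
 */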

extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
			   unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			  \
	}								  \
	__changed;							  \
})

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)
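/* That is 0xffc00000 - 0xfe600000 = 22 MiB of vmalloc space. */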

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */