/*
 * include/asm-xtensa/page.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_PAGE_H
#define _XTENSA_PAGE_H

#include <asm/processor.h>
#include <asm/types.h>
#include <asm/cache.h>
#include <platform/hardware.h>
#include <asm/kmem_layout.h>

/*
 * PAGE_SHIFT determines the page size
 */

#define PAGE_SHIFT	12
#define PAGE_SIZE	(__XTENSA_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))

#ifdef CONFIG_MMU
#define PAGE_OFFSET	XCHAL_KSEG_CACHED_VADDR
#define PHYS_OFFSET	XCHAL_KSEG_PADDR
#define MAX_LOW_PFN	(PHYS_PFN(XCHAL_KSEG_PADDR) + \
			 PHYS_PFN(XCHAL_KSEG_SIZE))
#else
#define PAGE_OFFSET	PLATFORM_DEFAULT_MEM_START
#define PHYS_OFFSET	PLATFORM_DEFAULT_MEM_START
#define MAX_LOW_PFN	PHYS_PFN(0xfffffffful)
#endif

#define PGTABLE_START	0x80000000

/*
 * Cache aliasing:
 *
 * If the cache size for one way is greater than the page size, we have to
 * deal with cache aliasing. The cache index is wider than the page size:
 *
 * |    |cache| cache index
 * | pfn  |off|	virtual address
 * |xxxx:X|zzz|
 * |    : |   |
 * | \  / |   |
 * |trans.|   |
 * | /  \ |   |
 * |yyyy:Y|zzz|	physical address
 *
 * When the page number is translated to the physical page address, the lowest
 * bit(s) (X) that are part of the cache index are also translated (Y).
 * If this translation changes bit(s) (X), the cache index is also affected,
 * thus resulting in a different cache line than before.
 * The kernel does not provide a mechanism to ensure that the page color
 * (represented by this bit) remains the same when allocated or when pages
 * are remapped. When user pages are mapped into kernel space, the color of
 * the page might also change.
 *
 * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
 * to temporarily map a patch so we can match the color.
 */

#if DCACHE_WAY_SIZE > PAGE_SIZE
# define DCACHE_ALIAS_ORDER	(DCACHE_WAY_SHIFT - PAGE_SHIFT)
# define DCACHE_ALIAS_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
# define DCACHE_ALIAS(a)	(((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define DCACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
#else
# define DCACHE_ALIAS_ORDER	0
# define DCACHE_ALIAS(a)	((void)(a), 0)
#endif
#define DCACHE_N_COLORS		(1 << DCACHE_ALIAS_ORDER)

#if ICACHE_WAY_SIZE > PAGE_SIZE
# define ICACHE_ALIAS_ORDER	(ICACHE_WAY_SHIFT - PAGE_SHIFT)
# define ICACHE_ALIAS_MASK	(PAGE_MASK & (ICACHE_WAY_SIZE - 1))
# define ICACHE_ALIAS(a)	(((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define ICACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
#else
# define ICACHE_ALIAS_ORDER	0
#endif


#ifdef __ASSEMBLY__

#define __pgprot(x)	(x)

#else

/*
 * These are used to make use of C type-checking..
 */

typedef struct { unsigned long pte; } pte_t;		/* page table entry */
typedef struct { unsigned long pgd; } pgd_t;		/* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

/*
 * Pure 2^n version of get_order
 * Use 'nsau' instructions if supported by the processor or the generic version.
 */

#if XCHAL_HAVE_NSA

static inline __attribute_const__ int get_order(unsigned long size)
{
	int lz;
	/* 'nsau' yields the leading-zero count of (size - 1) >> PAGE_SHIFT,
	 * so 32 - lz is the allocation order (log2 pages, rounded up). */
	asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
	return 32 - lz;
}

#else

# include <asm-generic/getorder.h>

#endif

struct page;
struct vm_area_struct;
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

/*
 * If we have cache aliasing and writeback caches, we might have to do
 * some extra work
 */

#if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE
extern void clear_page_alias(void *vaddr, unsigned long paddr);
extern void copy_page_alias(void *to, void *from,
			    unsigned long to_paddr, unsigned long from_paddr);

#define clear_user_highpage clear_user_highpage
void clear_user_highpage(struct page *page, unsigned long vaddr);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma);
#else
# define clear_user_page(page, vaddr, pg)	clear_page(page)
# define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#endif

/*
 * This handles the memory map.  We handle pages at
 * XCHAL_KSEG_CACHED_VADDR for kernels with 32 bit address space.
 * These macros are for conversion of kernel address, not user
 * addresses.
 */

#define ARCH_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)

#ifdef CONFIG_MMU
static inline unsigned long ___pa(unsigned long va)
{
	unsigned long off = va - PAGE_OFFSET;

	/* Fold the uncached KSEG alias back onto the cached mapping
	 * before adding the physical base. */
	if (off >= XCHAL_KSEG_SIZE)
		off -= XCHAL_KSEG_SIZE;

	return off + PHYS_OFFSET;
}
#define __pa(x)	___pa((unsigned long)(x))
#else
#define __pa(x)	\
	((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
#endif
#define __va(x)	\
	((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
#define pfn_valid(pfn) \
	((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)

#ifdef CONFIG_DISCONTIGMEM
# error CONFIG_DISCONTIGMEM not supported
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

#endif /* __ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#endif /* _XTENSA_PAGE_H */