#ifndef _ASM_POWERPC_MMU_H_
#define _ASM_POWERPC_MMU_H_
#ifdef __KERNEL__

#include <linux/types.h>

#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

/*
 * MMU features bit definitions
 */

/*
 * MMU families
 */
#define MMU_FTR_HPTE_TABLE		ASM_CONST(0x00000001)
#define MMU_FTR_TYPE_8xx		ASM_CONST(0x00000002)
#define MMU_FTR_TYPE_40x		ASM_CONST(0x00000004)
#define MMU_FTR_TYPE_44x		ASM_CONST(0x00000008)
#define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
#define MMU_FTR_TYPE_47x		ASM_CONST(0x00000020)

/* Radix page table supported and enabled */
#define MMU_FTR_TYPE_RADIX		ASM_CONST(0x00000040)

/*
 * Individual features below.
 */

/*
 * We need to clear the top 16 bits of the VA (from the remaining 64 bits)
 * in tlbie* instructions.
 */
#define MMU_FTR_TLBIE_CROP_VA		ASM_CONST(0x00008000)

/* Enable use of high BAT registers */
#define MMU_FTR_USE_HIGH_BATS		ASM_CONST(0x00010000)

/* Enable >32-bit physical addresses on 32-bit processors. Only used
 * by CONFIG_6xx currently, as BookE has supported this from day 1.
 */
#define MMU_FTR_BIG_PHYS		ASM_CONST(0x00020000)

/* Enable use of broadcast TLB invalidations. We don't always set it
 * on processors that support it due to other constraints with the
 * use of such invalidations
 */
#define MMU_FTR_USE_TLBIVAX_BCAST	ASM_CONST(0x00040000)

/* Enable use of tlbilx invalidate instructions.
 */
#define MMU_FTR_USE_TLBILX		ASM_CONST(0x00080000)

/* This indicates that the processor cannot handle multiple outstanding
 * broadcast tlbivax or tlbsync. This makes the code use a spinlock
 * around such invalidate forms.
 */
#define MMU_FTR_LOCK_BCAST_INVAL	ASM_CONST(0x00100000)

/* This indicates that the processor doesn't handle way selection
 * properly and needs SW to track and update the LRU state.  This
 * is specific to an erratum on e300c2/c3/c4 class parts.
 */
#define MMU_FTR_NEED_DTLB_SW_LRU	ASM_CONST(0x00200000)

/* Enable use of TLB reservation.  Processor should support tlbsrx.
 * instruction and MAS0[WQ].
 */
#define MMU_FTR_USE_TLBRSRV		ASM_CONST(0x00800000)

/* Use paired MAS registers (MAS7||MAS3, etc.)
 */
#define MMU_FTR_USE_PAIRED_MAS		ASM_CONST(0x01000000)

/* Doesn't support the B bit (1T segment) in SLBIE
 */
#define MMU_FTR_NO_SLBIE_B		ASM_CONST(0x02000000)

/* Support 16M large pages
 */
#define MMU_FTR_16M_PAGE		ASM_CONST(0x04000000)

/* Supports TLBIEL variant
 */
#define MMU_FTR_TLBIEL			ASM_CONST(0x08000000)

/* Supports tlbies w/o locking
 */
#define MMU_FTR_LOCKLESS_TLBIE		ASM_CONST(0x10000000)

/* Large pages can be marked CI
 */
#define MMU_FTR_CI_LARGE_PAGE		ASM_CONST(0x20000000)

/* 1T segments available
 */
#define MMU_FTR_1T_SEGMENT		ASM_CONST(0x40000000)

/* MMU feature bit sets for various CPUs */
#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2	\
	MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
#define MMU_FTRS_POWER4		MMU_FTRS_DEFAULT_HPTE_ARCH_V2
#define MMU_FTRS_PPC970		MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA
#define MMU_FTRS_POWER5		MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER6		MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER7		MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER8		MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER9		MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_CELL		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
				MMU_FTR_CI_LARGE_PAGE
#define MMU_FTRS_PA6T		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
				MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_FSL_BOOK3E
#include <asm/percpu.h>
DECLARE_PER_CPU(int, next_tlbcam_idx);
#endif

enum {
	MMU_FTRS_POSSIBLE = MMU_FTR_HPTE_TABLE | MMU_FTR_TYPE_8xx |
		MMU_FTR_TYPE_40x | MMU_FTR_TYPE_44x | MMU_FTR_TYPE_FSL_E |
		MMU_FTR_TYPE_47x | MMU_FTR_USE_HIGH_BATS | MMU_FTR_BIG_PHYS |
		MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_USE_TLBILX |
		MMU_FTR_LOCK_BCAST_INVAL | MMU_FTR_NEED_DTLB_SW_LRU |
		MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
		MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
		MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
		MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
#ifdef CONFIG_PPC_RADIX_MMU
		MMU_FTR_TYPE_RADIX |
#endif
		0,
};

static inline bool early_mmu_has_feature(unsigned long feature)
{
	return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
}
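
/*
 * Illustrative note (not part of the original header): this variant reads
 * cur_cpu_spec->mmu_features directly, so it is usable before the static
 * keys below have been initialised.  Masking with the compile-time constant
 * MMU_FTRS_POSSIBLE also lets the compiler fold impossible tests away, e.g.
 * early_mmu_has_feature(MMU_FTR_TYPE_RADIX) reduces to "return false" when
 * CONFIG_PPC_RADIX_MMU is not set.
 */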

#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
#include <linux/jump_label.h>

#define NUM_MMU_FTR_KEYS	32

extern struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS];

extern void mmu_feature_keys_init(void);

static __always_inline bool mmu_has_feature(unsigned long feature)
{
	int i;

	BUILD_BUG_ON(!__builtin_constant_p(feature));

	if (!(MMU_FTRS_POSSIBLE & feature))
		return false;

	i = __builtin_ctzl(feature);
	return static_branch_likely(&mmu_feature_keys[i]);
}
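
/*
 * Illustrative note (not part of the original header): each feature bit maps
 * to one static key by its bit position.  For example, MMU_FTR_TLBIEL is
 * 0x08000000, so __builtin_ctzl() returns 27 and the test becomes a patched
 * branch on mmu_feature_keys[27]; no load or mask of mmu_features happens on
 * the hot path once mmu_feature_keys_init() has run.
 */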

static inline void mmu_clear_feature(unsigned long feature)
{
	int i;

	i = __builtin_ctzl(feature);
	cur_cpu_spec->mmu_features &= ~feature;
	static_branch_disable(&mmu_feature_keys[i]);
}
#else

static inline void mmu_feature_keys_init(void)
{

}

static inline bool mmu_has_feature(unsigned long feature)
{
	return early_mmu_has_feature(feature);
}

static inline void mmu_clear_feature(unsigned long feature)
{
	cur_cpu_spec->mmu_features &= ~feature;
}
#endif /* CONFIG_JUMP_LABEL_FEATURE_CHECKS */

extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;

#ifdef CONFIG_PPC64
/* This is our real memory area size on ppc64 server; on embedded, we
 * make it match the size of our bolted TLB area.
 */
extern u64 ppc64_rma_size;
#endif /* CONFIG_PPC64 */

struct mm_struct;
#ifdef CONFIG_DEBUG_VM
extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
#else /* CONFIG_DEBUG_VM */
static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
}
#endif /* !CONFIG_DEBUG_VM */

#ifdef CONFIG_PPC_RADIX_MMU
static inline bool radix_enabled(void)
{
	return mmu_has_feature(MMU_FTR_TYPE_RADIX);
}

static inline bool early_radix_enabled(void)
{
	return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
#else
static inline bool radix_enabled(void)
{
	return false;
}

static inline bool early_radix_enabled(void)
{
	return false;
}
#endif
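
/*
 * Illustrative sketch (assumed usage, not part of the original header):
 * code that must choose between the two MMU types is typically written as
 *
 *	if (radix_enabled())
 *		... radix (tree-based) page table handling ...
 *	else
 *		... hash page table handling ...
 *
 * with early_radix_enabled() used in early boot, before the MMU feature
 * static keys have been set up.
 */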

#endif /* !__ASSEMBLY__ */

/* The kernel uses the constants below to index into the page sizes array.
 * The use of fixed constants for this purpose is better for the performance
 * of the low level hash refill handlers.
 *
 * An unsupported page size has its "shift" field set to 0.
 *
 * Any new page size being implemented can get a new entry in here. Whether
 * the kernel will use it or not is a different matter though. The actual page
 * size used by hugetlbfs is not defined here and may be made variable.
 *
 * Note: This array ended up being a false good idea as it's growing to the
 * point where I wonder if we should replace it with something different,
 * to think about, feedback welcome. --BenH.
 */

/* These are #defines as they have to be used in assembly */
#define MMU_PAGE_4K	0
#define MMU_PAGE_16K	1
#define MMU_PAGE_64K	2
#define MMU_PAGE_64K_AP	3	/* "Admixed pages" (hash64 only) */
#define MMU_PAGE_256K	4
#define MMU_PAGE_1M	5
#define MMU_PAGE_2M	6
#define MMU_PAGE_4M	7
#define MMU_PAGE_8M	8
#define MMU_PAGE_16M	9
#define MMU_PAGE_64M	10
#define MMU_PAGE_256M	11
#define MMU_PAGE_1G	12
#define MMU_PAGE_16G	13
#define MMU_PAGE_64G	14

#define MMU_PAGE_COUNT	15
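
/*
 * Illustrative sketch (assumed usage, not part of the original header): the
 * MMU backends keep a table of page size definitions indexed by these
 * constants, conventionally along the lines of
 *
 *	struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 *
 * so that, say, mmu_psize_defs[MMU_PAGE_16M].shift is 24 when 16M pages are
 * supported and 0 (the "shift" field unset) when they are not.
 */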

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/mmu.h>
#else /* CONFIG_PPC_BOOK3S_64 */

#ifndef __ASSEMBLY__
/* MMU initialization */
extern void early_init_mmu(void);
extern void early_init_mmu_secondary(void);
extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size);
static inline void mmu_early_init_devtree(void) { }
#endif /* __ASSEMBLY__ */
#endif

#if defined(CONFIG_PPC_STD_MMU_32)
/* 32-bit classic hash table MMU */
#include <asm/book3s/32/mmu-hash.h>
#elif defined(CONFIG_40x)
/* 40x-style software loaded TLB */
#  include <asm/mmu-40x.h>
#elif defined(CONFIG_44x)
/* 44x-style software loaded TLB */
#  include <asm/mmu-44x.h>
#elif defined(CONFIG_PPC_BOOK3E_MMU)
/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */
#  include <asm/mmu-book3e.h>
#elif defined (CONFIG_PPC_8xx)
/* Motorola/Freescale 8xx software loaded TLB */
#  include <asm/mmu-8xx.h>
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MMU_H_ */