/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_MMU_H_
#define _ASM_POWERPC_MMU_H_
#ifdef __KERNEL__

#include <linux/types.h>

#include <asm/asm-const.h>

/*
 * MMU features bit definitions
 */

/*
 * MMU families
 */
#define MMU_FTR_HPTE_TABLE		ASM_CONST(0x00000001)
#define MMU_FTR_TYPE_8xx		ASM_CONST(0x00000002)
#define MMU_FTR_TYPE_40x		ASM_CONST(0x00000004)
#define MMU_FTR_TYPE_44x		ASM_CONST(0x00000008)
#define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
#define MMU_FTR_TYPE_47x		ASM_CONST(0x00000020)

/* Radix page table supported and enabled */
#define MMU_FTR_TYPE_RADIX		ASM_CONST(0x00000040)

/*
 * Individual features below.
 */

/*
 * Support for 68 bit VA space. We added that from ISA 2.05
 */
#define MMU_FTR_68_BIT_VA		ASM_CONST(0x00002000)

/*
 * Kernel read only support.
 * We added the ppp value 0b110 in ISA 2.04.
 */
#define MMU_FTR_KERNEL_RO		ASM_CONST(0x00004000)

/*
 * We need to clear top 16bits of va (from the remaining 64 bits) in
 * tlbie* instructions
 */
#define MMU_FTR_TLBIE_CROP_VA		ASM_CONST(0x00008000)

/* Enable use of high BAT registers */
#define MMU_FTR_USE_HIGH_BATS		ASM_CONST(0x00010000)

/* Enable >32-bit physical addresses on 32-bit processor, only used
 * by CONFIG_PPC_BOOK3S_32 currently as BookE supports that from day 1
 */
#define MMU_FTR_BIG_PHYS		ASM_CONST(0x00020000)

/* Enable use of broadcast TLB invalidations. We don't always set it
 * on processors that support it due to other constraints with the
 * use of such invalidations
 */
#define MMU_FTR_USE_TLBIVAX_BCAST	ASM_CONST(0x00040000)

/* Enable use of tlbilx invalidate instructions.
 */
#define MMU_FTR_USE_TLBILX		ASM_CONST(0x00080000)

/* This indicates that the processor cannot handle multiple outstanding
 * broadcast tlbivax or tlbsync. This makes the code use a spinlock
 * around such invalidate forms.
 */
#define MMU_FTR_LOCK_BCAST_INVAL	ASM_CONST(0x00100000)

/* This indicates that the processor doesn't handle way selection
 * properly and needs SW to track and update the LRU state.  This
 * is specific to an errata on e300c2/c3/c4 class parts
 */
#define MMU_FTR_NEED_DTLB_SW_LRU	ASM_CONST(0x00200000)

/* Enable use of TLB reservation.  Processor should support tlbsrx.
 * instruction and MAS0[WQ].
 */
#define MMU_FTR_USE_TLBRSRV		ASM_CONST(0x00800000)

/* Use paired MAS registers (MAS7||MAS3, etc.)
 */
#define MMU_FTR_USE_PAIRED_MAS		ASM_CONST(0x01000000)

/* Doesn't support the B bit (1T segment) in SLBIE
 */
#define MMU_FTR_NO_SLBIE_B		ASM_CONST(0x02000000)

/* Support 16M large pages
 */
#define MMU_FTR_16M_PAGE		ASM_CONST(0x04000000)

/* Supports TLBIEL variant
 */
#define MMU_FTR_TLBIEL			ASM_CONST(0x08000000)

/* Supports tlbies w/o locking
 */
#define MMU_FTR_LOCKLESS_TLBIE		ASM_CONST(0x10000000)

/* Large pages can be marked CI
 */
#define MMU_FTR_CI_LARGE_PAGE		ASM_CONST(0x20000000)

/* 1T segments available
 */
#define MMU_FTR_1T_SEGMENT		ASM_CONST(0x40000000)


/*
 * Supports KUAP (key 0 controlling userspace addresses) on radix
 */
#define MMU_FTR_RADIX_KUAP		ASM_CONST(0x80000000)

/* MMU feature bit sets for various CPUs */
/* NOTE(review): MMU_FTR_PPCAS_ARCH_V2 is not defined in this chunk of the
 * file — presumably provided elsewhere; confirm before changing this set.
 */
#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2	\
	MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
#define MMU_FTRS_POWER		MMU_FTRS_DEFAULT_HPTE_ARCH_V2
#define MMU_FTRS_PPC970		MMU_FTRS_POWER | MMU_FTR_TLBIE_CROP_VA
#define MMU_FTRS_POWER5		MMU_FTRS_POWER | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER6		MMU_FTRS_POWER5 | MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA
#define MMU_FTRS_POWER7		MMU_FTRS_POWER6
#define MMU_FTRS_POWER8		MMU_FTRS_POWER6
#define MMU_FTRS_POWER9		MMU_FTRS_POWER6
#define MMU_FTRS_CELL		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
				MMU_FTR_CI_LARGE_PAGE
#define MMU_FTRS_PA6T		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
				MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_FSL_BOOK3E
#include <asm/percpu.h>
DECLARE_PER_CPU(int, next_tlbcam_idx);
#endif

/* Mask of every MMU feature bit that can be present with this kernel config;
 * feature tests AND against this so impossible bits compile away to false.
 */
enum {
	MMU_FTRS_POSSIBLE =
#ifdef CONFIG_PPC_BOOK3S
		MMU_FTR_HPTE_TABLE |
#endif
#ifdef CONFIG_PPC_8xx
		MMU_FTR_TYPE_8xx |
#endif
#ifdef CONFIG_40x
		MMU_FTR_TYPE_40x |
#endif
#ifdef CONFIG_44x
		MMU_FTR_TYPE_44x |
#endif
#if defined(CONFIG_E200) || defined(CONFIG_E500)
		MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX |
#endif
#ifdef CONFIG_PPC_47x
		MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL |
#endif
#ifdef CONFIG_PPC_BOOK3S_32
		MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU |
#endif
#ifdef CONFIG_PPC_BOOK3E_64
		MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
#endif
#ifdef CONFIG_PPC_BOOK3S_64
		MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
		MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
		MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
		MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA |
#endif
#ifdef CONFIG_PPC_RADIX_MMU
		MMU_FTR_TYPE_RADIX |
#ifdef CONFIG_PPC_KUAP
		MMU_FTR_RADIX_KUAP |
#endif /* CONFIG_PPC_KUAP */
#endif /* CONFIG_PPC_RADIX_MMU */
		0,
};

179
static inline bool early_mmu_has_feature(unsigned long feature)
180
{
181
	return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
182 183
}


#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
#include <linux/jump_label.h>

/* One static key per possible MMU feature bit (features fit in 32 bits). */
#define NUM_MMU_FTR_KEYS	32

extern struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS];

extern void mmu_feature_keys_init(void);

/*
 * Test an MMU feature via its per-bit static key.  @feature must be a
 * compile-time constant single MMU_FTR_* bit: BUILD_BUG_ON enforces
 * constness, and __builtin_ctzl() maps the bit to its key index.
 * Features excluded by MMU_FTRS_POSSIBLE fold to constant false.
 */
static __always_inline bool mmu_has_feature(unsigned long feature)
{
	int i;

#ifndef __clang__ /* clang can't cope with this */
	BUILD_BUG_ON(!__builtin_constant_p(feature));
#endif

#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
	/* Before jump-label init the keys are meaningless; warn and use
	 * the early (direct read) path instead.
	 */
	if (!static_key_initialized) {
		printk("Warning! mmu_has_feature() used prior to jump label init!\n");
		dump_stack();
		return early_mmu_has_feature(feature);
	}
#endif

	if (!(MMU_FTRS_POSSIBLE & feature))
		return false;

	i = __builtin_ctzl(feature);
	return static_branch_likely(&mmu_feature_keys[i]);
}

/*
 * Clear an MMU feature bit: drop it from cur_cpu_spec->mmu_features and
 * disable the corresponding static key so mmu_has_feature() sees it off.
 */
static inline void mmu_clear_feature(unsigned long feature)
{
	int bit = __builtin_ctzl(feature);

	cur_cpu_spec->mmu_features &= ~feature;
	static_branch_disable(&mmu_feature_keys[bit]);
}
#else

static inline void mmu_feature_keys_init(void)
{

}

231 232 233 234 235
static inline bool mmu_has_feature(unsigned long feature)
{
	return early_mmu_has_feature(feature);
}

236 237 238 239
static inline void mmu_clear_feature(unsigned long feature)
{
	cur_cpu_spec->mmu_features &= ~feature;
}
240
#endif /* CONFIG_JUMP_LABEL */

extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;

#ifdef CONFIG_PPC64
/* This is our real memory area size on ppc64 server, on embedded, we
 * make it match the size of our bolted TLB area
 */
extern u64 ppc64_rma_size;

/* Cleanup function used by kexec */
extern void mmu_cleanup_all(void);
extern void radix__mmu_cleanup_all(void);

/* Functions for creating and updating partition table on POWER9 */
extern void mmu_partition_table_init(void);
extern void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
					  unsigned long dw1);
#endif /* CONFIG_PPC64 */


struct mm_struct;
#ifdef CONFIG_DEBUG_VM
extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
#else /* CONFIG_DEBUG_VM */
/* No-op when CONFIG_DEBUG_VM is off. */
static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
}
#endif /* !CONFIG_DEBUG_VM */

269 270 271 272 273
#ifdef CONFIG_PPC_RADIX_MMU
static inline bool radix_enabled(void)
{
	return mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
274 275 276 277 278

static inline bool early_radix_enabled(void)
{
	return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
279 280 281 282 283
#else
static inline bool radix_enabled(void)
{
	return false;
}
284 285 286 287 288

static inline bool early_radix_enabled(void)
{
	return false;
}
289 290
#endif

291 292 293 294 295 296 297 298 299
#ifdef CONFIG_PPC_MEM_KEYS
extern u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address);
#else
static inline u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address)
{
	return 0;
}
#endif /* CONFIG_PPC_MEM_KEYS */

300 301 302 303 304 305 306 307 308 309 310
#ifdef CONFIG_STRICT_KERNEL_RWX
static inline bool strict_kernel_rwx_enabled(void)
{
	return rodata_enabled;
}
#else
static inline bool strict_kernel_rwx_enabled(void)
{
	return false;
}
#endif
311 312
#endif /* !__ASSEMBLY__ */


/* The kernel uses the constants below to index in the page sizes array.
 * The use of fixed constants for this purpose is better for performances
 * of the low level hash refill handlers.
 *
 * A non supported page size has a "shift" field set to 0
 *
 * Any new page size being implemented can get a new entry in here. Whether
 * the kernel will use it or not is a different matter though. The actual page
 * size used by hugetlbfs is not defined here and may be made variable
 *
 * Note: This array ended up being a false good idea as it's growing to the
 * point where I wonder if we should replace it with something different,
 * to think about, feedback welcome. --BenH.
 */

/* These are #defines as they have to be used in assembly */
#define MMU_PAGE_4K	0
#define MMU_PAGE_16K	1
#define MMU_PAGE_64K	2
#define MMU_PAGE_64K_AP	3	/* "Admixed pages" (hash64 only) */
#define MMU_PAGE_256K	4
#define MMU_PAGE_512K	5
#define MMU_PAGE_1M	6
#define MMU_PAGE_2M	7
#define MMU_PAGE_4M	8
#define MMU_PAGE_8M	9
#define MMU_PAGE_16M	10
#define MMU_PAGE_64M	11
#define MMU_PAGE_256M	12
#define MMU_PAGE_1G	13
#define MMU_PAGE_16G	14
#define MMU_PAGE_64G	15

/*
 * N.B. we need to change the type of hpte_page_sizes if this gets to be > 16
 * Also we need to change the type of mm_context.low/high_slices_psize.
 */
#define MMU_PAGE_COUNT	16


/*
 * If we store section details in page->flags we can't increase the MAX_PHYSMEM_BITS
 * if we increase SECTIONS_WIDTH we will not store node details in page->flags and
 * page_to_nid does a page->section->node lookup
 * Hence only increase for VMEMMAP. Further depending on SPARSEMEM_EXTREME reduce
 * memory requirements with large number of sections.
 * 51 bits is the max physical real address on POWER9
 */
#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) &&	\
	defined(CONFIG_PPC_64K_PAGES)
#define MAX_PHYSMEM_BITS        51
#elif defined(CONFIG_PPC64)
#define MAX_PHYSMEM_BITS        46
#endif


#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/mmu.h>
#else /* CONFIG_PPC_BOOK3S_64 */

#ifndef __ASSEMBLY__
/* MMU initialization */
extern void early_init_mmu(void);
extern void early_init_mmu_secondary(void);
extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size);
static inline void mmu_early_init_devtree(void) { }

extern void *abatron_pteptrs[2];
#endif /* __ASSEMBLY__ */
#endif


#if defined(CONFIG_PPC_BOOK3S_32)
/* 32-bit classic hash table MMU */
#include <asm/book3s/32/mmu-hash.h>
#elif defined(CONFIG_PPC_MMU_NOHASH)
#include <asm/nohash/mmu.h>
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MMU_H_ */