/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_MMU_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_H_

#include <asm/page.h>

#ifndef __ASSEMBLY__
/*
 * Page size definition
 *
 *    shift : the "PAGE_SHIFT" value for that page size
 *    sllp  : a bit mask with the SLB L || LP value to be or'ed
 *            directly into a slbmte "vsid" value
 *    penc  : the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def {
	unsigned int	shift;	/* number of bits */
	int		penc[MMU_PAGE_COUNT];	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	union {
		unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
		unsigned long ap;	/* Ap encoding used by PowerISA 3.0 */
	};
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
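
/*
 * Illustrative sketch (not an interface defined by this header): callers
 * typically probe mmu_psize_defs[] to see whether a page size is
 * configured. A zero shift means the size is unsupported; a penc of -1
 * means no HPTE encoding exists for that base/actual size pair:
 *
 *	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
 *	    mmu_psize_defs[MMU_PAGE_16M].penc[MMU_PAGE_16M] != -1)
 *		... 16M pages are usable under the hash MMU ...
 */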

/*
 * For Book3S 64 with 4K and 64K Linux page sizes we want
 * pgtable_t to be a pointer, because the page table
 * actually stores pfns.
 */
typedef pte_t *pgtable_t;

#endif /* __ASSEMBLY__ */

/*
 * If we store section details in page->flags, we can't increase MAX_PHYSMEM_BITS:
 * increasing SECTIONS_WIDTH would leave no room for node details in page->flags,
 * and page_to_nid() would have to do a page->section->node lookup instead.
 * Hence only increase it for VMEMMAP. Further, require SPARSEMEM_EXTREME to
 * reduce the memory consumed by the large number of sections.
 * 51 bits is the max physical real address on POWER9.
 */
#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) &&  \
	defined(CONFIG_PPC_64K_PAGES)
#define MAX_PHYSMEM_BITS 51
#else
#define MAX_PHYSMEM_BITS 46
#endif
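
/*
 * Worked example (assuming powerpc's SECTION_SIZE_BITS of 24, i.e. 16MB
 * sections): MAX_PHYSMEM_BITS = 51 allows up to 2^(51 - 24) = 2^27
 * memory sections, which is why SPARSEMEM_EXTREME (section pointers
 * allocated on demand) is a precondition for the larger limit.
 */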

/* 64-bit classic hash table MMU */
#include <asm/book3s/64/mmu-hash.h>

#ifndef __ASSEMBLY__
/*
 * ISA 3.0 partition and process table entry format
 */
struct prtb_entry {
	__be64 prtb0;
	__be64 prtb1;
};
extern struct prtb_entry *process_tb;

struct patb_entry {
	__be64 patb0;
	__be64 patb1;
};
extern struct patb_entry *partition_tb;

/* Bits in patb0 field */
#define PATB_HR		(1UL << 63)
#define RPDB_MASK	0x0fffffffffffff00UL
#define RPDB_SHIFT	8		/* radix page dir. base starts at bit 8 */
#define RTS1_SHIFT	61		/* top 2 bits of radix tree size */
#define RTS1_MASK	(3UL << RTS1_SHIFT)
#define RTS2_SHIFT	5		/* bottom 3 bits of radix tree size */
#define RTS2_MASK	(7UL << RTS2_SHIFT)
#define RPDS_MASK	0x1f		/* root page dir. size field */

/* Bits in patb1 field */
#define PATB_GR		(1UL << 63)	/* guest uses radix; must match HR */
#define PRTS_MASK	0x1f		/* process table size field */
#define PRTB_MASK	0x0ffffffffffff000UL
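
/*
 * Hedged sketch of how a radix partition table entry is composed (the
 * authoritative code lives in the radix setup elsewhere in the kernel):
 * patb0 combines the host-radix flag, the tree size split across
 * RTS1/RTS2, the physical page-directory base and the root dir. size:
 *
 *	rts = radix__get_tree_size();	// RTS1|RTS2 encoding
 *	patb0 = cpu_to_be64(rts | __pa(pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR);
 */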

/* Number of supported PID bits */
extern unsigned int mmu_pid_bits;

/* Base PID to allocate from */
extern unsigned int mmu_base_pid;

#define PRTB_SIZE_SHIFT	(mmu_pid_bits + 4)
#define PRTB_ENTRIES	(1ul << mmu_pid_bits)
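
/*
 * Worked example: each prtb_entry is 16 bytes (2^4), so with a typical
 * mmu_pid_bits of 20 the table has 2^20 entries and PRTB_SIZE_SHIFT is
 * 20 + 4 = 24, i.e. a 16MB process table.
 */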

/*
 * Power9 currently supports only a 64K partition table size.
 */
#define PATB_SIZE_SHIFT	16
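
/*
 * Worked example: 2^16 = 64KB, and each patb_entry is 16 bytes, so the
 * partition table holds 64KB / 16B = 4096 entries, indexed by LPID.
 */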

typedef unsigned long mm_context_id_t;
struct spinlock;

/* Maximum possible number of NPUs in a system. */
#define NV_MAX_NPUS 8

/*
 * One bit per slice. Low slices cover 256MB segments up to the 4GB
 * mark, which gives us 16 low slices. Above that, slices are tracked
 * at 1TB granularity.
 */
struct slice_mask {
	u64 low_slices;
	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};
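
/*
 * Worked example: 4GB / 256MB = 16 low slices, so low_slices uses only
 * 16 bits of the u64; each further 1TB of address space consumes one
 * bit of the high_slices bitmap (SLICE_NUM_HIGH bits in total).
 */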

typedef struct {
	union {
		/*
		 * We use id as the PIDR content for radix. On hash we can use
		 * more than one id. The extended ids are used when we start
		 * having addresses above 512TB. We allocate one extended id
		 * per 512TB. The new id is then used with the 49-bit EA to
		 * build a new VA. We always use ESID_BITS_1T_MASK bits from
		 * the EA and the new context ids to build the new VAs.
		 */
		mm_context_id_t id;
		mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
	};
	u16 user_psize;		/* page size index */

	/* Number of bits set in the mm_cpumask */
	atomic_t active_cpus;

	/* Number of users of the external (Nest) MMU */
	atomic_t copros;

	/* NPU NMMU context */
	struct npu_context *npu_context;

#ifdef CONFIG_PPC_MM_SLICES
	/* SLB page size encodings */
	unsigned char low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
	unsigned long slb_addr_limit;
# ifdef CONFIG_PPC_64K_PAGES
	struct slice_mask mask_64k;
# endif
	struct slice_mask mask_4k;
# ifdef CONFIG_HUGETLB_PAGE
	struct slice_mask mask_16m;
	struct slice_mask mask_16g;
# endif
#else
	u16 sllp;		/* SLB page size encoding */
#endif
	unsigned long vdso_base;
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
	/*
	 * pagetable fragment support
	 */
	void *pte_frag;
	void *pmd_frag;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	struct list_head iommu_group_mem_list;
#endif

#ifdef CONFIG_PPC_MEM_KEYS
	/*
	 * Each bit represents one protection key.
	 * bit set   -> key allocated
	 * bit unset -> key available for allocation
	 */
	u32 pkey_allocation_map;
	s16 execute_only_pkey; /* key holding execute-only protection */
#endif
} mm_context_t;

/*
 * The current system page and segment sizes
 */
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;

/* MMU initialization */
void mmu_early_init_devtree(void);
void hash__early_init_devtree(void);
void radix__early_init_devtree(void);
extern void radix_init_native(void);
extern void hash__early_init_mmu(void);
extern void radix__early_init_mmu(void);
static inline void early_init_mmu(void)
{
	if (radix_enabled())
		return radix__early_init_mmu();
	return hash__early_init_mmu();
}
extern void hash__early_init_mmu_secondary(void);
extern void radix__early_init_mmu_secondary(void);
static inline void early_init_mmu_secondary(void)
{
	if (radix_enabled())
		return radix__early_init_mmu_secondary();
	return hash__early_init_mmu_secondary();
}

extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
					 phys_addr_t first_memblock_size);
extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
					 phys_addr_t first_memblock_size);
static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
					      phys_addr_t first_memblock_size)
{
	if (early_radix_enabled())
		return radix__setup_initial_memory_limit(first_memblock_base,
						   first_memblock_size);
	return hash__setup_initial_memory_limit(first_memblock_base,
					   first_memblock_size);
}

extern int (*register_process_table)(unsigned long base, unsigned long page_size,
				     unsigned long tbl_size);
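
/*
 * Hedged usage sketch: the radix setup code points this hook at a native
 * or hypervisor (H_REGISTER_PROC_TBL) implementation and invokes it
 * roughly as
 *
 *	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
 *
 * where the last argument is the PRTS encoding: table size = 2^(PRTS + 12).
 */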

#ifdef CONFIG_PPC_PSERIES
extern void radix_init_pseries(void);
#else
static inline void radix_init_pseries(void) { }
#endif

static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
{
	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	if (likely(index < ARRAY_SIZE(ctx->extended_id)))
		return ctx->extended_id[index];

	/* should never happen */
	WARN_ON(1);
	return 0;
}
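
/*
 * Worked example (assuming MAX_EA_BITS_PER_CONTEXT is 49, i.e. 512TB
 * chunks): an EA of 0x0002000000000000 (512TB) yields index 1 and uses
 * extended_id[1]; any EA below 512TB yields index 0, which aliases the
 * primary id through the union above.
 */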

static inline unsigned long get_user_vsid(mm_context_t *ctx,
					  unsigned long ea, int ssize)
{
	unsigned long context = get_user_context(ctx, ea);

	return get_vsid(context, ea, ssize);
}

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */