#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>
#include <asm/bug.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE which we
 * need for various slices related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/processor.h>
#include <asm/cpu_has_feature.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_256M	SLB_VSID_SHIFT
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_COMMON_BITS	ASM_CONST(0x000fffffffffffff)
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_3_0		ASM_CONST(0x000fffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

/*
 * ISA 3.0 has a different HPTE format.
 */
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_3_0_SSIZE_MASK	(3ull << HPTE_R_3_0_SSIZE_SHIFT)
#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_RPN_3_0		ASM_CONST(0x01fffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */

#ifndef __ASSEMBLY__

struct mmu_hash_ops {
	void            (*hpte_invalidate)(unsigned long slot,
					   unsigned long vpn,
					   int bpsize, int apsize,
					   int ssize, int local);
	long		(*hpte_updatepp)(unsigned long slot,
					 unsigned long newpp,
					 unsigned long vpn,
					 int bpsize, int apsize,
					 int ssize, unsigned long flags);
	void            (*hpte_updateboltedpp)(unsigned long newpp,
					       unsigned long ea,
					       int psize, int ssize);
	long		(*hpte_insert)(unsigned long hpte_group,
				       unsigned long vpn,
				       unsigned long prpn,
				       unsigned long rflags,
				       unsigned long vflags,
				       int psize, int apsize,
				       int ssize);
	long		(*hpte_remove)(unsigned long hpte_group);
	int             (*hpte_removebolted)(unsigned long ea,
					     int psize, int ssize);
	void		(*flush_hash_range)(unsigned long number, int local);
	void		(*hugepage_invalidate)(unsigned long vsid,
					       unsigned long addr,
					       unsigned char *hpte_slot_array,
					       int psize, int ssize, int local);
	int		(*resize_hpt)(unsigned long shift);
	/*
	 * Special for kexec.
	 * To be called in real mode with interrupts disabled. No locks are
	 * taken, so concurrent access on pre-POWER5 hardware could result
	 * in a deadlock.
	 * The linear mapping is destroyed as well.
	 */
	void		(*hpte_clear_all)(void);
};
extern struct mmu_hash_ops mmu_hash_ops;
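
/*
 * Illustrative usage (a sketch, not part of this header): platform code
 * installs its implementation of these ops at boot, and callers dispatch
 * through the table, e.g.
 *
 *	slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, prpn, rflags,
 *					vflags, psize, apsize, ssize);
 *
 * where a negative return value conventionally means the group is full
 * and the caller retries with the secondary hash or calls hpte_remove().
 */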

struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}
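
/*
 * Example (illustrative, assuming mmu_psize_defs[] was populated with a
 * 64K page size at shift 16): shift_to_mmu_psize(16) returns MMU_PAGE_64K
 * and mmu_psize_to_shift(MMU_PAGE_64K) returns 16. An unsupported shift
 * yields -1, while an uninitialized psize hits BUG().
 */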

static inline unsigned long get_sllp_encoding(int psize)
{
	unsigned long sllp;

	sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
		((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
	return sllp;
}
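
/*
 * Worked example (illustrative): a page size whose sllp value is
 * SLB_VSID_L | SLB_VSID_LP_01 (0x110) encodes to
 * (0x100 >> 6) | (0x10 >> 4) = 0b101, i.e. the L and LP bits packed
 * into a contiguous 3-bit value for use in TLB invalidations.
 */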

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * Encode the page number shift.
 * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
 * 12 bits. This enables us to address up to a 76-bit VA.
 * For the HPT hash we can ignore the page size bits of the VA, and for
 * the HPTE encoding we ignore up to 23 bits of it. So ignoring the lower
 * 12 bits ensures we work in all cases, including the 4k page size.
 */
#define VPN_SHIFT	12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}
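
/*
 * Sketch (illustrative, mirroring how SLB entries are typically built
 * elsewhere, not a definition from this header): the VSID dword of an
 * SLB entry is assembled roughly as
 *
 *	(vsid << slb_vsid_shift(ssize)) |
 *	((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT) | flags
 *
 * with flags such as SLB_VSID_KERNEL or SLB_VSID_USER plus the LLP bits.
 */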

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

/*
 * This array is indexed by the LP field of the HPTE second dword.
 * Since this field may contain some RPN bits, some entries are
 * replicated so that we get the same value irrespective of RPN.
 * The top 4 bits are the page size index (MMU_PAGE_*) for the
 * actual page size, the bottom 4 bits are the base page size.
 */
extern u8 hpte_page_sizes[1 << LP_BITS];

static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	unsigned int i, lp;

	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;

	/* Look at the 8 bit LP value */
	lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
	i = hpte_page_sizes[lp];
	if (!i)
		return 0;
	if (!is_base_size)
		i >>= 4;
	return 1ul << mmu_psize_defs[i & 0xf].shift;
}
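
/*
 * Example (illustrative): for a large-page HPTE (HPTE_V_LARGE set) whose
 * LP value decodes via hpte_page_sizes[] to (MMU_PAGE_64K << 4) | MMU_PAGE_64K,
 * both hpte_page_size() and hpte_base_page_size() return 1ul << 16.
 * Without HPTE_V_LARGE the page is always 4k, i.e. 1ul << 12.
 */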

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 0);
}

static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 1);
}

/*
 * The current system page and segment sizes
 */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE.  The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}
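
/*
 * Worked example (illustrative): with VPN_SHIFT = 12, vpn >> 11 discards
 * VA bits below bit 23; shifting the result left by HPTE_V_AVPN_SHIFT (7)
 * leaves the low 7 bits clear, which is exactly what HPTE_V_COMPARE()'s
 * 0xffffffffffffff80 mask ignores.
 */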

/*
 * ISA v3.0 defines a new HPTE format, which differs from the old
 * format in having smaller AVPN and ARPN fields, and the B field
 * in the second dword instead of the first.
 */
static inline unsigned long hpte_old_to_new_v(unsigned long v)
{
	/* trim AVPN, drop B */
	return v & HPTE_V_COMMON_BITS;
}

static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
{
	/* move B field from 1st to 2nd dword, trim ARPN */
	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
		(((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
}

static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
{
	/* insert B field */
	return (v & HPTE_V_COMMON_BITS) |
		((r & HPTE_R_3_0_SSIZE_MASK) <<
		 (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
}

static inline unsigned long hpte_new_to_old_r(unsigned long r)
{
	/* clear out B field */
	return r & ~HPTE_R_3_0_SSIZE_MASK;
}
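
/*
 * Note (illustrative): these helpers round-trip. For a 1T segment,
 * hpte_old_to_new_r() moves B = 1 from bits 63:62 of the first dword
 * into bits 59:58 of the second, and hpte_new_to_old_v() shifts it back.
 */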

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize)
{
	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}
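
/*
 * Example (illustrative): for a 16M actual page size, pa keeps its bits
 * above bit 24 and the CPU-specific penc value is ORed into the LP field
 * (bits 19:12); a 4K page needs no LP encoding at all.
 */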

/*
 * Build a VPN (the VA shifted right by VPN_SHIFT bits) from the VSID,
 * EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}
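
/*
 * Worked example (illustrative): for a 256M segment, s_shift = 28, so
 * this returns (vsid << 16) | (VA bits 27:12), i.e. the VSID with the
 * 4k page index within the segment appended.
 */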

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	unsigned long mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}
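
/*
 * Usage sketch (illustrative, mirroring the hash fault paths elsewhere):
 * the primary PTE group is typically derived as
 *
 *	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 *
 * with ~hash used for the secondary hash. For a 256M segment with 4k
 * pages the hash reduces to vsid ^ (page index within the segment).
 */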

#define HPTE_LOCAL_UPDATE	0x1
#define HPTE_NOHPTE_UPDATE	0x2

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

#ifdef CONFIG_PPC_PSERIES
void hpte_init_pseries(void);
#else
static inline void hpte_init_pseries(void) { }
#endif

extern void hpte_init_native(void);

extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a proto-VSID (37 bits for a 65-bit VA, 40 bits for
 * a 68-bit VA). Proto-VSIDs are generated from the MMU context id and
 * the effective segment id of the address.
 *
 * For user processes the max context id is limited to MAX_USER_CONTEXT.
 * For kernel space, we use context ids 1-4 to map addresses as below:
 * NOTE: each context only supports 64TB now.
 * 0x00001 -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x00002 -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x00003 -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x00004 -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We use VSID 0 to indicate an invalid VSID. This means we can't use
 * context id 0, because a context id of 0 and an EA of 0 gives a
 * proto-VSID of 0, which will produce a VSID of 0.
 *
 * We also need to avoid the last segment of the last context, because
 * that would give a proto-VSID equal to the VSID modulus (2^VSID_BITS - 1,
 * e.g. 0x1fffffffff for a 65-bit VA), which the scramble maps to VSID 0.
 */

/*
 * The max VA bits we support as of now is 68 bits. We want a 19-bit
 * context ID.
 * Restrictions:
 * GPUs are unable to access memory beyond 128TB
 * (a 47-bit effective address). We also cannot do more than a 20-bit PID.
 * For POWER4 and POWER5, which can only do a 65-bit VA, we restrict
 * CONTEXT_BITS to 16 bits (i.e., we can only have 2^16 PIDs at the
 * same time).
 */
#define VA_BITS			68
#define CONTEXT_BITS		19
#define ESID_BITS		(VA_BITS - (SID_SHIFT + CONTEXT_BITS))
#define ESID_BITS_1T		(VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))

#define ESID_BITS_MASK		((1 << ESID_BITS) - 1)
#define ESID_BITS_1T_MASK	((1 << ESID_BITS_1T) - 1)
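
/*
 * Worked values (illustrative), with VA_BITS = 68 and CONTEXT_BITS = 19:
 * SID_SHIFT = 28 gives ESID_BITS = 21 and SID_SHIFT_1T = 40 gives
 * ESID_BITS_1T = 9, so each context maps 2^(SID_SHIFT + ESID_BITS) =
 * 2^49 bytes (512TB), matching the comment below.
 */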

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. VSID 0 is reserved as invalid,
 * contexts 1-4 are used for kernel mapping. Each segment contains 2^28
 * bytes. Each context maps 2^49 bytes (512TB).
 *
 * We also need to avoid the last segment of the last context, because
 * that would give a proto-VSID equal to the VSID modulus, which the
 * scramble maps to VSID 0.
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 2)
#define MIN_USER_CONTEXT	(5)

/* Would be nice to use KERNEL_REGION_ID here */
#define KERNEL_REGION_CONTEXT_OFFSET	(0xc - 1)

/*
 * For platforms that support only a 65-bit VA we limit the context bits
 */
#define MAX_USER_CONTEXT_65BIT_VA ((ASM_CONST(1) << (65 - (SID_SHIFT + ESID_BITS))) - 2)

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. The vsid_multiplier should also be
 * co-prime to vsid_modulus. We also need to make sure that the number
 * of bits in the multiplied result (dividend) is less than twice the
 * number of protovsid bits for our modulus optimization to work.
 *
 * The below table shows the current values used.
 * |-------+------------+----------------------+------------+--------------------|
 * |       | Prime Bits | proto VSID_BITS_65VA | Total Bits | 2 * proto VSID_BITS |
 * |-------+------------+----------------------+------------+--------------------|
 * | 1T    |         24 |                   25 |         49 |                 50 |
 * |-------+------------+----------------------+------------+--------------------|
 * | 256MB |         24 |                   37 |         61 |                 74 |
 * |-------+------------+----------------------+------------+--------------------|
 *
 * |-------+------------+----------------------+------------+--------------------|
 * |       | Prime Bits | proto VSID_BITS_68VA | Total Bits | 2 * proto VSID_BITS |
 * |-------+------------+----------------------+------------+--------------------|
 * | 1T    |         24 |                   28 |         52 |                 56 |
 * |-------+------------+----------------------+------------+--------------------|
 * | 256MB |         24 |                   40 |         64 |                 80 |
 * |-------+------------+----------------------+------------+--------------------|
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(VA_BITS - SID_SHIFT)
#define VSID_BITS_65_256M	(65 - SID_SHIFT)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(VA_BITS - SID_SHIFT_1T)
#define VSID_BITS_65_1T		(65 - SID_SHIFT_1T)

#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE	(H_PGTABLE_RANGE >> 41)
#define TASK_SLICE_ARRAY_SZ(x)	((x)->context.addr_limit >> 41)

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k).  For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

/* simplified form avoiding mod operation */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})

#else /* 1 */
static inline unsigned long vsid_scramble(unsigned long protovsid,
				  unsigned long vsid_multiplier, int vsid_bits)
{
	unsigned long vsid;
	unsigned long vsid_modulus = ((1UL << vsid_bits) - 1);
	/*
	 * We have the same multiplier for both 256M and 1T segments now
	 */
	vsid = protovsid * vsid_multiplier;
	vsid = (vsid >> vsid_bits) + (vsid & vsid_modulus);
	return (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus;
}
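
/*
 * Worked example (illustrative), using a tiny 4-bit modulus 2^4 - 1 = 15:
 * for x = 55, (55 >> 4) + (55 & 15) = 3 + 7 = 10 = 55 % 15, so the
 * shift-and-add fold computes the remainder without a divide. The final
 * (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus step folds the
 * vsid == vsid_modulus case to 0, which is why that proto-VSID must be
 * avoided.
 */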

#endif /* 1 */

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	unsigned long va_bits = VA_BITS;
	unsigned long vsid_bits;
	unsigned long protovsid;

	/*
	 * Bad address. We return VSID 0 for that
	 */
	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
		va_bits = 65;

	if (ssize == MMU_SEGSIZE_256M) {
		vsid_bits = va_bits - SID_SHIFT;
		protovsid = (context << ESID_BITS) |
			((ea >> SID_SHIFT) & ESID_BITS_MASK);
		return vsid_scramble(protovsid, VSID_MULTIPLIER_256M, vsid_bits);
	}
	/* 1T segment */
	vsid_bits = va_bits - SID_SHIFT_1T;
	protovsid = (context << ESID_BITS_1T) |
		((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK);
	return vsid_scramble(protovsid, VSID_MULTIPLIER_1T, vsid_bits);
}
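
/*
 * Example (illustrative): on a machine with MMU_FTR_68_BIT_VA, a 256M
 * lookup for context 5 and ea = 0x10000000 computes protovsid =
 * (5 << 21) | 1 and scrambles it with VSID_MULTIPLIER_256M over
 * vsid_bits = 40.
 */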

/*
 * This is only valid for addresses >= PAGE_OFFSET
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	if (!is_kernel_addr(ea))
		return 0;

	/*
	 * For kernel space, we use context ids 1-4 to map the address space as
	 * below:
	 *
	 * 0x00001 -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
	 * 0x00002 -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
	 * 0x00003 -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
	 * 0x00004 -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
	 *
	 * So we can compute the context from the region (top nibble) by
	 * subtracting 11, or 0xc - 1.
	 */
	context = (ea >> 60) - KERNEL_REGION_CONTEXT_OFFSET;

	return get_vsid(context, ea, ssize);
}
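
/*
 * Example (illustrative): ea = 0xd000000000000000 has top nibble 0xd, so
 * context = 0xd - 0xb = 2, matching the table above; a non-kernel
 * address returns VSID 0 via the is_kernel_addr() check.
 */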

unsigned htab_shift_for_mem_size(unsigned long mem_size);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */