#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>
#include <asm/bug.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE which we
 * need for various slice-related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/processor.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

/*
 * ISA 3.0 has a different HPTE format.
 */
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */

#ifndef __ASSEMBLY__

struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;
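
/*
 * Illustrative sketch, not part of the kernel API: testing whether a
 * hash PTE slot is valid and matches an expected first dword, in the
 * style of the native hash code. The helper name is made up, and
 * be64_to_cpu() is assumed to be in scope.
 */
static inline int example_hpte_matches(struct hash_pte *hptep,
				       unsigned long want_v)
{
	unsigned long v = be64_to_cpu(hptep->v);

	/* HPTE_V_COMPARE() ignores the low 7 bits (flags) of both values */
	return (v & HPTE_V_VALID) && HPTE_V_COMPARE(v, want_v);
}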


static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}
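
/*
 * For example, shift_to_mmu_psize(12) returns MMU_PAGE_4K, and
 * shift_to_mmu_psize(24) returns MMU_PAGE_16M provided a 16M page size
 * was detected at boot (mmu_psize_defs[] is populated from the firmware
 * properties); otherwise it returns -1.
 */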

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * Encode the page number shift.
 * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
 * 12 bits. This enables us to address up to a 76-bit VA.
 * For the HPT hash from a VA we can ignore the page size bits of the VA,
 * and for HPTE encoding we ignore up to 23 bits of the VA. So ignoring
 * the lower 12 bits ensures we work in all cases, including 4K page size.
 */
#define VPN_SHIFT	12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

/*
 * The current system page and segment sizes
 */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE.  The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize, int ssize)
{

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pa |= ((unsigned long) ssize) << HPTE_R_3_0_SSIZE_SHIFT;

	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}
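
/*
 * Illustrative sketch, not kernel code: composing both dwords of a
 * valid, bolted HPTE from a VPN and a physical address, in the style of
 * the native hash routines. The helper name is made up; rflags would
 * carry the WIMG/PP bits and cpu_to_be64() is assumed to be in scope.
 */
static inline void example_compose_hpte(unsigned long vpn, unsigned long pa,
					unsigned long rflags, int psize,
					int ssize, struct hash_pte *hpte)
{
	unsigned long hpte_v = hpte_encode_v(vpn, psize, psize, ssize) |
			       HPTE_V_VALID | HPTE_V_BOLTED;
	unsigned long hpte_r = hpte_encode_r(pa, psize, psize, ssize) | rflags;

	/* real insertion code also orders these two stores with a barrier */
	hpte->r = cpu_to_be64(hpte_r);
	hpte->v = cpu_to_be64(hpte_v);
}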

/*
 * Build a VPN_SHIFT-bit shifted VA given the VSID, EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	int mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}

#define HPTE_LOCAL_UPDATE	0x1
#define HPTE_NOHPTE_UPDATE	0x2

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);

extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from the MMU context id and the effective segment id of the address.
 *
 * For user processes the max context id is limited to ((1ul << 19) - 5);
 * for kernel space, we use the top 4 context ids to map addresses as below.
 * NOTE: each context only supports 64TB for now.
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We also consider VSID 0 special. We use VSID 0 for SLB entries mapping
 * a bad address. This enables us to consolidate bad address handling in
 * hash_page.
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in vsid scramble. But the vmemmap
 * (which is what uses region 0xf) will never be close to 64TB in size
 * (it's 56 bytes per page of system memory).
 */

#define CONTEXT_BITS		19
#define ESID_BITS		18
#define ESID_BITS_1T		6

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. The top 4 contexts are used for
 * kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
 * (19 == 37 + 28 - 46).
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 5)

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus.
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)


#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 * 	- rt and rx must be different registers
 * 	- The answer will end up in the low VSID_BITS bits of rt.  The higher
 * 	  bits may contain other garbage, so you may need to mask the
 * 	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* NOTE: explanation based on VSID_BITS_##size = 36		\
	 * Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
	 * the bit clear, r3 already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE  (H_PGTABLE_RANGE >> 41)

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k).  For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */
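
#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * Illustrative sketch, not kernel code, of a walk down the 3-level tree
 * described above, returning the sub-page protection word covering an
 * address. The helper name is made up; the real lookup lives in the
 * hash fault path.
 */
static inline unsigned int example_subpage_prot(struct subpage_prot_table *spt,
						unsigned long ea)
{
	unsigned int **sbpm, *sbpp;

	if (ea >= spt->maxaddr)
		return 0;
	if (ea < 0x100000000UL) {
		/* addresses below 4GB use spt->low_prot */
		sbpm = spt->low_prot;
	} else {
		sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
		if (!sbpm)
			return 0;
	}
	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
	if (!sbpp)
		return 0;
	/* one 4-byte word of 2-bit fields protects each 64k page */
	return sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
}
#endif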

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif /* 1 */
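
/*
 * Worked example of the fold above, taking VSID_BITS = 36: because
 * 2^36 == 1 (mod 2^36 - 1), x mod (2^36 - 1) can be computed as
 * (x >> 36) + (x & (2^36 - 1)) followed by one conditional fold of the
 * carry, with no divide instruction.
 */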

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	/*
	 * Bad address. We return VSID 0 for that
	 */
	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}

/*
 * This is only valid for addresses >= PAGE_OFFSET
 *
 * For kernel space, we use the top 4 context ids to map address as below
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	/*
	 * The kernel takes the top 4 contexts from the available range.
	 */
	context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
	return get_vsid(context, ea, ssize);
}
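
/*
 * Illustrative sketch, not kernel code: combining the helpers above to
 * find the slot index of the primary PTE group for a kernel address
 * mapped with 4K pages. The helper name is made up.
 */
static inline unsigned long example_kernel_pteg(unsigned long ea)
{
	int ssize = mmu_kernel_ssize;
	unsigned long vsid = get_kernel_vsid(ea, ssize);
	unsigned long vpn  = hpt_vpn(ea, vsid, ssize);
	unsigned long hash = hpt_hash(vpn, 12, ssize);	/* 4K => shift 12 */

	return (hash & htab_hash_mask) * HPTES_PER_GROUP;
}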

unsigned htab_shift_for_mem_size(unsigned long mem_size);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */