#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>
#include <asm/bug.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE which we
 * need for various slices related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/processor.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

/*
 * ISA 3.0 has a different HPTE format.
 */
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */

#ifndef __ASSEMBLY__

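/*
 * Illustrative sketch (hypothetical helper, not kernel code): how the
 * tlbiel definitions above are used to flush the whole TLB one
 * congruence class (set) at a time, in the style of the machine-check
 * recovery code.  Call as, e.g., example_tlbiel_flush_all(POWER7_TLB_SETS).
 */
static inline void example_tlbiel_flush_all(unsigned int num_sets)
{
	unsigned long rb = TLBIEL_INVAL_SET;
	unsigned int i;

	for (i = 0; i < num_sets; i++) {
		asm volatile("tlbiel %0" : : "r" (rb) : "memory");
		rb += 1ul << TLBIEL_INVAL_SET_SHIFT;
	}
}
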
struct mmu_hash_ops {
	void            (*hpte_invalidate)(unsigned long slot,
					   unsigned long vpn,
					   int bpsize, int apsize,
					   int ssize, int local);
	long		(*hpte_updatepp)(unsigned long slot,
					 unsigned long newpp,
					 unsigned long vpn,
					 int bpsize, int apsize,
					 int ssize, unsigned long flags);
	void            (*hpte_updateboltedpp)(unsigned long newpp,
					       unsigned long ea,
					       int psize, int ssize);
	long		(*hpte_insert)(unsigned long hpte_group,
				       unsigned long vpn,
				       unsigned long prpn,
				       unsigned long rflags,
				       unsigned long vflags,
				       int psize, int apsize,
				       int ssize);
	long		(*hpte_remove)(unsigned long hpte_group);
	int             (*hpte_removebolted)(unsigned long ea,
					     int psize, int ssize);
	void		(*flush_hash_range)(unsigned long number, int local);
	void		(*hugepage_invalidate)(unsigned long vsid,
					       unsigned long addr,
					       unsigned char *hpte_slot_array,
					       int psize, int ssize, int local);
	/*
	 * Special for kexec.
	 * To be called in real mode with interrupts disabled. No locks are
	 * taken, so concurrent access on pre-POWER5 hardware could result
	 * in a deadlock.
	 * The linear mapping is destroyed as well.
	 */
	void		(*hpte_clear_all)(void);
};
extern struct mmu_hash_ops mmu_hash_ops;
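
/*
 * Illustrative sketch (hypothetical backend, not actual platform
 * code): a hash MMU backend fills in mmu_hash_ops from its boot-time
 * init hook, e.g.:
 *
 *	static void __init example_hpte_init(void)
 *	{
 *		mmu_hash_ops.hpte_invalidate = example_hpte_invalidate;
 *		mmu_hash_ops.hpte_insert     = example_hpte_insert;
 *		mmu_hash_ops.hpte_remove     = example_hpte_remove;
 *	}
 *
 * Generic hash code then dispatches through these pointers.
 */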

struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}
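
/*
 * Example (illustrative): with 64K pages configured,
 * shift_to_mmu_psize(16) returns MMU_PAGE_64K and
 * mmu_psize_to_shift(MMU_PAGE_64K) returns 16; a shift with no
 * matching entry in mmu_psize_defs[] yields -1 from
 * shift_to_mmu_psize().
 */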

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * Encoded page number shift.
 * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
 * 12 bits. This enables us to address up to a 76-bit VA.
 * For the HPT hash we can ignore the page-size bits of the VA, and for
 * HPTE encoding we ignore up to 23 bits of the VA. So ignoring the lower
 * 12 bits ensures we work in all cases, including with a 4K page size.
 */
#define VPN_SHIFT	12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

/*
 * The current system page and segment sizes
 */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE.  The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}
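
/*
 * Illustrative sketch (hypothetical helper, not kernel API): matching
 * a candidate hash PTE against a VPN.  This mirrors how the native
 * hash code compares the first dword of an HPTE when searching a
 * group.
 */
static inline int example_hpte_match(unsigned long hpte_v, unsigned long vpn,
				     int psize, int ssize)
{
	unsigned long want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* HPTE_V_COMPARE ignores the low 7 flag bits of both values */
	return HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID);
}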

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize, int ssize)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pa |= ((unsigned long) ssize) << HPTE_R_3_0_SSIZE_SHIFT;

	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}
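
/*
 * Illustrative sketch (hypothetical helper, not kernel API): building
 * both dwords of a bolted, valid HPTE for a 4K page from a VPN and a
 * "clean" real address, using the encoders above.
 */
static inline void example_build_hpte(unsigned long vpn, unsigned long pa,
				      int ssize, unsigned long *hpte_v,
				      unsigned long *hpte_r)
{
	*hpte_v = hpte_encode_v(vpn, MMU_PAGE_4K, MMU_PAGE_4K, ssize) |
		  HPTE_V_BOLTED | HPTE_V_VALID;
	*hpte_r = hpte_encode_r(pa, MMU_PAGE_4K, MMU_PAGE_4K, ssize) |
		  HPTE_R_M;	/* require memory coherence */
}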

/*
 * Build a VPN_SHIFT-bit shifted VA given VSID, EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	int mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}
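
/*
 * Illustrative sketch (hypothetical helper, not kernel API): how the
 * primary PTE-group slot for a VPN is typically derived from
 * hpt_hash() and htab_hash_mask.
 */
static inline unsigned long example_primary_hpte_group(unsigned long vpn,
						       unsigned int shift,
						       int ssize)
{
	unsigned long hash = hpt_hash(vpn, shift, ssize);

	/* Each PTE group holds HPTES_PER_GROUP (8) slots */
	return (hash & htab_hash_mask) * HPTES_PER_GROUP;
}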

#define HPTE_LOCAL_UPDATE	0x1
#define HPTE_NOHPTE_UPDATE	0x2

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);

extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from the mmu context id and effective segment id of the address.
 *
 * For user processes the max context id is limited to ((1ul << 19) - 5);
 * for kernel space, we use the top 4 context ids to map addresses as below.
 * NOTE: each context only supports 64TB now.
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We also consider VSID 0 special. We use VSID 0 for SLB entries mapping
 * a bad address. This enables us to consolidate bad address handling in
 * hash_page.
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a proto-VSID of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in the vsid scramble. But the vmemmap
 * (which is what uses region 0xf) will never be close to 64TB in size
 * (it's 56 bytes per page of system memory).
 */

#define CONTEXT_BITS		19
#define ESID_BITS		18
#define ESID_BITS_1T		6

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. The top 4 contexts are used for
 * kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
 * (19 == 37 + 28 - 46).
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 5)
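
/*
 * Worked example (illustrative): the region id of a kernel address
 * selects its context, so ea = 0xd000000000000000 uses context
 * MAX_USER_CONTEXT + (0xd - 0xc) + 1 = 0x7fffd, matching the table
 * in the VSID comment above; get_kernel_vsid() below computes this.
 */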

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus.
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)

#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low VSID_BITS bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* NOTE: explanation based on VSID_BITS_##size = 36		\
	 * Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
	 * the bit clear, r3 already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE  (H_PGTABLE_RANGE >> 41)

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k).  For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif /* 1 */

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	/*
	 * Bad address. We return VSID 0 for that
	 */
	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}

/*
 * This is only valid for addresses >= PAGE_OFFSET
 *
 * For kernel space, we use the top 4 context ids to map address as below
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	/*
	 * The kernel takes the top 4 contexts from the available range
	 */
	context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
	return get_vsid(context, ea, ssize);
}
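
/*
 * Illustrative sketch (hypothetical helper, not kernel API): the full
 * translation from a kernel effective address to the VPN used to hash
 * into the HPT, combining get_kernel_vsid() and hpt_vpn().
 */
static inline unsigned long example_kernel_vpn(unsigned long ea)
{
	int ssize = mmu_kernel_ssize;

	return hpt_vpn(ea, get_kernel_vsid(ea, ssize), ssize);
}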

unsigned htab_shift_for_mem_size(unsigned long mem_size);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */