/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * 
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#include <misc/cxl-base.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif
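
/*
 * Note: HPTEs are always stored big-endian in memory. The lock bit is
 * one of the software-reserved bits in the HPTE's first doubleword; on
 * little-endian kernels the bit number is offset by 56 so that the
 * generic bitops land on the same byte of that doubleword.
 */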

DEFINE_RAW_SPINLOCK(native_tlbie_lock);

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of a 4K page.
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And the top two bits are already ignored, because we can
	 * only accommodate 76 bits in a 64-bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64-bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}

static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64-bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe);
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}

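/*
 * tlbie() chooses between a local (tlbiel) and a global (tlbie)
 * invalidation. A global tlbie is forced while a cxl context is in
 * use, since tlbiel would not reach the accelerator's cached
 * translations, and CPUs without MMU_FTR_LOCKLESS_TLBIE must
 * serialise global tlbies behind native_tlbie_lock.
 */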
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

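/*
 * Per-HPTE locking: spin until the software lock bit in the HPTE's
 * first doubleword is clear, then take it with acquire semantics.
 * Writing hptep->v directly (as the insert/invalidate paths do) also
 * releases the lock, since the lock bit lives in that word.
 */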
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

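/*
 * Insert a translation: scan the eight slots of the group for an
 * invalid entry, lock it, then write the second doubleword before the
 * first so the HPTE never becomes visible half-formed. Returns the
 * slot index within the group, with bit 3 set for a secondary-hash
 * insertion, or -1 if the group is full.
 */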
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

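/*
 * Make room in a full hash group by evicting one valid, non-bolted
 * entry, starting the search at a pseudo-random slot (timebase low
 * bits). No TLB invalidation is done here; callers must flush the
 * stale translation themselves (see the comments in the update and
 * invalidate paths below).
 */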
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

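/*
 * Update the protection bits of an existing HPTE. Returns -1 if the
 * entry no longer matches (e.g. it was evicted) so the caller can
 * re-fault it in, 0 on success. The TLB is flushed in either case
 * unless the caller passed HPTE_NOHPTE_UPDATE.
 */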
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, unsigned long flags)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0, local = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	hpte_v = be64_to_cpu(hptep->v);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = be64_to_cpu(hptep->v);
		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
			     !(hpte_v & HPTE_V_VALID))) {
			ret = -1;
		} else {
			DBG_LOW(" -> hit\n");
			/* Update the HPTE */
			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
						~(HPTE_R_PPP | HPTE_R_N)) |
					       (newpp & (HPTE_R_PPP | HPTE_R_N |
							 HPTE_R_C)));
		}
		native_unlock_hpte(hptep);
	}

	if (flags & HPTE_LOCAL_UPDATE)
		local = 1;
	/*
	 * Ensure it is out of the tlb too if it is not a nohpte fault
	 */
	if (!(flags & HPTE_NOHPTE_UPDATE))
		tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}

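/*
 * Find the slot of a bolted mapping. Only the primary group is
 * searched, because bolted entries are only ever inserted there.
 */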
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = be64_to_cpu(hptep->v);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
				~(HPTE_R_PPP | HPTE_R_N)) |
			       (newpp & (HPTE_R_PPP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. A bolted entry's base and
	 * actual page size will be the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}

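/*
 * Invalidate a single HPTE and flush the stale translation from the
 * TLB. The lock/invalidate/tlbie sequence runs with interrupts
 * disabled.
 */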
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	native_lock_hpte(hptep);
	hpte_v = be64_to_cpu(hptep->v);

	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

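/*
 * A transparent hugepage is backed by a number of normal-sized HPTEs,
 * one per base page that has been faulted in, with their hash slots
 * recorded in hpte_slot_array. Walk the array and invalidate each one.
 */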
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	int i;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		native_lock_hpte(hptep);
		hpte_v = be64_to_cpu(hptep->v);

		/* Even if we miss, we need to invalidate the TLB */
		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
			native_unlock_hpte(hptep);
		else
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
		/*
		 * We need to do a tlb invalidate for each address; the tlbie
		 * instruction compares the entry's VA in the TLB with the VA
		 * specified here.
		 */
		tlbie(vpn, psize, actual_psize, ssize, local);
	}
	local_irq_restore(flags);
}
#else
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

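/*
 * Run the hash function backwards: given a raw HPTE and its slot
 * number, recover the page size, segment size and VPN it maps. The VA
 * bits that were folded into the hash are recovered by XORing the VSID
 * back out of the PTEG index.
 */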
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		size = hpte_page_sizes[lp] & 0xf;
		a_size = hpte_page_sizes[lp] >> 4;
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		*ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT;
	else
		*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;

	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid    = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid    = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}

/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre-POWER5 hardware, not taking the lock could
 * cause deadlock. On POWER5 and newer, not taking the lock is fine. This
 * only gets called during boot before secondary CPUs have come up and
 * during crashdump, and all bets are off anyway.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right?  and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call __tlbie() here rather than tlbie() since we can't take the
		 * native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
}

/*
 * Batched hash table flush: we batch the tlbie's to avoid taking/releasing
 * the lock all the time.
 */
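/*
 * The flush below is two-phase: first every HPTE in the batch is
 * invalidated under its per-entry lock, then the TLB entries are
 * flushed together, so a single ptesync (plus tlbsync for the global
 * case) covers the whole batch.
 */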
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

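/*
 * On ISA 3.00 (POWER9) the process table is advertised to the hardware
 * through the second doubleword of the partition table entry, built
 * below from the base address, the sllp page-size encoding and the
 * table size.
 */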
static int native_register_proc_table(unsigned long base, unsigned long page_size,
				      unsigned long table_size)
{
	unsigned long patb1 = base << 25; /* VSID */

	patb1 |= (page_size << 5);  /* sllp */
	patb1 |= table_size;

	partition_tb->patb1 = cpu_to_be64(patb1);
	return 0;
}

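/*
 * Wire up the native (bare metal) hash MMU callbacks. Other backends
 * (e.g. pseries via hcalls) install their own implementations of these
 * mmu_hash_ops.
 */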
void __init hpte_init_native(void)
{
	mmu_hash_ops.hpte_invalidate	= native_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp	= native_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
	mmu_hash_ops.hpte_insert	= native_hpte_insert;
	mmu_hash_ops.hpte_remove	= native_hpte_remove;
	mmu_hash_ops.hpte_clear_all	= native_hpte_clear;
	mmu_hash_ops.flush_hash_range = native_flush_hash_range;
	mmu_hash_ops.hugepage_invalidate   = native_hugepage_invalidate;

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		register_process_table = native_register_proc_table;
}