hash_native_64.c
/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * 
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

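/*
 * Bit 3 of an HPTE's first doubleword is a software-use bit (the
 * hardware does not interpret it); we use it to lock the entry while
 * it is being modified.
 */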
#define HPTE_LOCK_BIT 3

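/*
 * Serializes tlbie on processors without MMU_FTR_LOCKLESS_TLBIE; some
 * hardware will deadlock if two CPUs issue a tlbie at the same time.
 */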
DEFINE_RAW_SPINLOCK(native_tlbie_lock);

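/*
 * Build the VA argument from @vpn and issue a global tlbie.  Callers
 * must hold native_tlbie_lock if the CPU lacks lockless tlbie.
 */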
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;

	/*
	 * We need 14 to 65 bits of va for a tlbie of a 4K page.
	 * With vpn we already ignore the lower VPN_SHIFT bits.
	 * And the top two bits are already ignored, because we can
	 * only accommodate 76 bits in a 64-bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64-bit va, non-SLS segment
	 * Older versions of the architecture (2.02 and earlier) require
	 * the masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		va |= mmu_psize_defs[apsize].sllp << 6;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/* Add AVAL part */
		if (psize != apsize) {
			/*
			 * MPSS: 64K base page size and 16MB large page size.
			 * We don't need all the bits, but the rest of the
			 * bits must be ignored by the processor.
			 * vpn covers up to 65 bits of va (0...65) and we
			 * need bits 58..64 of the va.
			 */
			va |= (vpn & 0xfe);
		}
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}

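/*
 * Local form: tlbiel invalidates the translation only on the executing
 * CPU, so no global serialization is needed.
 */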
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64-bit va, non-SLS segment
	 * Older versions of the architecture (2.02 and earlier) require
	 * the masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		va |= mmu_psize_defs[apsize].sllp << 6;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/* Add AVAL part */
		if (psize != apsize) {
			/*
			 * MPSS: 64K base page size and 16MB large page size.
			 * We don't need all the bits, but the rest of the
			 * bits must be ignored by the processor.
			 * vpn covers up to 65 bits of va (0...65) and we
			 * need bits 58..64 of the va.
			 */
			va |= (vpn & 0xfe);
		}
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}
}

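/*
 * Flush @vpn from the TLB, using tlbiel when a local flush is both
 * requested and supported for this page size, and a (possibly locked)
 * global tlbie otherwise.
 */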
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

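/*
 * Lock an HPTE by atomically setting HPTE_LOCK_BIT in its first
 * doubleword, spinning while the bit is held by someone else.
 */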
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while(test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

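/*
 * Try to insert an HPTE into the given group.  Returns the slot index
 * within the group (with bit 3 set for the secondary hash), or -1 if
 * the group is full.
 */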
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (! (hptep->v & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (! (hptep->v & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = hpte_r;
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = hpte_v;

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

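/*
 * Evict a (more or less) random non-bolted entry from the group to
 * make room for a new insertion.  This does not flush the TLB; see the
 * comments in the update/invalidate paths below.
 */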
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = hptep->v;

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

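/*
 * Update the protection bits of the HPTE at @slot if it still matches
 * @vpn; returns -1 on a mismatch.  The TLB entry is flushed either way.
 */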
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	native_lock_hpte(hptep);

	hpte_v = hptep->v;
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		DBG_LOW(" -> hit\n");
		/* Update the HPTE */
		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
	}
	native_unlock_hpte(hptep);

	/* Ensure it is out of the tlb too. */
	tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}

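/*
 * Find the slot of a bolted HPTE for @vpn.  Only the primary hash
 * group is searched.
 */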
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = hptep->v;

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N));
	/*
	 * Ensure it is out of the tlb too. A bolted entry's base and
	 * actual page sizes will be the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}

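/*
 * Invalidate the HPTE at @slot if it still matches @vpn, then flush
 * the translation from the TLB.
 */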
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	native_lock_hpte(hptep);
	hpte_v = hptep->v;

	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

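/*
 * Given the 8-bit LP field of an HPTE and a candidate base page size,
 * return the actual page size encoded by LP, or -1 if LP is not a
 * valid encoding for that base size.
 */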
static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz		>=8KB
		 *    rrrr rrzz		>=16KB
		 *    rrrr rzzz		>=32KB
		 *    rrrr zzzz		>=64KB
		 * .......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}

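/*
 * Recover the vpn, segment size and base/actual page sizes from a raw
 * HPTE and its slot number.  Used by native_hpte_clear(), which must
 * walk the hash table without any Linux PTE information.
 */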
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = hpte->v;
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_size = __hpte_actual_psize(lp, size);
			if (a_size != -1)
				break;
		}
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid    = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid    = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}

/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled.  Remember, no dynamic memory
 * here, although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/* we take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	raw_spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running,  right?  and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = hptep->v;

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
	raw_spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}

/*
 * Batched hash table flush; we batch the tlbie's to avoid taking/releasing
 * the lock all the time.
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

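/*
 * Install the native (bare-metal) hash MMU callbacks in ppc_md.
 */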
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate	= native_hpte_invalidate;
	ppc_md.hpte_updatepp	= native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	= native_hpte_insert;
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;
	ppc_md.flush_hash_range = native_flush_hash_range;
}