hash_native_64.c 16.0 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11
/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * 
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
12 13 14

#undef DEBUG_LOW

L
Linus Torvalds 已提交
15 16
#include <linux/spinlock.h>
#include <linux/bitops.h>
17
#include <linux/of.h>
L
Linus Torvalds 已提交
18 19 20 21 22 23 24 25 26 27
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
28
#include <asm/udbg.h>
29
#include <asm/kexec.h>
30
#include <asm/ppc-opcode.h>
31 32 33 34 35 36

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif
L
Linus Torvalds 已提交
37 38 39

#define HPTE_LOCK_BIT 3

40
DEFINE_RAW_SPINLOCK(native_tlbie_lock);
L
Linus Torvalds 已提交
41

42
/*
 * Issue a global "tlbie" instruction to invalidate the TLB entry for
 * @vpn, encoding the VA operand as the architecture requires for the
 * given base page size (@psize), actual page size (@apsize) and
 * segment size (@ssize).
 *
 * Caller must provide the surrounding ptesync/tlbsync sequence and,
 * on hardware without lockless tlbie, hold native_tlbie_lock.
 */
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;

	/*
	 * We need 14 to 65 bits of va for a tlbie of 4K page
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		va |= ssize << 8;
		/*
		 * Use the new PPC_TLBIE form (RS operand) on ARCH 2.06+,
		 * the old "tlbie rb,0" form otherwise.
		 */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}

83
/*
 * Local (per-CPU) TLB invalidation: same VA encoding as __tlbie() but
 * issues the "tlbiel" opcode (hand-encoded as a .long since older
 * assemblers may not know it).  Only invalidates this CPU's TLB.
 *
 * Caller provides the surrounding ptesync barriers.
 */
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		va |= ssize << 8;
		/* 0x7c000224 is the tlbiel opcode; L bit (bit 21) clear */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		va |= 1; /* L */
		/* tlbiel with L bit set for a large page */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}

}

117 118
/*
 * Invalidate a TLB entry, choosing the local form (tlbiel) when the
 * caller asks for it and the MMU/page size supports it, otherwise the
 * global form (tlbie) with the required broadcast barriers.
 *
 * On hardware without MMU_FTR_LOCKLESS_TLBIE, global tlbie operations
 * are serialized through native_tlbie_lock to avoid a hardware
 * deadlock when two CPUs tlbie concurrently.
 */
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		/* tlbsync orders the broadcast invalidation system-wide */
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

139
/*
 * Acquire the per-HPTE software lock, implemented as HPTE_LOCK_BIT in
 * the first doubleword of the entry.  Spins with a cheap non-atomic
 * test_bit() between test_and_set attempts to reduce cacheline
 * bouncing while contended.
 */
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while(test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

151
/*
 * Release the per-HPTE software lock taken by native_lock_hpte().
 * clear_bit_unlock() provides the release-ordering barrier.
 */
static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

158
/*
 * Insert a new hashed page table entry into the given group.
 *
 * Scans the 8 slots of the group for an invalid entry, re-checking
 * under the per-HPTE lock before claiming it.  Returns the slot index
 * within the group with bit 3 set if the entry went into the
 * secondary hash, or -1 if the group is full (caller may then try
 * native_hpte_remove() to evict an entry).
 */
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	/* DBG_LOW may not be safe for bolted (early boot) insertions */
	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (! (hptep->v & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (! (hptep->v & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = hpte_r;
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = hpte_v;

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

/*
 * Evict a random non-bolted entry from the given HPTE group to make
 * room for a new insertion.  Returns the slot index within the group
 * that was invalidated, or -1 if every entry is bolted.
 *
 * NOTE: does not issue a tlbie; the caller is responsible for any TLB
 * invalidation of the evicted translation.
 */
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = hptep->v;

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286
/*
 * Recover the actual page size of a valid HPTE from its LP encoding,
 * given the base page size @psize of the segment it belongs to.
 *
 * Returns an MMU_PAGE_* index, or -1 if the entry is invalid or the
 * LP bits do not match any penc value for @psize.
 */
static inline int hpte_actual_psize(struct hash_pte *hptep, int psize)
{
	int i, shift;
	unsigned int mask;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hptep->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (!(hptep->v & HPTE_V_VALID))
		return -1;

	/* First check if it is large page */
	if (!(hptep->v & HPTE_V_LARGE))
		return MMU_PAGE_4K;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz		>=8KB
		 *    rrrr rrzz		>=16KB
		 *    rrrr rzzz		>=32KB
		 *    rrrr zzzz		>=64KB
		 * .......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}

287
/*
 * Update the protection bits (PP, N, C) of the HPTE at @slot if it
 * still maps @vpn.  Returns 0 on hit, -1 on miss or if the entry's
 * actual page size cannot be decoded.
 *
 * The TLB entry is invalidated on both hit and miss (a stale
 * translation may exist either way).
 */
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int psize, int ssize,
				 int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;
	int actual_psize;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	native_lock_hpte(hptep);

	hpte_v = hptep->v;
	actual_psize = hpte_actual_psize(hptep, psize);
	/* entry invalid or undecodable: nothing to update or flush */
	if (actual_psize < 0) {
		native_unlock_hpte(hptep);
		return -1;
	}
	/* Even if we miss, we need to invalidate the TLB */
	if (!HPTE_V_COMPARE(hpte_v, want_v)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		DBG_LOW(" -> hit\n");
		/* Update the HPTE */
		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
	}
	native_unlock_hpte(hptep);

	/* Ensure it is out of the tlb too. */
	tlbie(vpn, psize, actual_psize, ssize, local);

	return ret;
}

327
/*
 * Locate the hash table slot mapping @vpn, searching only the primary
 * group (sufficient for bolted mappings, which are never placed in
 * the secondary group).  Returns the absolute slot number, or -1 if
 * no matching valid entry exists.
 */
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = hptep->v;

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	int actual_psize;
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;
	/*
	 * NOTE(review): an undecodable page size is silently ignored here
	 * (unlike the panic above for a missing slot) — confirm intended.
	 */
	actual_psize = hpte_actual_psize(hptep, psize);
	if (actual_psize < 0)
		return;

	/* Update the HPTE */
	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N));

	/* Ensure it is out of the tlb too. */
	tlbie(vpn, psize, actual_psize, ssize, 0);
}

388
/*
 * Invalidate the HPTE at @slot if it still maps @vpn, then flush the
 * translation from the TLB.  Runs with interrupts disabled so the
 * lock/tlbie sequence cannot be interrupted on this CPU.
 *
 * The TLB is flushed even on a mismatch, since a stale translation
 * for @vpn may exist regardless.
 */
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int psize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	int actual_psize;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	native_lock_hpte(hptep);
	hpte_v = hptep->v;

	/* entry invalid or undecodable: nothing to invalidate */
	actual_psize = hpte_actual_psize(hptep, psize);
	if (actual_psize < 0) {
		native_unlock_hpte(hptep);
		local_irq_restore(flags);
		return;
	}
	/* Even if we miss, we need to invalidate the TLB */
	if (!HPTE_V_COMPARE(hpte_v, want_v))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, psize, actual_psize, ssize, local);

	local_irq_restore(flags);
}

424
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
425
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
426
{
427
	unsigned long avpn, pteg, vpi;
428 429
	unsigned long hpte_r = hpte->r;
	unsigned long hpte_v = hpte->v;
430
	unsigned long vsid, seg_off;
431
	int i, size, a_size, shift, penc;
432

433 434 435 436
	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
437 438 439 440 441 442
		for (i = 0; i < LP_BITS; i++) {
			if ((hpte_r & LP_MASK(i+1)) == LP_MASK(i+1))
				break;
		}
		penc = LP_MASK(i+1) >> LP_SHIFT;
		for (size = 0; size < MMU_PAGE_COUNT; size++) {
443

444 445 446
			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;
447
			for (a_size = 0; a_size < MMU_PAGE_COUNT; a_size++) {
448

449 450 451 452 453 454 455 456 457 458 459
				/* 4K pages are not represented by LP */
				if (a_size == MMU_PAGE_4K)
					continue;

				/* valid entries have a shift value */
				if (!mmu_psize_defs[a_size].shift)
					continue;

				if (penc == mmu_psize_defs[size].penc[a_size])
					goto out;
			}
460 461
		}
	}
462

463
out:
464
	/* This works for all page sizes, and for 256M and 1T segments */
465
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
466 467
	shift = mmu_psize_defs[size].shift;

468 469 470 471 472 473 474 475 476 477 478 479 480 481 482
	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid    =  avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
483
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
484 485 486 487 488
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid    = avpn >> 17;
		if (shift < 23) {
489
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
490
			seg_off |= vpi << shift;
491
		}
492
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
493
	default:
494
		*vpn = size = 0;
495
	}
496 497
	*psize  = size;
	*apsize = a_size;
498 499
}

500 501 502 503 504 505 506 507 508 509
/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/* we take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	raw_spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running,  right?  and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = hptep->v;

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
	raw_spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}

552 553 554 555
/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	/* per-CPU batch filled in by the generic hash flush code */
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	/* Pass 1: invalidate every matching HPTE in the batch */
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	/* Pass 2: flush the TLB — locally if possible, else broadcast */
	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		/* take the lock once for the whole batch */
		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

633
/*
 * Install the native (bare-metal) HPTE management callbacks into the
 * machine description.  Called once at boot when running without a
 * hypervisor mediating hash table access.
 */
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate	= native_hpte_invalidate;
	ppc_md.hpte_updatepp	= native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	= native_hpte_insert;
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;
	ppc_md.flush_hash_range = native_flush_hash_range;
}