/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * 
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/abs_addr.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

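/*
 * Bit in the first (valid) dword of an HPTE used as a per-entry
 * spinlock by native_lock_hpte()/native_unlock_hpte() below; it must
 * be a bit that the hardware never writes.
 */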
#define HPTE_LOCK_BIT 3

static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

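/*
 * Issue a broadcast tlbie for one page.  The page size (L flag plus
 * penc bits) and the segment size are encoded into the VA operand as
 * the architecture requires.
 */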
static inline void __tlbie(unsigned long va, int psize, int ssize)
{
	unsigned int penc;

	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		va &= ~0xffful;
		va |= ssize << 8;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		penc = mmu_psize_defs[psize].penc;
		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}

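/*
 * Local form of the above: tlbiel only invalidates the translation on
 * the executing CPU, so no broadcast or tlbsync is needed.  The
 * instruction is emitted as a raw opcode (0x7c000224), with bit 21 of
 * the instruction word carrying the L (large page) flag.
 */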
static inline void __tlbiel(unsigned long va, int psize, int ssize)
{
	unsigned int penc;

	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		va &= ~0xffful;
		va |= ssize << 8;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		penc = mmu_psize_defs[psize].penc;
		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}
}

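/*
 * Invalidate one page translation, using the local tlbiel form when
 * the caller allows it and the page size supports it.  Global tlbie
 * is serialized via native_tlbie_lock on hardware that cannot handle
 * concurrent tlbies.
 */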
static inline void tlbie(unsigned long va, int psize, int ssize, int local)
{
	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(va, psize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(va, psize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

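/*
 * Acquire the per-HPTE lock by atomically setting HPTE_LOCK_BIT in
 * the first dword, spinning with cpu_relax() while another CPU holds
 * the bit.
 */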
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

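/* Drop the per-HPTE lock; clear_bit_unlock() provides the release barrier. */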
static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

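/*
 * Insert an HPTE into the given group: scan the group for an invalid
 * slot, write the second dword first, then set the valid bit with an
 * eieio in between so the entry is never visible half-formed.
 * Returns the slot index, with bit 3 set for a secondary-hash insert,
 * or -1 if the group is full.
 */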
static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, va=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, va, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(hptep->v & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(hptep->v & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = hpte_r;
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = hpte_v;

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

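/*
 * Make room in a full group by evicting one non-bolted entry,
 * starting the scan at a pseudo-random slot so evictions spread
 * across the group.  Returns the freed slot, or -1 if every entry is
 * bolted.
 */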
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = hptep->v;

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

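/*
 * Update the protection bits of the HPTE at @slot, provided it still
 * maps @va, then flush the stale translation from the TLB.  Returns 0
 * on success or -1 if the entry no longer matches (e.g. it has been
 * evicted in the meantime).
 */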
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long va, int psize, int ssize,
				 int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;

	want_v = hpte_encode_v(va, psize, ssize);

	DBG_LOW("    update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)",
		va, want_v & HPTE_V_AVPN, slot, newpp);

	native_lock_hpte(hptep);

	hpte_v = hptep->v;

	/* Even if we miss, we need to invalidate the TLB */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		DBG_LOW(" -> hit\n");
		/* Update the HPTE */
		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
	}
	native_unlock_hpte(hptep);

	/* Ensure it is out of the tlb too. */
	tlbie(va, psize, ssize, local);

	return ret;
}

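/*
 * Look up the slot holding the bolted HPTE for @va.  Only the primary
 * group is searched, since bolted translations are never inserted
 * into the secondary group.  Returns -1 if no match is found.
 */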
static long native_hpte_find(unsigned long va, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_v(va, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = hptep->v;

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vsid, va;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	va = hpt_va(ea, vsid, ssize);

	slot = native_hpte_find(va, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N));

	/* Ensure it is out of the tlb too. */
	tlbie(va, psize, ssize, 0);
}

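/*
 * Invalidate the HPTE at @slot if it still matches @va, then flush
 * the translation from the TLB; interrupts are disabled across the
 * lock/tlbie sequence.
 */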
static void native_hpte_invalidate(unsigned long slot, unsigned long va,
				   int psize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(va=%016lx, hash: %x)\n", va, slot);

	want_v = hpte_encode_v(va, psize, ssize);
	native_lock_hpte(hptep);
	hpte_v = hptep->v;

	/* Even if we miss, we need to invalidate the TLB */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(va, psize, ssize, local);

	local_irq_restore(flags);
}

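/*
 * Layout of the LP (large page) field in the second HPTE dword, used
 * below to recover the page size encoding (penc) from a raw entry.
 */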
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

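/*
 * Reverse an HPTE into the page size, segment size and virtual
 * address it maps.  Used by native_hpte_clear() at kexec time, when
 * no Linux PTE is available to tell us what an entry covers.
 */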
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *ssize, unsigned long *va)
{
	unsigned long hpte_r = hpte->r;
	unsigned long hpte_v = hpte->v;
	unsigned long avpn;
	int i, size, shift, penc;

	if (!(hpte_v & HPTE_V_LARGE))
		size = MMU_PAGE_4K;
	else {
		for (i = 0; i < LP_BITS; i++) {
			if ((hpte_r & LP_MASK(i+1)) == LP_MASK(i+1))
				break;
		}
		penc = LP_MASK(i+1) >> LP_SHIFT;
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* 4K pages are not represented by LP */
			if (size == MMU_PAGE_4K)
				continue;

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			if (penc == mmu_psize_defs[size].penc)
				break;
		}
	}

	/* This works for all page sizes, and for 256M and 1T segments */
	shift = mmu_psize_defs[size].shift;
	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm) << 23;

	if (shift < 23) {
		unsigned long vpi, vsid, pteg;

		pteg = slot / HPTES_PER_GROUP;
		if (hpte_v & HPTE_V_SECONDARY)
			pteg = ~pteg;
		switch (hpte_v >> HPTE_V_SSIZE_SHIFT) {
		case MMU_SEGSIZE_256M:
			vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask;
			break;
		case MMU_SEGSIZE_1T:
			vsid = avpn >> 40;
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			break;
		default:
			avpn = vpi = size = 0;
		}
		avpn |= (vpi << mmu_psize_defs[size].shift);
	}

	*va = avpn;
	*psize = size;
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
}

/*
 * Clear all mappings on kexec.  All CPUs are in real mode (or they
 * will be when they isi), and we are the only one left.  We rely on
 * our kernel mapping being 0xC0's and the hardware ignoring those two
 * real bits.
 *
 * TODO: add batching support when enabled.  Remember, no dynamic
 * memory here, although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v, va;
	unsigned long pteg_count;
	int psize, ssize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/* we take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	raw_spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right?  and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = hptep->v;

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &ssize, &va);
			hptep->v = 0;
			__tlbie(va, psize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
	raw_spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}

/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long va, hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		va = batch->vaddr[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
			hash = hpt_hash(va, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_v(va, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			va = batch->vaddr[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize, va, index,
						    shift) {
				__tlbiel(va, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			va = batch->vaddr[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize, va, index,
						    shift) {
				__tlbie(va, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_PPC_PSERIES
/* Disable TLB batching on nighthawk */
static inline int tlb_batching_enabled(void)
{
	struct device_node *root = of_find_node_by_path("/");
	int enabled = 1;

	if (root) {
		const char *model = of_get_property(root, "model", NULL);
		if (model && !strcmp(model, "IBM,9076-N81"))
			enabled = 0;
		of_node_put(root);
	}

	return enabled;
}
#else
static inline int tlb_batching_enabled(void)
{
	return 1;
}
#endif

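/*
 * Hook the native (bare-metal) hash table operations into ppc_md,
 * used when the kernel owns the hash table directly rather than
 * calling a hypervisor.
 */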
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate	= native_hpte_invalidate;
	ppc_md.hpte_updatepp	= native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	= native_hpte_insert;
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;

	if (tlb_batching_enabled())
		ppc_md.flush_hash_range = native_flush_hash_range;
}