/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 * 
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
#include <asm/asm-prototypes.h>

#include "pseries.h"

/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL
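/*
 * As the bulk-remove paths below illustrate, each H_BULK_REMOVE
 * translation specifier occupies two hcall parameters: a control word
 * (HBR_REQUEST plus the match flags and the HPTE index) followed by the
 * AVPN to compare against.  Up to four such pairs are packed into one
 * plpar_hcall9(), and a partial batch is terminated with HBR_END.
 */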

/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

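/*
 * Register the per-cpu virtual processor area (lppaca) with the
 * hypervisor, along with the SLB shadow buffer and the dispatch trace
 * log if one has been allocated.  The caller is expected to be running
 * on the cpu it is registering (see the WARN_ON below).
 */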
void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that.  All SPLPARs support the SLB shadow buffer.
	 */
	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
		addr = __pa(paca[cpu].slb_shadow_ptr);
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}

#ifdef CONFIG_PPC_STD_MMU_64

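/*
 * hpte_insert backend for an LPAR: build the HPTE locally and ask the
 * hypervisor to install it with H_ENTER (via plpar_pte_enter()).
 * Returns the slot within the group, with bit 3 set for the secondary
 * hash bucket, -1 if the group is full, or -2 on any other error.
 */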
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn,  pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0         */
	/* Zero page = 0               */
	/* I-cache Invalidate = 0      */
	/* I-cache synchronize = 0     */
	/* Exact = 0                   */
	flags = 0;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" lpar err %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

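/*
 * Make room in a full HPTE group: starting from a pseudo-random slot,
 * try H_REMOVE with the ANDCOND flag so that bolted entries are never
 * evicted.  Returns -1 if every entry in the group turned out to be
 * bolted.
 */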
static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test.  H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

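/*
 * Clearing the whole hash table (e.g. before a kexec) is done either
 * with a single H_CLEAR_HPT hcall or, if the hypervisor does not
 * support it, by reading entries back four at a time and removing every
 * valid one that is not part of the VRMA.
 */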
static void manual_hpte_clear_all(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/*
	 * Read in batches of 4; invalidate only valid entries not in the
	 * VRMA. hpte_count will be a multiple of 4.
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++){
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}
}

static int hcall_hpte_clear_all(void)
{
	int rc;

	do {
		rc = plpar_hcall_norets(H_CLEAR_HPT);
	} while (rc == H_CONTINUE);

	return rc;
}

static void pseries_hpte_clear_all(void)
{
	int rc;

	rc = hcall_hpte_clear_all();
	if (rc != H_SUCCESS)
		manual_hpte_clear_all();

#ifdef __LITTLE_ENDIAN__
	/*
	 * Reset exceptions to big endian.
	 *
	 * FIXME this is a hack for kexec, we need to reset the exception
	 * endian before starting the new kernel and this is a convenient place
	 * to do it.
	 *
	 * This is also called on boot when a fadump happens. In that case we
	 * must not change the exception endian mode.
	 */
	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active())
		pseries_big_endian_exceptions();
#endif
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, unsigned long inv_flags)
{
	unsigned long lpar_rc;
	unsigned long flags = (newpp & 7) | H_AVPN;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

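/*
 * The hypervisor owns the hash table, so looking up an entry means
 * reading the group back with H_READ (plpar_pte_read_4()), four HPTEs
 * per call, and comparing the AVPN of each valid entry against the one
 * we expect.  Returns the slot offset within the group, or -1 if the
 * entry is not present.
 */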
static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
{
	long lpar_rc;
	unsigned long i, j;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];

	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {

		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;

		for (j = 0; j < 4; j++) {
			if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
			    (ptes[j].pteh & HPTE_V_VALID))
				return i + j;
		}
	}

	return -1;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	long slot;
	unsigned long hash;
	unsigned long want_v;
	unsigned long hpte_group;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
	if (slot < 0)
		return -1;
	return hpte_group + slot;
}

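/*
 * Change the protection of a bolted kernel mapping: locate its slot
 * from the virtual address and issue H_PROTECT (plpar_pte_protect()).
 * Bolted entries must exist, so any failure here is fatal.
 */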
static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

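/*
 * Invalidate a single HPTE with H_REMOVE, matching on the AVPN so we
 * never remove somebody else's entry by mistake.  An entry that has
 * already disappeared (H_NOT_FOUND) is not treated as an error.
 */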
static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
 * to make sure that we avoid bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12

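/*
 * Invalidate a batch of huge page HPTEs.  If the hypervisor advertises
 * H_BULK_REMOVE we pack (slot, AVPN) pairs four at a time into a single
 * hcall; otherwise each entry is removed individually.
 */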
static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
					     unsigned long *vpn, int count,
					     int psize, int ssize)
{
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	int i = 0, pix = 0, rc;
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
						  param[0], param[1], param[2],
						  param[3], param[4], param[5],
						  param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx =  hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}
#else
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

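/*
 * Tear down a bolted kernel mapping entirely (as opposed to merely
 * changing its protection).  Returns -ENOENT if no HPTE is currently
 * installed for the address.
 */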
static int pSeries_lpar_hpte_removebolted(unsigned long ea,
					  int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
	return 0;
}

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
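/*
 * Flush a batch of hash entries collected in the per-cpu ppc64_tlb_batch.
 * Each entry's slot is recomputed from its VPN and, when the firmware
 * supports H_BULK_REMOVE, up to four (slot, AVPN) pairs are issued per
 * hcall; otherwise entries are invalidated one at a time.
 */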
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[PLPAR_HCALL9_BUFSIZE];
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
			powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

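/*
 * Wire the hcall-based implementations above into mmu_hash_ops so that
 * the generic hash MMU code uses them when running in an LPAR.
 */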
void __init hpte_init_pseries(void)
{
	mmu_hash_ops.hpte_invalidate	 = pSeries_lpar_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp	 = pSeries_lpar_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	mmu_hash_ops.hpte_insert	 = pSeries_lpar_hpte_insert;
	mmu_hash_ops.hpte_remove	 = pSeries_lpar_hpte_remove;
	mmu_hash_ops.hpte_removebolted   = pSeries_lpar_hpte_removebolted;
	mmu_hash_ops.flush_hash_range	 = pSeries_lpar_flush_hash_range;
	mmu_hash_ops.hpte_clear_all      = pseries_hpte_clear_all;
	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;
	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

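/*
 * Cooperative Memory Overcommit (CMO) page hinting: tell the hypervisor
 * which pages the kernel no longer needs by issuing H_PAGE_INIT with the
 * requested state for each firmware-sized chunk of the page.
 */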
static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (radix_enabled())
		return;
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif /* CONFIG_PPC_SMLPAR */
#endif /* CONFIG_PPC_STD_MMU_64 */

#ifdef CONFIG_TRACEPOINTS
#ifdef HAVE_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

int hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

int hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
	return 0;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this are spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);


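/*
 * Entry/exit hooks called around every traced hcall.  Interrupts are
 * disabled and a per-cpu depth counter guards against recursion, while
 * preemption stays disabled across the hcall so that the exit hook runs
 * on the same cpu as the entry hook.
 */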
void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif

/**
 * h_get_mpp
 * H_GET_MPP hcall returns info in 7 parms
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}