/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 * 
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>

#include "pseries.h"

/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL
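
/*
 * Each H_BULK_REMOVE call packs up to four (control, AVPN) argument
 * pairs into its eight hcall parameters, which is why the bulk-remove
 * loops below issue the hcall whenever pix reaches 8.
 */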

/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

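/*
 * Register this cpu's virtual processor area (VPA), SLB shadow buffer
 * and dispatch trace log with the hypervisor.
 */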
void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}
	/*
	 * PAPR calls this feature SLB-Buffer, but firmware never
	 * reports it.  All SPLPARs support the SLB shadow buffer.
	 */
	addr = __pa(paca[cpu].slb_shadow_ptr);
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
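		/* 2 == DTL_LOG_PREEMPT: log vcpu preempt/dispatch events only */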
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}

static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0         */
	/* Zero page = 0               */
	/* I-cache Invalidate = 0      */
	/* I-cache synchronize = 0     */
	/* Exact = 0                   */
	flags = 0;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" full\n");
		return -1;
	}

	/*
	 * Since we try to ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" lpar err %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

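/*
 * When the hypervisor's tlbie lock must be taken (i.e. no
 * MMU_FTR_LOCKLESS_TLBIE), serialize invalidations locally so that we
 * don't bounce that lock between cpus.
 */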
static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test.  H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

static void pSeries_lpar_hptab_clear(void)
{
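	/* ppc64_pft_size is the log2 of the HPT size; each HPTE is 16 bytes */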
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/*
	 * Read in batches of 4; invalidate only valid entries that are
	 * not in the VRMA.  hpte_count will be a multiple of 4.
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}

#ifdef __LITTLE_ENDIAN__
	/*
	 * Reset exceptions to big endian.
	 *
	 * FIXME this is a hack for kexec, we need to reset the exception
	 * endian before starting the new kernel and this is a convenient place
	 * to do it.
	 *
	 * This is also called on boot when a fadump happens. In that case we
	 * must not change the exception endian mode.
	 */
	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) {
		long rc;

		rc = pseries_big_endian_exceptions();
		/*
		 * At this point it is unlikely panic() will get anything
		 * out to the user, but at least this will stop us from
		 * continuing on further and creating an even more
		 * difficult to debug situation.
		 *
		 * There is a known problem when kdump'ing, if cpus are offline
		 * the above call will fail. Rather than panicking again, keep
		 * going and hope the kdump kernel is also little endian, which
		 * it usually is.
		 */
		if (rc && !kdump_in_progress())
			panic("Could not enable big endian exceptions");
	}
#endif
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, unsigned long inv_flags)
{
	unsigned long lpar_rc;
	unsigned long flags = (newpp & 7) | H_AVPN;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

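/*
 * Scan one HPTE group for a match on want_v, reading the group four
 * entries per H_READ call.  Returns the matching slot's offset within
 * the group, or -1 if no valid match is found.
 */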
static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
{
	long lpar_rc;
	unsigned long i, j;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];

	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {

		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++) {
			if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
			    (ptes[j].pteh & HPTE_V_VALID))
				return i + j;
		}
	}

	return -1;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	long slot;
	unsigned long hash;
	unsigned long want_v;
	unsigned long hpte_group;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
	if (slot < 0)
		return -1;
	return hpte_group + slot;
}

static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Invalidate huge pages in batches of PPC64_HUGE_HPTE_BATCH HPTEs, i.e.
 * three H_BULK_REMOVE calls of four entries each, so that we neither
 * hold pSeries_lpar_tlbie_lock for too long nor bounce the hypervisor
 * tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12

static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
					     unsigned long *vpn, int count,
					     int psize, int ssize)
{
	unsigned long param[8];
	int i = 0, pix = 0, rc;
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
						  param[0], param[1], param[2],
						  param[3], param[4], param[5],
						  param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

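/*
 * Invalidate all HPTEs backing a transparent huge page.  The
 * hpte_slot_array records, per base-size subpage, whether an HPTE
 * exists and which slot within its group it occupies.
 */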
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx =  hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}
#else
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif
static int pSeries_lpar_hpte_removebolted(unsigned long ea,
					  int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
	return 0;
}

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[9];
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

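/*
 * Hook the hcall-based (LPAR) HPT operations into the machine
 * descriptor for this pSeries partition.
 */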
void __init hpte_init_lpar(void)
{
	ppc_md.hpte_invalidate	= pSeries_lpar_hpte_invalidate;
	ppc_md.hpte_updatepp	= pSeries_lpar_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	ppc_md.hpte_insert	= pSeries_lpar_hpte_insert;
	ppc_md.hpte_remove	= pSeries_lpar_hpte_remove;
	ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;
	ppc_md.flush_hash_range	= pSeries_lpar_flush_hash_range;
	ppc_md.hpte_clear_all   = pSeries_lpar_hptab_clear;
	ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;
	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

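/*
 * Hint each hypervisor-sized chunk of the page via H_PAGE_INIT; the
 * CMO page size may be smaller than the kernel's PAGE_SIZE.
 */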
static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif

#ifdef CONFIG_TRACEPOINTS
#ifdef HAVE_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

void hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

void hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this are spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);


void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
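	/*
	 * Preemption is re-enabled by the matching preempt_enable() in
	 * __trace_hcall_exit(), keeping us on this cpu (and its
	 * hcall_trace_depth) across the traced hcall.
	 */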
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif

/**
 * h_get_mpp() - fetch Memory Partitioning and Paging (MPP) data.
 * @mpp_data: filled in from the hcall's seven return parameters.
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

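/* H_GET_MPP_X returns extended MPP data, including page coalescing stats. */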
int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}