lpar.c 16.0 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21
/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 * 
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

22 23
/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG
L
Linus Torvalds 已提交
24 25 26

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
27
#include <linux/console.h>
28
#include <linux/export.h>
L
Linus Torvalds 已提交
29 30 31 32 33 34 35 36 37 38 39
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
D
David Gibson 已提交
40
#include <asm/udbg.h>
P
Paul Mackerras 已提交
41
#include <asm/smp.h>
42
#include <asm/trace.h>
43
#include <asm/firmware.h>
44 45

#include "plpar_wrappers.h"
46
#include "pseries.h"
L
Linus Torvalds 已提交
47 48


49
/* in hvCall.S */
L
Linus Torvalds 已提交
50
EXPORT_SYMBOL(plpar_hcall);
51
EXPORT_SYMBOL(plpar_hcall9);
L
Linus Torvalds 已提交
52
EXPORT_SYMBOL(plpar_hcall_norets);
53

L
Linus Torvalds 已提交
54 55 56 57 58
extern void pSeries_find_serial_port(void);

void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
59
	unsigned long addr;
L
Linus Torvalds 已提交
60
	long ret;
61 62
	struct paca_struct *pp;
	struct dtl_entry *dtl;
63 64

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
65
		lppaca_of(cpu).vmxregs_in_use = 1;
66

67
	addr = __pa(&lppaca_of(cpu));
68
	ret = register_vpa(hwcpu, addr);
L
Linus Torvalds 已提交
69

70
	if (ret) {
71 72
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
73 74 75 76 77 78 79 80 81 82
		return;
	}
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that.  All SPLPAR support SLB shadow buffer.
	 */
	addr = __pa(&slb_shadow[cpu]);
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
83 84 85
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
86
	}
87 88 89 90 91 92 93 94 95 96 97 98 99 100 101

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
102 103 104
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
105 106
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
L
Linus Torvalds 已提交
107 108
}

109
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
110 111
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
112
				     int psize, int apsize, int ssize)
L
Linus Torvalds 已提交
113 114 115 116
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
117
	unsigned long hpte_v, hpte_r;
L
Linus Torvalds 已提交
118

119
	if (!(vflags & HPTE_V_BOLTED))
120 121 122
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn,  pa, rflags, vflags, psize);
123

124 125
	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
126 127

	if (!(vflags & HPTE_V_BOLTED))
128
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
129

L
Linus Torvalds 已提交
130 131 132 133 134 135 136 137
	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0         */
	/* Zero page = 0               */
	/* I-cache Invalidate = 0      */
	/* I-cache synchronize = 0     */
	/* Exact = 0                   */
	flags = 0;

138
	/* Make pHyp happy */
139
	if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU))
140
		hpte_r &= ~_PAGE_COHERENT;
141 142
	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;
L
Linus Torvalds 已提交
143

144
	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
145
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
146
		if (!(vflags & HPTE_V_BOLTED))
147
			pr_devel(" full\n");
L
Linus Torvalds 已提交
148
		return -1;
149
	}
L
Linus Torvalds 已提交
150 151 152 153 154 155

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
156
	if (unlikely(lpar_rc != H_SUCCESS)) {
157
		if (!(vflags & HPTE_V_BOLTED))
158
			pr_devel(" lpar err %ld\n", lpar_rc);
L
Linus Torvalds 已提交
159
		return -2;
160 161
	}
	if (!(vflags & HPTE_V_BOLTED))
162
		pr_devel(" -> slot: %lu\n", slot & 7);
L
Linus Torvalds 已提交
163 164 165 166

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
167
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
L
Linus Torvalds 已提交
168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186
}

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
187
		if (lpar_rc == H_SUCCESS)
L
Linus Torvalds 已提交
188
			return i;
189 190 191 192 193 194 195

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test.  H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
L
Linus Torvalds 已提交
196 197 198 199 200 201 202 203 204 205 206 207

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

/*
 * pSeries_lpar_hptab_clear - invalidate every valid, non-VRMA entry in
 * the hashed page table.
 *
 * HPTEs are read four at a time with the raw H_READ wrapper; VRMA
 * entries (Virtual Real Mode Area, owned by the hypervisor) are
 * skipped, everything else that is valid is removed.
 */
static void pSeries_lpar_hptab_clear(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;	/* 16 bytes per HPTE */
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/* Read in batches of 4,
	 * invalidate only valid entries not in the VRMA
	 * hpte_count will be a multiple of 4
         */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++){
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
240 241
/*
 * pSeries_lpar_hpte_updatepp - change the protection (pp) bits of the
 * HPTE at @slot via H_PROTECT, matching on the AVPN built from @vpn.
 *
 * Only the low 3 bits of @newpp are passed to the hypervisor (see the
 * note above about newpp lining up with the flags). @apsize and @local
 * are unused here.
 *
 * Returns 0 on success, -1 if no matching entry was found at @slot.
 * Any other hypervisor error is fatal (BUG_ON).
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, int local)
{
	unsigned long lpar_rc;
	unsigned long flags = (newpp & 7) | H_AVPN;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
{
	unsigned long dword0;
	unsigned long lpar_rc;
	unsigned long dummy_word1;
	unsigned long flags;

	/* Read 1 pte at a time                        */
	/* Do not need RPN to logical page translation */
	/* No cross CEC PFT access                     */
	flags = 0;

	lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);

283
	BUG_ON(lpar_rc != H_SUCCESS);
L
Linus Torvalds 已提交
284 285 286 287

	return dword0;
}

288
/*
 * pSeries_lpar_hpte_find - locate the HPT slot holding the mapping for
 * @vpn.
 *
 * Only the eight slots of the primary group are searched, since the
 * callers look up bolted entries and those are always inserted in the
 * primary group.
 *
 * Returns the global slot number, or -1 if no valid matching entry
 * exists.
 */
static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hpte_v = pSeries_lpar_hpte_getword0(slot);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * pSeries_lpar_hpte_updateboltedpp - change the protection bits of the
 * bolted kernel mapping for effective address @ea.
 *
 * The entry must exist (BUG_ON otherwise). No AVPN match is requested
 * in the H_PROTECT call since the slot was just looked up.
 */
static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	/* only the low 3 pp bits are passed to the hypervisor */
	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

331
/*
 * pSeries_lpar_hpte_invalidate - remove the HPTE at @slot via H_REMOVE,
 * matching on the AVPN built from @vpn.
 *
 * H_NOT_FOUND is tolerated (the entry is already gone); any other
 * failure is fatal. @apsize and @local are unused on LPAR.
 */
static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

350 351 352
/*
 * pSeries_lpar_hpte_removebolted - remove the bolted kernel mapping for
 * effective address @ea.
 *
 * The entry must exist (BUG_ON if the lookup fails).
 */
static void pSeries_lpar_hpte_removebolted(unsigned long ea,
					   int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);
	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
}

367 368 369 370 371 372 373
/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL

L
Linus Torvalds 已提交
374 375 376 377
/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
378
/*
 * pSeries_lpar_flush_hash_range - invalidate the @number HPTEs queued
 * in this cpu's ppc64_tlb_batch.
 *
 * When the firmware supports H_BULK_REMOVE, removals are batched four
 * at a time (two parameter words per entry, eight words per call) and
 * any partial batch is flushed at the end with an HBR_END terminator.
 * Otherwise each entry is removed individually via
 * pSeries_lpar_hpte_invalidate().
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[9];
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	/* serialise tlbie unless the MMU supports lockless tlbie */
	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			/* secondary-bucket entries hash with inverted bits */
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				/* flush a full batch of 4 removals */
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	/* flush any partial batch, terminated with HBR_END */
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

440 441 442 443 444 445 446 447 448 449 450 451
/*
 * disable_bulk_remove - handler for the "bulk_remove=off" command line
 * option.
 *
 * Clears FW_FEATURE_BULK_REMOVE so HPTE invalidation falls back to
 * one-at-a-time H_REMOVE calls instead of H_BULK_REMOVE.
 */
static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		/* message was missing its newline; also fix indentation */
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

452
/*
 * hpte_init_lpar - wire the hcall-based LPAR implementations of the
 * HPT management primitives into ppc_md so the generic hash MMU code
 * uses them.
 */
void __init hpte_init_lpar(void)
{
	ppc_md.hpte_invalidate	= pSeries_lpar_hpte_invalidate;
	ppc_md.hpte_updatepp	= pSeries_lpar_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	ppc_md.hpte_insert	= pSeries_lpar_hpte_insert;
	ppc_md.hpte_remove	= pSeries_lpar_hpte_remove;
	ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;
	ppc_md.flush_hash_range	= pSeries_lpar_flush_hash_range;
	ppc_md.hpte_clear_all   = pSeries_lpar_hptab_clear;
}
463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

/*
 * cmo_free_hint - handler for the "cmo_free_hint=" command line option.
 *
 * "no"/"off" disables CMO free page hinting; "yes"/"on" (and, as a
 * side effect of the flow below, any unrecognised value) enables it.
 * Returns 1 for recognised values, 0 otherwise.
 */
static int __init cmo_free_hint(char *str)
{
	char *parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	/* 1 if the value was an explicit "yes"/"on", 0 otherwise */
	return strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

/*
 * pSeries_set_page_state - tell the hypervisor the usage state of the
 * 2^order pages starting at @page, via one H_PAGE_INIT call per
 * hypervisor-sized chunk of each kernel page.
 */
static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	unsigned long hv_chunk = cmo_get_page_size();
	unsigned long pa = __pa((unsigned long)page_address(page));
	int pg;

	/* outer loop: kernel pages; inner loop: hypervisor chunks */
	for (pg = 0; pg < (1 << order); pg++, pa += PAGE_SIZE) {
		unsigned long off;

		for (off = 0; off < PAGE_SIZE; off += hv_chunk)
			plpar_hcall_norets(H_PAGE_INIT, state, pa + off, 0);
	}
}

/*
 * arch_free_page - hint to the hypervisor that 2^order pages starting
 * at @page are no longer in use. No-op unless free page hinting is
 * enabled and the CMO firmware feature is present.
 */
void arch_free_page(struct page *page, int order)
{
	if (cmo_free_hint_flag && firmware_has_feature(FW_FEATURE_CMO))
		pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif
515 516 517 518 519 520 521 522 523 524 525

#ifdef CONFIG_TRACEPOINTS
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

526 527 528 529 530 531 532
/* 
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this are spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);

533 534 535 536 537 538 539 540 541 542
/* Bump the TOC-resident refcount that enables hcall tracing. */
void hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
}

/* Drop the TOC-resident refcount that enables hcall tracing. */
void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}

543
/*
 * __trace_hcall_entry - fire the hcall_entry tracepoint for @opcode.
 *
 * Uses a per-cpu depth counter (with irqs off) to avoid recursion:
 * the tracing code can itself issue hcalls, e.g. spinlocks calling
 * H_YIELD on shared processor partitions.
 *
 * The preempt_disable() is paired with the preempt_enable() in
 * __trace_hcall_exit() — NOTE(review): presumably so entry and exit
 * fire on the same cpu; confirm against the tracepoint consumers.
 */
void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	/* already tracing an hcall on this cpu: avoid recursion */
	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

571 572
/*
 * __trace_hcall_exit - fire the hcall_exit tracepoint for @opcode.
 *
 * Mirrors __trace_hcall_entry(): skips H_CEDE (no tracepoints in RCU
 * idle regions), guards against recursion with the same per-cpu depth
 * counter, and issues the preempt_enable() paired with the
 * preempt_disable() done in __trace_hcall_entry().
 */
void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	/* already tracing an hcall on this cpu: avoid recursion */
	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif
596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639

/**
 * h_get_mpp
 * H_GET_MPP hcall returns info in 7 parms
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	/* retbuf[2]: group number in bytes 4-5, pool number in bytes 6-7 */
	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	/* retbuf[3]: weights in the top two bytes, entitlement in the low 48 bits */
	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	/* pass the hcall status through to the caller */
	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

/*
 * h_get_mpp_x - issue H_GET_MPP_X and unpack the first four return
 * words into @mpp_x_data. Returns the hcall status.
 */
int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };
	int rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}