lpar.c 15.8 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21
/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 * 
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

22 23
/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG
L
Linus Torvalds 已提交
24 25 26

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
27
#include <linux/console.h>
L
Linus Torvalds 已提交
28 29 30 31 32 33 34 35 36 37 38 39
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
D
David Gibson 已提交
40
#include <asm/udbg.h>
P
Paul Mackerras 已提交
41
#include <asm/smp.h>
42
#include <asm/trace.h>
43 44

#include "plpar_wrappers.h"
45
#include "pseries.h"
L
Linus Torvalds 已提交
46 47


48
/* in hvCall.S */
L
Linus Torvalds 已提交
49
EXPORT_SYMBOL(plpar_hcall);
50
EXPORT_SYMBOL(plpar_hcall9);
L
Linus Torvalds 已提交
51
EXPORT_SYMBOL(plpar_hcall_norets);
52

L
Linus Torvalds 已提交
53 54 55 56 57
extern void pSeries_find_serial_port(void);

void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
58
	unsigned long addr;
L
Linus Torvalds 已提交
59
	long ret;
60 61
	struct paca_struct *pp;
	struct dtl_entry *dtl;
62 63

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
64
		lppaca_of(cpu).vmxregs_in_use = 1;
65

66
	addr = __pa(&lppaca_of(cpu));
67
	ret = register_vpa(hwcpu, addr);
L
Linus Torvalds 已提交
68

69
	if (ret) {
70 71
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
72 73 74 75 76 77 78 79 80 81
		return;
	}
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that.  All SPLPAR support SLB shadow buffer.
	 */
	addr = __pa(&slb_shadow[cpu]);
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
82 83 84
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
85
	}
86 87 88 89 90 91 92 93 94 95 96 97 98 99 100

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
101 102 103
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
104 105
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
L
Linus Torvalds 已提交
106 107
}

108
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
109 110
 			      unsigned long va, unsigned long pa,
 			      unsigned long rflags, unsigned long vflags,
P
Paul Mackerras 已提交
111
			      int psize, int ssize)
L
Linus Torvalds 已提交
112 113 114 115
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
116
	unsigned long hpte_v, hpte_r;
L
Linus Torvalds 已提交
117

118
	if (!(vflags & HPTE_V_BOLTED))
119
		pr_devel("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
120 121
			 "rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, va, pa, rflags, vflags, psize);
122

P
Paul Mackerras 已提交
123
	hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
124 125 126
	hpte_r = hpte_encode_r(pa, psize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
127
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
128

L
Linus Torvalds 已提交
129 130 131 132 133 134 135 136
	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0         */
	/* Zero page = 0               */
	/* I-cache Invalidate = 0      */
	/* I-cache synchronize = 0     */
	/* Exact = 0                   */
	flags = 0;

137
	/* Make pHyp happy */
138
	if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU))
139
		hpte_r &= ~_PAGE_COHERENT;
140 141
	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;
L
Linus Torvalds 已提交
142

143
	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
144
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
145
		if (!(vflags & HPTE_V_BOLTED))
146
			pr_devel(" full\n");
L
Linus Torvalds 已提交
147
		return -1;
148
	}
L
Linus Torvalds 已提交
149 150 151 152 153 154

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
155
	if (unlikely(lpar_rc != H_SUCCESS)) {
156
		if (!(vflags & HPTE_V_BOLTED))
157
			pr_devel(" lpar err %lu\n", lpar_rc);
L
Linus Torvalds 已提交
158
		return -2;
159 160
	}
	if (!(vflags & HPTE_V_BOLTED))
161
		pr_devel(" -> slot: %lu\n", slot & 7);
L
Linus Torvalds 已提交
162 163 164 165

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
166
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
L
Linus Torvalds 已提交
167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185
}

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
186
		if (lpar_rc == H_SUCCESS)
L
Linus Torvalds 已提交
187
			return i;
188
		BUG_ON(lpar_rc != H_NOT_FOUND);
L
Linus Torvalds 已提交
189 190 191 192 193 194 195 196 197 198 199 200

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

/*
 * Clear the partition's hash page table (used at kexec/reboot time).
 * Walks every HPTE, reading four at a time, and removes each valid
 * entry that is not part of the VRMA (which belongs to the hypervisor
 * and must be preserved).
 */
static void pSeries_lpar_hptab_clear(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	/* each HPTE is 16 bytes, hence the shift by 4 */
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/* Read in batches of 4,
	 * invalidate only valid entries not in the VRMA
	 * hpte_count will be a multiple of 4
         */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++){
			/* skip VRMA entries */
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}
}

P
Paul Mackerras 已提交
227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242
/*
 * Build the AVPN and B (segment size) fields of HPTE dword 0, used to
 * match an existing PTE via H_AVPN.  The bottom 7 bits of the result
 * are always zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long va, int psize,
					     int ssize)
{
	unsigned long avpn = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);

	return (avpn << HPTE_V_AVPN_SHIFT) |
	       (((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT);
}

L
Linus Torvalds 已提交
243 244 245 246 247 248
/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
249 250 251
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long va,
P
Paul Mackerras 已提交
252
				       int psize, int ssize, int local)
L
Linus Torvalds 已提交
253 254 255
{
	unsigned long lpar_rc;
	unsigned long flags = (newpp & 7) | H_AVPN;
256
	unsigned long want_v;
L
Linus Torvalds 已提交
257

P
Paul Mackerras 已提交
258
	want_v = hpte_encode_avpn(va, psize, ssize);
L
Linus Torvalds 已提交
259

260
	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
261
		 want_v, slot, flags, psize);
L
Linus Torvalds 已提交
262

P
Paul Mackerras 已提交
263
	lpar_rc = plpar_pte_protect(flags, slot, want_v);
264

265
	if (lpar_rc == H_NOT_FOUND) {
266
		pr_devel("not found !\n");
L
Linus Torvalds 已提交
267
		return -1;
268 269
	}

270
	pr_devel("ok\n");
L
Linus Torvalds 已提交
271

272
	BUG_ON(lpar_rc != H_SUCCESS);
L
Linus Torvalds 已提交
273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290

	return 0;
}

static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
{
	unsigned long dword0;
	unsigned long lpar_rc;
	unsigned long dummy_word1;
	unsigned long flags;

	/* Read 1 pte at a time                        */
	/* Do not need RPN to logical page translation */
	/* No cross CEC PFT access                     */
	flags = 0;

	lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);

291
	BUG_ON(lpar_rc != H_SUCCESS);
L
Linus Torvalds 已提交
292 293 294 295

	return dword0;
}

P
Paul Mackerras 已提交
296
static long pSeries_lpar_hpte_find(unsigned long va, int psize, int ssize)
L
Linus Torvalds 已提交
297 298
{
	unsigned long hash;
P
Paul Mackerras 已提交
299
	unsigned long i;
L
Linus Torvalds 已提交
300
	long slot;
301
	unsigned long want_v, hpte_v;
L
Linus Torvalds 已提交
302

P
Paul Mackerras 已提交
303 304 305 306 307 308 309 310 311 312 313 314
	hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(va, psize, ssize);

	/* Bolted entries are always in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hpte_v = pSeries_lpar_hpte_getword0(slot);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
L
Linus Torvalds 已提交
315 316 317 318 319 320
	}

	return -1;
} 

static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
321
					     unsigned long ea,
P
Paul Mackerras 已提交
322
					     int psize, int ssize)
L
Linus Torvalds 已提交
323
{
324
	unsigned long lpar_rc, slot, vsid, va, flags;
L
Linus Torvalds 已提交
325

P
Paul Mackerras 已提交
326 327
	vsid = get_kernel_vsid(ea, ssize);
	va = hpt_va(ea, vsid, ssize);
L
Linus Torvalds 已提交
328

P
Paul Mackerras 已提交
329
	slot = pSeries_lpar_hpte_find(va, psize, ssize);
L
Linus Torvalds 已提交
330 331 332 333 334
	BUG_ON(slot == -1);

	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

335
	BUG_ON(lpar_rc != H_SUCCESS);
L
Linus Torvalds 已提交
336 337 338
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
P
Paul Mackerras 已提交
339
					 int psize, int ssize, int local)
L
Linus Torvalds 已提交
340
{
341
	unsigned long want_v;
L
Linus Torvalds 已提交
342 343 344
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

345
	pr_devel("    inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
346
		 slot, va, psize, local);
L
Linus Torvalds 已提交
347

P
Paul Mackerras 已提交
348 349
	want_v = hpte_encode_avpn(va, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
350
	if (lpar_rc == H_NOT_FOUND)
L
Linus Torvalds 已提交
351 352
		return;

353
	BUG_ON(lpar_rc != H_SUCCESS);
L
Linus Torvalds 已提交
354 355
}

356 357 358 359 360 361 362 363 364 365 366 367 368 369
/*
 * Remove the bolted kernel HPTE mapping effective address @ea.
 * The mapping must exist; we BUG otherwise.
 */
static void pSeries_lpar_hpte_removebolted(unsigned long ea,
					   int psize, int ssize)
{
	unsigned long vsid = get_kernel_vsid(ea, ssize);
	unsigned long va = hpt_va(ea, vsid, ssize);
	unsigned long slot = pSeries_lpar_hpte_find(va, psize, ssize);

	BUG_ON(slot == -1);

	pSeries_lpar_hpte_invalidate(slot, va, psize, ssize, 0);
}

370 371 372 373 374 375 376
/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL

L
Linus Torvalds 已提交
377 378 379 380
/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 *
 * Flush the per-cpu TLB batch: invalidate each HPTE either one at a
 * time, or packed eight parameters (four entries) at a time through
 * H_BULK_REMOVE when the firmware supports it.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[9];		/* H_BULK_REMOVE takes up to 8 + END */
	unsigned long va;
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		va = batch->vaddr[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
			/* recompute the slot exactly as hash_page chose it */
			hash = hpt_hash(va, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				pSeries_lpar_hpte_invalidate(slot, va, psize,
							     ssize, local);
			} else {
				/* queue (slot, avpn) pair; flush when full */
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(va, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	/* flush any partially filled batch, terminated by HBR_END */
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

440 441 442 443 444 445 446 447 448 449 450 451
/*
 * "bulk_remove=off" on the kernel command line disables use of the
 * H_BULK_REMOVE hcall in pSeries_lpar_flush_hash_range().
 */
static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		/* was missing the trailing newline, which made this
		 * message run into the next printk */
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

452
/*
 * Wire the hypervisor-mediated (LPAR) hash-table operations into the
 * machine-dependent ops table.
 */
void __init hpte_init_lpar(void)
{
	/* single-entry primitives */
	ppc_md.hpte_insert	= pSeries_lpar_hpte_insert;
	ppc_md.hpte_remove	= pSeries_lpar_hpte_remove;
	ppc_md.hpte_invalidate	= pSeries_lpar_hpte_invalidate;
	ppc_md.hpte_updatepp	= pSeries_lpar_hpte_updatepp;

	/* bolted-entry handling */
	ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;

	/* bulk operations */
	ppc_md.flush_hash_range	= pSeries_lpar_flush_hash_range;
	ppc_md.hpte_clear_all	= pSeries_lpar_hptab_clear;
}
463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

/*
 * "cmo_free_hint=yes|no" (or on/off) controls whether freed pages are
 * hinted to the hypervisor via H_PAGE_INIT.  Default is on; any other
 * value enables hinting but reports the option as malformed.
 */
static int __init cmo_free_hint(char *str)
{
	char *parm = strstrip(str);
	int recognised;

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	recognised = (strcasecmp(parm, "yes") == 0 ||
		      strcasecmp(parm, "on") == 0);
	return recognised ? 1 : 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

/*
 * Tell the hypervisor the usage state of 2^@order kernel pages starting
 * at @page, one CMO page at a time.
 */
static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	unsigned long cmo_page_sz = cmo_get_page_size();
	unsigned long addr = __pa((unsigned long)page_address(page));
	int pg, off;

	/* outer loop walks kernel pages, inner walks CMO sub-pages */
	for (pg = 0; pg < (1 << order); pg++, addr += PAGE_SIZE)
		for (off = 0; off < PAGE_SIZE; off += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + off, 0);
}

/*
 * Hint freed pages to the hypervisor, when hinting is enabled and the
 * platform has Cooperative Memory Overcommit support.
 */
void arch_free_page(struct page *page, int order)
{
	if (cmo_free_hint_flag && firmware_has_feature(FW_FEATURE_CMO))
		pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif
515 516 517 518 519 520 521 522 523 524 525

#ifdef CONFIG_TRACEPOINTS
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

526 527 528 529 530 531 532
/* 
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this are spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);

533 534 535 536 537 538 539 540 541 542
/* Called (under tracepoints_mutex) when an hcall tracepoint is enabled;
 * the nonzero refcount makes the hot hcall path emit trace events. */
void hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
}

/* Called (under tracepoints_mutex) when an hcall tracepoint is disabled. */
void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}

543
/*
 * Emit the hcall-entry tracepoint.  The per-cpu depth counter guards
 * against recursion: the tracing code can itself make hcalls (e.g.
 * spinlocks calling H_YIELD on shared processor partitions), and irqs
 * are disabled so the depth check/update pair is cpu-local and safe.
 */
void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	/* already inside a traced hcall on this cpu: skip nested event */
	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

563 564
/*
 * Emit the hcall-exit tracepoint.  Same per-cpu recursion guard as
 * __trace_hcall_entry(): any hcall made while tracing is not traced.
 */
void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	/* already inside a traced hcall on this cpu: skip nested event */
	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif
584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627

/**
 * h_get_mpp
 * H_GET_MPP hcall returns info in 7 parms
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
	int rc;

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem   = retbuf[1];

	/* word 2: group number (bytes 4-5), pool number (bytes 6-7) */
	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num  = retbuf[2] & 0xffff;

	/* word 3: weights in the top two bytes, entitlement in the rest */
	mpp_data->mem_weight              = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight  = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff;

	mpp_data->pool_size    = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem  = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

/* H_GET_MPP_X: extended memory-pool statistics (coalescing, PURR/SPURR). */
int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };
	int rc;

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes      = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles     = retbuf[2];
	mpp_x_data->pool_spurr_cycles    = retbuf[3];

	return rc;
}