lpar.c 16.0 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21
/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 * 
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

22 23
/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG
L
Linus Torvalds 已提交
24 25 26

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
27
#include <linux/console.h>
28
#include <linux/export.h>
L
Linus Torvalds 已提交
29 30 31 32 33 34 35 36 37 38 39 40
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
D
David Gibson 已提交
41
#include <asm/udbg.h>
P
Paul Mackerras 已提交
42
#include <asm/smp.h>
43
#include <asm/trace.h>
44 45

#include "plpar_wrappers.h"
46
#include "pseries.h"
L
Linus Torvalds 已提交
47 48


49
/* in hvCall.S */
L
Linus Torvalds 已提交
50
EXPORT_SYMBOL(plpar_hcall);
51
EXPORT_SYMBOL(plpar_hcall9);
L
Linus Torvalds 已提交
52
EXPORT_SYMBOL(plpar_hcall_norets);
53

L
Linus Torvalds 已提交
54 55 56 57 58
extern void pSeries_find_serial_port(void);

/*
 * Register this cpu's per-virtual-processor areas with the hypervisor:
 * the lppaca (VPA), the SLB shadow buffer and, if one has been
 * allocated, the dispatch trace log.  If VPA registration fails we
 * return early without attempting to register the other areas.
 */
void vpa_init(int cpu)
{
	/* translate the Linux cpu number to the hardware thread id */
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/* advertise VMX register usage in the VPA */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that.  All SPLPAR support SLB shadow buffer.
	 */
	addr = __pa(&slb_shadow[cpu]);
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		/* reset read index / current pointer before handing it over */
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			/*
			 * NOTE(review): the other messages in this function
			 * report @cpu, but this one uses smp_processor_id()
			 * — confirm that is intentional.
			 */
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}

109
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
110 111
 			      unsigned long va, unsigned long pa,
 			      unsigned long rflags, unsigned long vflags,
P
Paul Mackerras 已提交
112
			      int psize, int ssize)
L
Linus Torvalds 已提交
113 114 115 116
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
117
	unsigned long hpte_v, hpte_r;
L
Linus Torvalds 已提交
118

119
	if (!(vflags & HPTE_V_BOLTED))
120
		pr_devel("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
121 122
			 "rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, va, pa, rflags, vflags, psize);
123

P
Paul Mackerras 已提交
124
	hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
125 126 127
	hpte_r = hpte_encode_r(pa, psize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
128
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
129

L
Linus Torvalds 已提交
130 131 132 133 134 135 136 137
	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0         */
	/* Zero page = 0               */
	/* I-cache Invalidate = 0      */
	/* I-cache synchronize = 0     */
	/* Exact = 0                   */
	flags = 0;

138
	/* Make pHyp happy */
139
	if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU))
140
		hpte_r &= ~_PAGE_COHERENT;
141 142
	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;
L
Linus Torvalds 已提交
143

144
	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
145
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
146
		if (!(vflags & HPTE_V_BOLTED))
147
			pr_devel(" full\n");
L
Linus Torvalds 已提交
148
		return -1;
149
	}
L
Linus Torvalds 已提交
150 151 152 153 154 155

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
156
	if (unlikely(lpar_rc != H_SUCCESS)) {
157
		if (!(vflags & HPTE_V_BOLTED))
158
			pr_devel(" lpar err %lu\n", lpar_rc);
L
Linus Torvalds 已提交
159
		return -2;
160 161
	}
	if (!(vflags & HPTE_V_BOLTED))
162
		pr_devel(" -> slot: %lu\n", slot & 7);
L
Linus Torvalds 已提交
163 164 165 166

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
167
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
L
Linus Torvalds 已提交
168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186
}

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
187
		if (lpar_rc == H_SUCCESS)
L
Linus Torvalds 已提交
188
			return i;
189
		BUG_ON(lpar_rc != H_NOT_FOUND);
L
Linus Torvalds 已提交
190 191 192 193 194 195 196 197 198 199 200 201

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

/*
 * Clear the hash page table: read HPTEs four at a time and remove every
 * valid entry, skipping entries whose first word matches
 * HPTE_V_VRMA_MASK (VRMA entries are left in place).
 */
static void pSeries_lpar_hptab_clear(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4; /* 16 bytes per HPTE */
	/* matches the layout returned by the read hcall: V word, R word */
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/* Read in batches of 4,
	 * invalidate only valid entries not in the VRMA
	 * hpte_count will be a multiple of 4
         */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++){
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}
}

P
Paul Mackerras 已提交
228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243
/*
 * Build the AVPN and B (segment size) fields of an HPTE's first
 * doubleword, for matching against an existing PTE.  The bottom 7 bits
 * of the returned value are always zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long va, int psize,
					     int ssize)
{
	unsigned long avpn = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);

	return (avpn << HPTE_V_AVPN_SHIFT) |
	       (((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT);
}

L
Linus Torvalds 已提交
244 245 246 247 248 249
/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
/*
 * Update the protection bits of the HPTE at @slot via H_PROTECT,
 * matching on the AVPN (H_AVPN flag) so a recycled slot is detected.
 * Returns 0 on success, -1 if the entry was not found; any other
 * hypervisor error is fatal (BUG).
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long va,
				       int psize, int ssize, int local)
{
	unsigned long lpar_rc;
	/* low 3 bits of newpp pass straight through as hcall flags */
	unsigned long flags = (newpp & 7) | H_AVPN;
	unsigned long want_v;

	want_v = hpte_encode_avpn(va, psize, ssize);

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
{
	unsigned long dword0;
	unsigned long lpar_rc;
	unsigned long dummy_word1;
	unsigned long flags;

	/* Read 1 pte at a time                        */
	/* Do not need RPN to logical page translation */
	/* No cross CEC PFT access                     */
	flags = 0;

	lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);

292
	BUG_ON(lpar_rc != H_SUCCESS);
L
Linus Torvalds 已提交
293 294 295 296

	return dword0;
}

P
Paul Mackerras 已提交
297
static long pSeries_lpar_hpte_find(unsigned long va, int psize, int ssize)
L
Linus Torvalds 已提交
298 299
{
	unsigned long hash;
P
Paul Mackerras 已提交
300
	unsigned long i;
L
Linus Torvalds 已提交
301
	long slot;
302
	unsigned long want_v, hpte_v;
L
Linus Torvalds 已提交
303

P
Paul Mackerras 已提交
304 305 306 307 308 309 310 311 312 313 314 315
	hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(va, psize, ssize);

	/* Bolted entries are always in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hpte_v = pSeries_lpar_hpte_getword0(slot);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
L
Linus Torvalds 已提交
316 317 318 319 320 321
	}

	return -1;
} 

/*
 * Change the protection bits of a bolted kernel HPTE.  The entry is
 * located by its virtual address and must exist (BUG otherwise); no
 * AVPN match is requested in the H_PROTECT call since the slot was
 * just found.
 */
static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long lpar_rc, slot, vsid, va, flags;

	vsid = get_kernel_vsid(ea, ssize);
	va = hpt_va(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(va, psize, ssize);
	BUG_ON(slot == -1);

	/* low 3 bits of newpp map directly onto the hcall flags */
	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

/*
 * Invalidate the HPTE at @slot via H_REMOVE, matching on the AVPN so a
 * stale slot is detected.  H_NOT_FOUND (entry already gone) is treated
 * as success; any other failure is fatal (BUG).
 */
static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
					 int psize, int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
		 slot, va, psize, local);

	want_v = hpte_encode_avpn(va, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

357 358 359 360 361 362 363 364 365 366 367 368 369 370
/*
 * Tear down a bolted kernel mapping: locate the HPTE for @ea and
 * invalidate it (non-local flush).  The entry must exist.
 */
static void pSeries_lpar_hpte_removebolted(unsigned long ea,
					   int psize, int ssize)
{
	unsigned long vaddr = hpt_va(ea, get_kernel_vsid(ea, ssize), ssize);
	unsigned long slot = pSeries_lpar_hpte_find(vaddr, psize, ssize);

	BUG_ON(slot == -1);

	pSeries_lpar_hpte_invalidate(slot, vaddr, psize, ssize, 0);
}

371 372 373 374 375 376 377
/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
/*
 * Flush a batch of hash PTEs (the per-cpu ppc64_tlb_batch).  When
 * H_BULK_REMOVE is available, requests are packed two words at a time
 * (flags|slot, AVPN) into param[] and issued four HPTEs (8 words) per
 * hcall, with a trailing partial batch terminated by HBR_END.
 * Otherwise each HPTE is invalidated individually.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[9];
	unsigned long va;
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		va = batch->vaddr[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
			/* recompute the HPTE slot from the hash and hidx */
			hash = hpt_hash(va, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				pSeries_lpar_hpte_invalidate(slot, va, psize,
							     ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(va, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					/* param[] full: issue the bulk hcall */
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		/* terminate and flush the final partial batch */
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

441 442 443 444 445 446 447 448 449 450 451 452
/*
 * "bulk_remove=off" boot argument: disable use of the H_BULK_REMOVE
 * firmware feature, forcing flush_hash_range() to fall back to
 * per-HPTE invalidation.  Any other value is ignored.
 */
static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		/* fixed: message lacked a trailing newline and was over-indented */
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

453
/*
 * Install the LPAR (hypervisor hcall-based) implementations of the
 * hash MMU operations into the machine descriptor.
 */
void __init hpte_init_lpar(void)
{
	ppc_md.hpte_invalidate	= pSeries_lpar_hpte_invalidate;
	ppc_md.hpte_updatepp	= pSeries_lpar_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	ppc_md.hpte_insert	= pSeries_lpar_hpte_insert;
	ppc_md.hpte_remove	= pSeries_lpar_hpte_remove;
	ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;
	ppc_md.flush_hash_range	= pSeries_lpar_flush_hash_range;
	ppc_md.hpte_clear_all   = pSeries_lpar_hptab_clear;
}
464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

/*
 * "cmo_free_hint=" boot argument parser.  "no"/"off" disables free
 * page hinting; anything else enables it, but only "yes"/"on" is
 * accepted as a well-formed enable value (return 1).
 */
static int __init cmo_free_hint(char *str)
{
	char *val = strstrip(str);

	if (strcasecmp(val, "no") == 0 || strcasecmp(val, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	return (strcasecmp(val, "yes") == 0 ||
		strcasecmp(val, "on") == 0) ? 1 : 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

/*
 * Report the usage @state of every CMO page backing the given
 * order-@order Linux page to the hypervisor.  The CMO page size may
 * differ from PAGE_SIZE, so each Linux page is walked in cmo_page_sz
 * steps.
 */
static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

/*
 * Hint to the hypervisor that a freed page is now unused, when CMO is
 * present and free page hinting has not been disabled on the command
 * line.
 */
void arch_free_page(struct page *page, int order)
{
	if (cmo_free_hint_flag && firmware_has_feature(FW_FEATURE_CMO))
		pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif
516 517 518 519 520 521 522 523 524 525 526

#ifdef CONFIG_TRACEPOINTS
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

527 528 529 530 531 532 533
/* 
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this are spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);

534 535 536 537 538 539 540 541 542 543
/* Tracepoint registration hook: bump the TOC-resident refcount. */
void hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
}

/* Tracepoint unregistration hook: drop the TOC-resident refcount. */
void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}

544
/*
 * Fire the hcall-entry tracepoint.  A per-cpu depth counter guards
 * against recursion (the tracing code may itself issue hcalls, e.g.
 * spinlocks calling H_YIELD); interrupts are disabled while the
 * counter is manipulated.
 */
void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	/* already tracing an hcall on this cpu: don't recurse */
	if (*depth)
		goto out;

	(*depth)++;
	/* matching preempt_enable() is in __trace_hcall_exit() */
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

572 573
/*
 * Fire the hcall-exit tracepoint, mirroring __trace_hcall_entry():
 * same H_CEDE exclusion and per-cpu recursion guard, and it releases
 * the preemption disable taken at entry.
 */
void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	/* H_CEDE is never traced (RCU idle regions) — see entry hook */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	/* already tracing an hcall on this cpu: don't recurse */
	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	/* pairs with the preempt_disable() in __trace_hcall_entry() */
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif
597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640

/**
 * h_get_mpp
 * H_GET_MPP hcall returns info in 7 parms
 *
 * Unpacks the seven return words of H_GET_MPP into @mpp_data; the
 * shifts and masks below extract the sub-fields packed into retbuf[2]
 * and retbuf[3].  Returns the hcall status from plpar_hcall9().
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	/*
	 * NOTE(review): retbuf is not zero-initialized (unlike in
	 * h_get_mpp_x), so on hcall failure the values copied below
	 * are garbage — callers must check the return code.
	 */
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	/* retbuf[2]: group number above the pool number */
	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	/* retbuf[3]: weights in the top two bytes, entitlement below */
	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

/*
 * h_get_mpp_x - issue H_GET_MPP_X and unpack the first four return
 * words into @mpp_x_data.  The return buffer is zeroed up front so
 * unused fields read back as 0.  Returns the hcall status from
 * plpar_hcall9().
 */
int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}