lpar.c 15.6 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21
/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 * 
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

22 23
/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG
L
Linus Torvalds 已提交
24 25 26

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
27
#include <linux/console.h>
28
#include <linux/export.h>
L
Linus Torvalds 已提交
29 30 31 32 33 34 35 36 37 38 39
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
D
David Gibson 已提交
40
#include <asm/udbg.h>
P
Paul Mackerras 已提交
41
#include <asm/smp.h>
42
#include <asm/trace.h>
43
#include <asm/firmware.h>
44 45

#include "plpar_wrappers.h"
46
#include "pseries.h"
L
Linus Torvalds 已提交
47 48


49
/* in hvCall.S */
L
Linus Torvalds 已提交
50
EXPORT_SYMBOL(plpar_hcall);
51
EXPORT_SYMBOL(plpar_hcall9);
L
Linus Torvalds 已提交
52
EXPORT_SYMBOL(plpar_hcall_norets);
53

L
Linus Torvalds 已提交
54 55 56 57 58
extern void pSeries_find_serial_port(void);

void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
59
	unsigned long addr;
L
Linus Torvalds 已提交
60
	long ret;
61 62
	struct paca_struct *pp;
	struct dtl_entry *dtl;
63 64

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
65
		lppaca_of(cpu).vmxregs_in_use = 1;
66

67
	addr = __pa(&lppaca_of(cpu));
68
	ret = register_vpa(hwcpu, addr);
L
Linus Torvalds 已提交
69

70
	if (ret) {
71 72
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
73 74 75 76 77 78 79 80 81 82
		return;
	}
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that.  All SPLPAR support SLB shadow buffer.
	 */
	addr = __pa(&slb_shadow[cpu]);
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
83 84 85
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
86
	}
87 88 89 90 91 92 93 94 95 96 97 98 99 100 101

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
102 103 104
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
105 106
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
L
Linus Torvalds 已提交
107 108
}

109
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
110 111 112
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int ssize)
L
Linus Torvalds 已提交
113 114 115 116
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
117
	unsigned long hpte_v, hpte_r;
L
Linus Torvalds 已提交
118

119
	if (!(vflags & HPTE_V_BOLTED))
120 121 122
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn,  pa, rflags, vflags, psize);
123

124
	hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
125 126 127
	hpte_r = hpte_encode_r(pa, psize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
128
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
129

L
Linus Torvalds 已提交
130 131 132 133 134 135 136 137
	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0         */
	/* Zero page = 0               */
	/* I-cache Invalidate = 0      */
	/* I-cache synchronize = 0     */
	/* Exact = 0                   */
	flags = 0;

138
	/* Make pHyp happy */
139
	if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU))
140
		hpte_r &= ~_PAGE_COHERENT;
141 142
	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;
L
Linus Torvalds 已提交
143

144
	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
145
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
146
		if (!(vflags & HPTE_V_BOLTED))
147
			pr_devel(" full\n");
L
Linus Torvalds 已提交
148
		return -1;
149
	}
L
Linus Torvalds 已提交
150 151 152 153 154 155

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
156
	if (unlikely(lpar_rc != H_SUCCESS)) {
157
		if (!(vflags & HPTE_V_BOLTED))
158
			pr_devel(" lpar err %lu\n", lpar_rc);
L
Linus Torvalds 已提交
159
		return -2;
160 161
	}
	if (!(vflags & HPTE_V_BOLTED))
162
		pr_devel(" -> slot: %lu\n", slot & 7);
L
Linus Torvalds 已提交
163 164 165 166

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
167
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
L
Linus Torvalds 已提交
168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186
}

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
187
		if (lpar_rc == H_SUCCESS)
L
Linus Torvalds 已提交
188
			return i;
189
		BUG_ON(lpar_rc != H_NOT_FOUND);
L
Linus Torvalds 已提交
190 191 192 193 194 195 196 197 198 199 200 201

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

/*
 * Clear the hashed page table by reading it back four HPTEs at a time
 * and removing every valid entry that is not part of the VRMA (the
 * VRMA mapping must be left in place).
 */
static void pSeries_lpar_hptab_clear(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;	/* 16 bytes per HPTE */
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/* Read in batches of 4,
	 * invalidate only valid entries not in the VRMA
	 * hpte_count will be a multiple of 4
         */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++){
			/* skip VRMA entries */
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			/* remove only entries that are currently valid */
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
234 235
/*
 * Update the protection bits of the HPTE at @slot via H_PROTECT.
 * Returns 0 on success, or -1 when no HPTE with the expected AVPN is
 * present at @slot.  @local is unused here.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int ssize, int local)
{
	unsigned long lpar_rc;
	/* H_AVPN: only update if the entry's AVPN matches want_v */
	unsigned long flags = (newpp & 7) | H_AVPN;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	/* any other failure from H_PROTECT is fatal */
	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
{
	unsigned long dword0;
	unsigned long lpar_rc;
	unsigned long dummy_word1;
	unsigned long flags;

	/* Read 1 pte at a time                        */
	/* Do not need RPN to logical page translation */
	/* No cross CEC PFT access                     */
	flags = 0;

	lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);

276
	BUG_ON(lpar_rc != H_SUCCESS);
L
Linus Torvalds 已提交
277 278 279 280

	return dword0;
}

281
static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
L
Linus Torvalds 已提交
282 283
{
	unsigned long hash;
P
Paul Mackerras 已提交
284
	unsigned long i;
L
Linus Torvalds 已提交
285
	long slot;
286
	unsigned long want_v, hpte_v;
L
Linus Torvalds 已提交
287

288 289
	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);
P
Paul Mackerras 已提交
290 291 292 293 294 295 296 297 298 299

	/* Bolted entries are always in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hpte_v = pSeries_lpar_hpte_getword0(slot);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
L
Linus Torvalds 已提交
300 301 302 303 304 305
	}

	return -1;
} 

/*
 * Change the protection bits of the bolted kernel mapping for
 * effective address @ea.  The entry must exist; we BUG otherwise.
 */
static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	/* no H_AVPN flag: we already know the exact slot */
	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

324
/*
 * Invalidate the HPTE at @slot via H_REMOVE, matching on the AVPN
 * derived from @vpn.  An already-missing entry (H_NOT_FOUND) is
 * silently tolerated.  @local is unused here.
 */
static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	/* H_AVPN: remove only if the AVPN matches */
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

342 343 344
/*
 * Remove the bolted kernel mapping for effective address @ea.
 * The entry must exist; we BUG if it cannot be located.
 */
static void pSeries_lpar_hpte_removebolted(unsigned long ea,
					   int psize, int ssize)
{
	unsigned long vsid = get_kernel_vsid(ea, ssize);
	unsigned long vpn = hpt_vpn(ea, vsid, ssize);
	unsigned long slot = pSeries_lpar_hpte_find(vpn, psize, ssize);

	BUG_ON(slot == -1);

	pSeries_lpar_hpte_invalidate(slot, vpn, psize, ssize, 0);
}

357 358 359 360 361 362 363
/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL

L
Linus Torvalds 已提交
364 365 366 367
/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
368
/*
 * Flush the @number entries collected in this cpu's ppc64_tlb_batch.
 * When the firmware supports H_BULK_REMOVE, removals are batched four
 * (request, AVPN) pairs at a time into param[]; otherwise each HPTE is
 * invalidated individually.  A global spinlock serializes the flush
 * unless the MMU supports lockless tlbie.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[9];
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;	/* number of words queued in param[] */
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			/* recompute the slot from hash and hidx bits */
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     ssize, local);
			} else {
				/* queue a (request, AVPN) pair */
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				/* flush once four pairs are queued */
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	/* flush any partially filled batch, terminated by HBR_END */
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

427 428 429 430 431 432 433 434 435 436 437 438
/*
 * "bulk_remove=off" on the kernel command line disables use of the
 * H_BULK_REMOVE hcall for hash table flushes.
 */
static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		/* add the missing newline so the message isn't merged
		 * with the next printk */
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

439
/*
 * Install the LPAR (hcall-based) implementations of the hash-MMU
 * management hooks into ppc_md.
 */
void __init hpte_init_lpar(void)
{
	ppc_md.hpte_invalidate	= pSeries_lpar_hpte_invalidate;
	ppc_md.hpte_updatepp	= pSeries_lpar_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	ppc_md.hpte_insert	= pSeries_lpar_hpte_insert;
	ppc_md.hpte_remove	= pSeries_lpar_hpte_remove;
	ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;
	ppc_md.flush_hash_range	= pSeries_lpar_flush_hash_range;
	ppc_md.hpte_clear_all   = pSeries_lpar_hptab_clear;
}
450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

/*
 * Parse the "cmo_free_hint=" command line option.  "no"/"off"
 * (case-insensitive) disables CMO free page hinting; "yes"/"on"
 * enables it.  Any other value enables hinting but reports a parse
 * failure to the setup machinery.
 */
static int __init cmo_free_hint(char *str)
{
	char *parm = strstrip(str);
	int turn_off = strcasecmp(parm, "no") == 0 ||
		       strcasecmp(parm, "off") == 0;

	if (turn_off) {
		cmo_free_hint_flag = 0;
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	/* unrecognized value: hinting stays on, but flag the option */
	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

/*
 * Issue H_PAGE_INIT with @state for every CMO-sized chunk of the
 * 2^order kernel pages starting at @page.  The hypervisor's CMO page
 * size may differ from PAGE_SIZE, hence the inner loop.
 */
static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

/*
 * Hint freed pages to the hypervisor (H_PAGE_SET_UNUSED) when CMO
 * free page hinting is enabled and the firmware supports CMO.
 */
void arch_free_page(struct page *page, int order)
{
	if (cmo_free_hint_flag && firmware_has_feature(FW_FEATURE_CMO))
		pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif
502 503 504 505 506 507 508 509 510 511 512

#ifdef CONFIG_TRACEPOINTS
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

513 514 515 516 517 518 519
/* 
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this are spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);

520 521 522 523 524 525 526 527 528 529
/* Bump the TOC-resident refcount when an hcall tracepoint probe is
 * registered (called under the tracepoints_mutex).
 */
void hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
}

/* Drop the refcount when an hcall tracepoint probe is unregistered
 * (called under the tracepoints_mutex).
 */
void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}

530
/*
 * Fire the hcall_entry tracepoint for @opcode.  Uses a per-cpu depth
 * counter (with interrupts disabled) to suppress recursive tracing
 * when the tracing code itself issues hcalls.  Preemption is disabled
 * here and re-enabled in __trace_hcall_exit.
 */
void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	/* already inside a traced hcall on this cpu: don't recurse */
	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

558 559
/*
 * Fire the hcall_exit tracepoint for @opcode with its return value and
 * buffer.  Mirrors __trace_hcall_entry: same H_CEDE exclusion and
 * per-cpu recursion guard, and re-enables the preemption disabled at
 * entry.
 */
void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	/* H_CEDE is never traced -- see __trace_hcall_entry */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = &__get_cpu_var(hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif
583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626

/**
 * h_get_mpp
 * H_GET_MPP hcall returns info in 7 parms
 *
 * Unpacks the seven return words of H_GET_MPP into @mpp_data and
 * returns the hcall status.
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	/* retbuf[2]: group number at bits 16-31, pool number at 0-15 */
	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	/* retbuf[3]: two weight bytes on top, 48-bit entitlement below */
	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

/*
 * Unpack the first four return words of the H_GET_MPP_X hcall into
 * @mpp_x_data and return the hcall status.
 */
int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}