// SPDX-License-Identifier: GPL-2.0

/*
 * Xen leaves the responsibility for maintaining p2m mappings to the
 * guests themselves, but it must also access and update the p2m array
 * during suspend/resume when all the pages are reallocated.
 *
 * The logical flat p2m table is mapped to a linear kernel memory area.
 * For accesses by Xen a three-level tree linked via mfns only is set up to
 * allow the address space to be sparse.
 *
 *               Xen
 *                |
 *          p2m_top_mfn
 *              /   \
 * p2m_mid_mfn p2m_mid_mfn
 *         /           /
 *  p2m p2m p2m ...
 *
 * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
 *
 * The p2m_top_mfn level is limited to 1 page, so the maximum representable
 * pseudo-physical address space is:
 *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
 *
 * P2M_PER_PAGE depends on the architecture, as an mfn is always
 * unsigned long (8 bytes on 64-bit, 4 bytes on 32-bit), leading to
 * 512 and 1024 entries respectively.
 *
 * In short, these structures contain the Machine Frame Number (MFN) of the PFN.
 *
 * However, not all entries are filled with MFNs. Any leaf, middle, or top
 * entry that is void is assumed to be "missing". So (for example)
 *  pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY.
 *
 * We have a dedicated page p2m_missing with all entries being
 * INVALID_P2M_ENTRY. This page may be referenced multiple times in the p2m
 * list/tree in case there are multiple areas with P2M_PER_PAGE invalid pfns.
 *
 * We also have the possibility of setting 1-1 mappings on certain regions, so
 * that:
 *  pfn_to_mfn(0xc0000)=0xc0000
 *
 * The benefit of this is that for non-RAM regions (think PCI BARs, or ACPI
 * spaces) we can create mappings easily, because we get the PFN value to
 * match the MFN.
 *
 * For this to work efficiently we have one new page p2m_identity. All entries
 * in p2m_identity are set to INVALID_P2M_ENTRY type (the Xen toolstack only
 * recognizes that and MFNs, no other fancy value).
 *
 * On lookup we spot that the entry points to p2m_identity and return the
 * identity value instead of dereferencing and returning INVALID_P2M_ENTRY.
 * If the entry points to an allocated page, we just proceed as before and
 * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in
 * appropriate functions (pfn_to_mfn).
 *
 * The reason for having the IDENTITY_FRAME_BIT instead of just returning the
 * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a
 * non-identity pfn. To protect ourselves against that, we elect to set (and
 * get) the IDENTITY_FRAME_BIT on all identity mapped PFNs.
 */
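
/*
 * A worked example (illustrative only; the numbers assume a 64-bit kernel,
 * where P2M_PER_PAGE == P2M_MID_PER_PAGE == 512):
 *
 *	pfn 0x123456 is found via
 *	    topidx = 0x123456 / (512 * 512)  = 4
 *	    mididx = (0x123456 / 512) % 512  = 282
 *	    idx    = 0x123456 % 512          = 86
 *
 * and an identity-mapped pfn reads back with the type bit set:
 *
 *	set_phys_range_identity(0xc0000, 0xc1000);
 *	get_phys_to_machine(0xc0000) == IDENTITY_FRAME(0xc0000)
 *	pfn_to_mfn(0xc0000)          == 0xc0000  (bit unmasked again)
 */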

#include <linux/init.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cache.h>
#include <asm/setup.h>
#include <linux/uaccess.h>

#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>

#include "multicalls.h"
#include "xen-ops.h"

#define P2M_MID_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long *))
#define P2M_TOP_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long **))

#define MAX_P2M_PFN	(P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)

#define PMDS_PER_MID_PAGE	(P2M_MID_PER_PAGE / PTRS_PER_PTE)
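
/*
 * A worked sizing example (illustrative; assumes 64-bit with 4 KiB pages,
 * where P2M_PER_PAGE from <asm/xen/page.h> is PAGE_SIZE / sizeof(unsigned
 * long) = 512): every level then holds 512 entries, so MAX_P2M_PFN is
 * 512 * 512 * 512 = 2^27 pfns, i.e. a 512 GiB pseudo-physical space, and
 * PMDS_PER_MID_PAGE is 512 / 512 = 1. On 32-bit PAE a mid page holds 1024
 * entries while a pte page maps PTRS_PER_PTE = 512 pages, giving 2.
 */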

unsigned long *xen_p2m_addr __read_mostly;
EXPORT_SYMBOL_GPL(xen_p2m_addr);
unsigned long xen_p2m_size __read_mostly;
EXPORT_SYMBOL_GPL(xen_p2m_size);
unsigned long xen_max_p2m_pfn __read_mostly;
EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
#else
#define P2M_LIMIT 0
#endif
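/* P2M_LIMIT is in GiB: xen_vmalloc_p2m_tree() below converts it to pages. */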

static DEFINE_SPINLOCK(p2m_update_lock);

static unsigned long *p2m_mid_missing_mfn;
static unsigned long *p2m_top_mfn;
static unsigned long **p2m_top_mfn_p;
static unsigned long *p2m_missing;
static unsigned long *p2m_identity;
static pte_t *p2m_missing_pte;
static pte_t *p2m_identity_pte;

/*
 * Hint at last populated PFN.
 *
 * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack
 * can avoid scanning the whole P2M (which may be sized to account for
 * hotplugged memory).
 */
static unsigned long xen_p2m_last_pfn;

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_P2M_PFN);
	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
}

static inline unsigned p2m_mid_index(unsigned long pfn)
{
	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_PER_PAGE;
}

static void p2m_top_mfn_init(unsigned long *top)
{
	unsigned i;

	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
		top[i] = virt_to_mfn(p2m_mid_missing_mfn);
}

static void p2m_top_mfn_p_init(unsigned long **top)
{
	unsigned i;

	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
		top[i] = p2m_mid_missing_mfn;
}

static void p2m_mid_mfn_init(unsigned long *mid, unsigned long *leaf)
{
	unsigned i;

	for (i = 0; i < P2M_MID_PER_PAGE; i++)
		mid[i] = virt_to_mfn(leaf);
}

static void p2m_init(unsigned long *p2m)
{
	unsigned i;

	for (i = 0; i < P2M_PER_PAGE; i++)
		p2m[i] = INVALID_P2M_ENTRY;
}

static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
{
	unsigned i;

	for (i = 0; i < P2M_PER_PAGE; i++)
		p2m[i] = IDENTITY_FRAME(pfn + i);
}

static void * __ref alloc_p2m_page(void)
{
	if (unlikely(!slab_is_available()))
		return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);

	return (void *)__get_free_page(GFP_KERNEL);
}

static void __ref free_p2m_page(void *p)
{
	if (unlikely(!slab_is_available())) {
		free_bootmem((unsigned long)p, PAGE_SIZE);
		return;
	}

	free_page((unsigned long)p);
}

/*
 * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
 *
 * This is called both at boot time, and after resuming from suspend:
 * - At boot time we're called rather early, and must use alloc_bootmem*()
 *   to allocate memory.
 *
 * - After resume we're called from within stop_machine, but the mfn
 *   tree should already be completely allocated.
 */
void __ref xen_build_mfn_list_list(void)
{
	unsigned long pfn, mfn;
	pte_t *ptep;
	unsigned int level, topidx, mididx;
	unsigned long *mid_mfn_p;

	if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
		return;

	/* Pre-initialize p2m_top_mfn to be completely missing */
	if (p2m_top_mfn == NULL) {
		p2m_mid_missing_mfn = alloc_p2m_page();
		p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);

		p2m_top_mfn_p = alloc_p2m_page();
		p2m_top_mfn_p_init(p2m_top_mfn_p);

		p2m_top_mfn = alloc_p2m_page();
		p2m_top_mfn_init(p2m_top_mfn);
	} else {
		/* Reinitialise, mfns all change after migration */
		p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
	}

	for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN;
	     pfn += P2M_PER_PAGE) {
		topidx = p2m_top_index(pfn);
		mididx = p2m_mid_index(pfn);

		mid_mfn_p = p2m_top_mfn_p[topidx];
		ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn),
				      &level);
		BUG_ON(!ptep || level != PG_LEVEL_4K);
		mfn = pte_mfn(*ptep);
		ptep = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));

		/* Don't bother allocating any mfn mid levels if
		 * they're just missing, just update the stored mfn,
		 * since all could have changed over a migrate.
		 */
		if (ptep == p2m_missing_pte || ptep == p2m_identity_pte) {
			BUG_ON(mididx);
			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
			p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
			pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
			continue;
		}

		if (mid_mfn_p == p2m_mid_missing_mfn) {
			mid_mfn_p = alloc_p2m_page();
			p2m_mid_mfn_init(mid_mfn_p, p2m_missing);

			p2m_top_mfn_p[topidx] = mid_mfn_p;
		}

		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
		mid_mfn_p[mididx] = mfn;
	}
}

void xen_setup_mfn_list_list(void)
{
	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = ~0UL;
	else
		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
			virt_to_mfn(p2m_top_mfn);
	HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
	HYPERVISOR_shared_info->arch.p2m_generation = 0;
	HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
	HYPERVISOR_shared_info->arch.p2m_cr3 =
		xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
}
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long pfn;

	xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list;
	xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE);

	for (pfn = xen_start_info->nr_pages; pfn < xen_p2m_size; pfn++)
		xen_p2m_addr[pfn] = INVALID_P2M_ENTRY;

	xen_max_p2m_pfn = xen_p2m_size;
}

#define P2M_TYPE_IDENTITY	0
#define P2M_TYPE_MISSING	1
#define P2M_TYPE_PFN		2
#define P2M_TYPE_UNKNOWN	3

static int xen_p2m_elem_type(unsigned long pfn)
{
	unsigned long mfn;

	if (pfn >= xen_p2m_size)
		return P2M_TYPE_IDENTITY;

	mfn = xen_p2m_addr[pfn];

	if (mfn == INVALID_P2M_ENTRY)
		return P2M_TYPE_MISSING;

	if (mfn & IDENTITY_FRAME_BIT)
		return P2M_TYPE_IDENTITY;

	return P2M_TYPE_PFN;
}

static void __init xen_rebuild_p2m_list(unsigned long *p2m)
{
	unsigned int i, chunk;
	unsigned long pfn;
	unsigned long *mfns;
	pte_t *ptep;
	pmd_t *pmdp;
	int type;

	p2m_missing = alloc_p2m_page();
	p2m_init(p2m_missing);
	p2m_identity = alloc_p2m_page();
	p2m_init(p2m_identity);

	p2m_missing_pte = alloc_p2m_page();
	paravirt_alloc_pte(&init_mm, __pa(p2m_missing_pte) >> PAGE_SHIFT);
	p2m_identity_pte = alloc_p2m_page();
	paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte(p2m_missing_pte + i,
			pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO));
		set_pte(p2m_identity_pte + i,
			pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO));
	}

	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
		/*
		 * Try to map missing/identity PMDs or p2m-pages if possible.
		 * We have to respect the structure of the mfn_list_list
		 * which will be built just afterwards.
		 * Chunk size to test is one p2m page if we are in the middle
		 * of a mfn_list_list mid page and the complete mid page area
		 * if we are at index 0 of the mid page. Please note that a
		 * mid page might cover more than one PMD, e.g. on 32 bit PAE
		 * kernels.
		 */
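		/*
		 * Example (illustrative, 64-bit: P2M_PER_PAGE ==
		 * P2M_MID_PER_PAGE == 512): at pfn 0 a whole mid area of
		 * 512 * 512 pfns is tested, while at pfn 512 only one
		 * p2m page of 512 pfns is.
		 */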
		chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ?
			P2M_PER_PAGE : P2M_PER_PAGE * P2M_MID_PER_PAGE;

		type = xen_p2m_elem_type(pfn);
		i = 0;
		if (type != P2M_TYPE_PFN)
			for (i = 1; i < chunk; i++)
				if (xen_p2m_elem_type(pfn + i) != type)
					break;
		if (i < chunk)
			/* Reset to minimal chunk size. */
			chunk = P2M_PER_PAGE;

		if (type == P2M_TYPE_PFN || i < chunk) {
			/* Use initial p2m page contents. */
#ifdef CONFIG_X86_64
			mfns = alloc_p2m_page();
			copy_page(mfns, xen_p2m_addr + pfn);
#else
			mfns = xen_p2m_addr + pfn;
#endif
			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));
			continue;
		}

		if (chunk == P2M_PER_PAGE) {
			/* Map complete missing or identity p2m-page. */
			mfns = (type == P2M_TYPE_MISSING) ?
				p2m_missing : p2m_identity;
			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO));
			continue;
		}

		/* Complete missing or identity PMD(s) can be mapped. */
		ptep = (type == P2M_TYPE_MISSING) ?
			p2m_missing_pte : p2m_identity_pte;
		for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
			pmdp = populate_extra_pmd(
				(unsigned long)(p2m + pfn) + i * PMD_SIZE);
			set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
		}
	}
}

void __init xen_vmalloc_p2m_tree(void)
{
	static struct vm_struct vm;
	unsigned long p2m_limit;

	xen_p2m_last_pfn = xen_max_p2m_pfn;

	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
	vm.flags = VM_ALLOC;
	vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
			PMD_SIZE * PMDS_PER_MID_PAGE);
	vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
	pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);

	xen_max_p2m_pfn = vm.size / sizeof(unsigned long);

	xen_rebuild_p2m_list(vm.addr);

	xen_p2m_addr = vm.addr;
	xen_p2m_size = xen_max_p2m_pfn;

	xen_inv_extra_mem();
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
	pte_t *ptep;
	unsigned int level;

	if (unlikely(pfn >= xen_p2m_size)) {
		if (pfn < xen_max_p2m_pfn)
			return xen_chk_extra_mem(pfn);

		return IDENTITY_FRAME(pfn);
	}

	ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
	BUG_ON(!ptep || level != PG_LEVEL_4K);

	/*
	 * The INVALID_P2M_ENTRY is filled in both p2m_*identity
	 * and in p2m_*missing, so returning the INVALID_P2M_ENTRY
	 * would be wrong.
	 */
	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
		return IDENTITY_FRAME(pfn);

	return xen_p2m_addr[pfn];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
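
/*
 * Usage sketch (illustrative only, mirroring pfn_to_mfn() in
 * <asm/xen/page.h>): callers strip the type bits before using the mfn,
 * e.g.
 *
 *	unsigned long mfn = get_phys_to_machine(pfn);
 *	if (mfn != INVALID_P2M_ENTRY)
 *		mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
 */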

/*
 * Allocate new pmd(s). It is checked whether the old pmd is still in place.
 * If not, nothing is changed. This is okay as the only reason for allocating
 * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by an
 * individual pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
 */
static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
{
	pte_t *ptechk;
	pte_t *pte_newpg[PMDS_PER_MID_PAGE];
	pmd_t *pmdp;
	unsigned int level;
	unsigned long flags;
	unsigned long vaddr;
	int i;

	/* Do all allocations first to bail out in error case. */
	for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
		pte_newpg[i] = alloc_p2m_page();
		if (!pte_newpg[i]) {
			for (i--; i >= 0; i--)
				free_p2m_page(pte_newpg[i]);

			return NULL;
		}
	}

	vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1);

	for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
		copy_page(pte_newpg[i], pte_pg);
		paravirt_alloc_pte(&init_mm, __pa(pte_newpg[i]) >> PAGE_SHIFT);

		pmdp = lookup_pmd_address(vaddr);
		BUG_ON(!pmdp);

		spin_lock_irqsave(&p2m_update_lock, flags);

		ptechk = lookup_address(vaddr, &level);
		if (ptechk == pte_pg) {
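			/*
			 * The two increments bracket the update: an odd
			 * p2m_generation value tells the Xen toolstack
			 * that the p2m is being changed (a seqcount-style
			 * protocol).
			 */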
			HYPERVISOR_shared_info->arch.p2m_generation++;
			wmb(); /* Tools are synchronizing via p2m_generation. */
			set_pmd(pmdp,
				__pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
			wmb(); /* Tools are synchronizing via p2m_generation. */
			HYPERVISOR_shared_info->arch.p2m_generation++;
			pte_newpg[i] = NULL;
		}

		spin_unlock_irqrestore(&p2m_update_lock, flags);

		if (pte_newpg[i]) {
			paravirt_release_pte(__pa(pte_newpg[i]) >> PAGE_SHIFT);
			free_p2m_page(pte_newpg[i]);
		}

		vaddr += PMD_SIZE;
	}

	return lookup_address(addr, &level);
}

/*
 * Fully allocate the p2m structure for a given pfn.  We need to check
 * that both the top and mid levels are allocated, and make sure the
 * parallel mfn tree is kept in sync.  We may race with other cpus, so
 * the new pages are installed with cmpxchg; if we lose the race then
 * simply free the page we allocated and use the one that's there.
 */
int xen_alloc_p2m_entry(unsigned long pfn)
{
	unsigned topidx;
	unsigned long *top_mfn_p, *mid_mfn;
	pte_t *ptep, *pte_pg;
	unsigned int level;
	unsigned long flags;
	unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
	unsigned long p2m_pfn;

	ptep = lookup_address(addr, &level);
	BUG_ON(!ptep || level != PG_LEVEL_4K);
	pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));

	if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
		/* PMD level is missing, allocate a new one */
		ptep = alloc_p2m_pmd(addr, pte_pg);
		if (!ptep)
			return -ENOMEM;
	}

	if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
		topidx = p2m_top_index(pfn);
		top_mfn_p = &p2m_top_mfn[topidx];
		mid_mfn = READ_ONCE(p2m_top_mfn_p[topidx]);

		BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);

		if (mid_mfn == p2m_mid_missing_mfn) {
			/* Separately check the mid mfn level */
			unsigned long missing_mfn;
			unsigned long mid_mfn_mfn;
			unsigned long old_mfn;

			mid_mfn = alloc_p2m_page();
			if (!mid_mfn)
				return -ENOMEM;

			p2m_mid_mfn_init(mid_mfn, p2m_missing);

			missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
			mid_mfn_mfn = virt_to_mfn(mid_mfn);
			old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn);
			if (old_mfn != missing_mfn) {
				free_p2m_page(mid_mfn);
				mid_mfn = mfn_to_virt(old_mfn);
			} else {
				p2m_top_mfn_p[topidx] = mid_mfn;
			}
		}
	} else {
		mid_mfn = NULL;
	}

	p2m_pfn = pte_pfn(READ_ONCE(*ptep));
	if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
	    p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
		/* p2m leaf page is missing */
		unsigned long *p2m;

		p2m = alloc_p2m_page();
		if (!p2m)
			return -ENOMEM;

		if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))
			p2m_init(p2m);
		else
			p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1));

		spin_lock_irqsave(&p2m_update_lock, flags);

		if (pte_pfn(*ptep) == p2m_pfn) {
			HYPERVISOR_shared_info->arch.p2m_generation++;
			wmb(); /* Tools are synchronizing via p2m_generation. */
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(p2m)), PAGE_KERNEL));
			wmb(); /* Tools are synchronizing via p2m_generation. */
			HYPERVISOR_shared_info->arch.p2m_generation++;
			if (mid_mfn)
				mid_mfn[p2m_mid_index(pfn)] = virt_to_mfn(p2m);
			p2m = NULL;
		}

		spin_unlock_irqrestore(&p2m_update_lock, flags);

		if (p2m)
			free_p2m_page(p2m);
	}

	/* Expanded the p2m? */
	if (pfn > xen_p2m_last_pfn) {
		xen_p2m_last_pfn = pfn;
		HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
	}

	return 0;
}
EXPORT_SYMBOL(xen_alloc_p2m_entry);

unsigned long __init set_phys_range_identity(unsigned long pfn_s,
				      unsigned long pfn_e)
{
	unsigned long pfn;

	if (unlikely(pfn_s >= xen_p2m_size))
		return 0;

	if (pfn_s > pfn_e)
		return 0;

	if (pfn_e > xen_p2m_size)
		pfn_e = xen_p2m_size;

	for (pfn = pfn_s; pfn < pfn_e; pfn++)
		xen_p2m_addr[pfn] = IDENTITY_FRAME(pfn);

	return pfn - pfn_s;
}
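
/*
 * Usage sketch (illustrative only; hole_start/hole_end are hypothetical):
 * the domain setup code marks non-RAM holes, e.g. between two E820 RAM
 * regions, identity mapped roughly like
 *
 *	set_phys_range_identity(PFN_UP(hole_start), PFN_DOWN(hole_end));
 *
 * after which get_phys_to_machine() returns IDENTITY_FRAME(pfn) there.
 */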

bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	pte_t *ptep;
	unsigned int level;

	if (unlikely(pfn >= xen_p2m_size)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return true;
	}

	/*
	 * The interface requires atomic updates on p2m elements.
	 * xen_safe_write_ulong() uses __put_user, which does an atomic
	 * store via asm().
	 */
	if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
		return true;

	ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
	BUG_ON(!ptep || level != PG_LEVEL_4K);

	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_missing)))
		return mfn == INVALID_P2M_ENTRY;

	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
		return mfn == IDENTITY_FRAME(pfn);

	return false;
}

bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		int ret;

		ret = xen_alloc_p2m_entry(pfn);
		if (ret < 0)
			return false;

		return __set_phys_to_machine(pfn, mfn);
	}

	return true;
}

int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i, ret = 0;
	pte_t *pte;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (kmap_ops) {
		ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
						kmap_ops, count);
		if (ret)
			goto out;
	}

	for (i = 0; i < count; i++) {
		unsigned long mfn, pfn;

		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		pfn = page_to_pfn(pages[i]);

		WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");

		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
			ret = -ENOMEM;
			goto out;
		}
	}

out:
	return ret;
}
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);

int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_unmap_grant_ref *kunmap_ops,
			      struct page **pages, unsigned int count)
{
	int i, ret = 0;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	for (i = 0; i < count; i++) {
		unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
		unsigned long pfn = page_to_pfn(pages[i]);

		if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
			ret = -EINVAL;
			goto out;
		}

		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
	if (kunmap_ops)
		ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
						kunmap_ops, count);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);

#ifdef CONFIG_XEN_DEBUG_FS
#include <linux/debugfs.h>
#include "debugfs.h"
static int p2m_dump_show(struct seq_file *m, void *v)
{
	static const char * const type_name[] = {
				[P2M_TYPE_IDENTITY] = "identity",
				[P2M_TYPE_MISSING] = "missing",
				[P2M_TYPE_PFN] = "pfn",
				[P2M_TYPE_UNKNOWN] = "abnormal"};
	unsigned long pfn, first_pfn;
	int type, prev_type;

	prev_type = xen_p2m_elem_type(0);
	first_pfn = 0;

	for (pfn = 0; pfn < xen_p2m_size; pfn++) {
		type = xen_p2m_elem_type(pfn);
		if (type != prev_type) {
			seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
				   type_name[prev_type]);
			prev_type = type;
			first_pfn = pfn;
		}
	}
	seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
		   type_name[prev_type]);
	return 0;
}

static int p2m_dump_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, p2m_dump_show, NULL);
}

static const struct file_operations p2m_dump_fops = {
	.open		= p2m_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *d_mmu_debug;

static int __init xen_p2m_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mmu_debug = debugfs_create_dir("mmu", d_xen);

	debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
	return 0;
}
fs_initcall(xen_p2m_debugfs);
#endif /* CONFIG_XEN_DEBUG_FS */