/*
 * Xen leaves the responsibility for maintaining p2m mappings to the
 * guests themselves, but it must also access and update the p2m array
 * during suspend/resume when all the pages are reallocated.
 *
 * The logical flat p2m table is mapped to a linear kernel memory area.
 * For accesses by Xen a three-level tree linked via mfns only is set up to
 * allow the address space to be sparse.
 *
 *               Xen
 *                |
 *          p2m_top_mfn
 *              /   \
 * p2m_mid_mfn p2m_mid_mfn
 *         /           /
 *  p2m p2m p2m ...
 *
 * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
 *
 * The p2m_top_mfn level is limited to 1 page, so the maximum representable
 * pseudo-physical address space is:
 *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
 *
 * P2M_PER_PAGE depends on the architecture, as an mfn is always
 * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to
 * 512 and 1024 entries respectively.
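 *
 * On 64-bit, for example, that is 512 * 512 * 512 = 2^27 entries; with
 * the usual 4 KiB page size this caps the pseudo-physical address space
 * at 512 GiB (a worked example, assuming 4 KiB pages).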
 *
 * In short, these structures contain the Machine Frame Number (MFN) of the PFN.
 *
 * However, not all entries are filled with MFNs. For any leaf, middle, or
 * top entry that is void, we assume the mapping is "missing". So (for
 * example)
 *  pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY.
 * We have a dedicated page p2m_missing with all entries being
 * INVALID_P2M_ENTRY. This page may be referenced multiple times in the p2m
 * list/tree in case there are multiple areas with P2M_PER_PAGE invalid pfns.
 *
 * We also have the possibility of setting 1-1 mappings on certain regions, so
 * that:
 *  pfn_to_mfn(0xc0000)=0xc0000
 *
 * The benefit of this is that, for non-RAM regions (think PCI BARs, or
 * ACPI spaces), we can create mappings easily because we get the PFN
 * value to match the MFN.
 *
 * For this to work efficiently we have one new page p2m_identity. All entries
 * in p2m_identity are set to INVALID_P2M_ENTRY type (the Xen toolstack only
 * recognizes that and MFNs, no other fancy value).
 *
 * On lookup we spot that the entry points to p2m_identity and return the
 * identity value instead of dereferencing and returning INVALID_P2M_ENTRY.
 * If the entry points to an allocated page, we just proceed as before and
 * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in
 * the appropriate functions (pfn_to_mfn).
 *
 * The reason for having the IDENTITY_FRAME_BIT instead of just returning the
 * PFN is that we could find ourselves where pfn_to_mfn(pfn) == pfn for a
 * non-identity pfn. To protect ourselves against that, we elect to set (and
 * get) the IDENTITY_FRAME_BIT on all identity-mapped PFNs.
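 *
 * For example (a sketch, assuming IDENTITY_FRAME_BIT is a high bit outside
 * the range of valid MFNs):
 *  stored entry: IDENTITY_FRAME(0xc0000) = 0xc0000 | IDENTITY_FRAME_BIT
 *  pfn_to_mfn(0xc0000) unmasks the bit and returns 0xc0000.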
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cache.h>
#include <asm/setup.h>
#include <asm/uaccess.h>

#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>

#include "p2m.h"
#include "multicalls.h"
#include "xen-ops.h"

#define PMDS_PER_MID_PAGE	(P2M_MID_PER_PAGE / PTRS_PER_PTE)

unsigned long *xen_p2m_addr __read_mostly;
EXPORT_SYMBOL_GPL(xen_p2m_addr);
unsigned long xen_p2m_size __read_mostly;
EXPORT_SYMBOL_GPL(xen_p2m_size);
unsigned long xen_max_p2m_pfn __read_mostly;
EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
#else
#define P2M_LIMIT 0
#endif
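
/*
 * Illustrative arithmetic (a sketch, assuming 4 KiB pages): P2M_LIMIT is
 * given in GiB, so a limit of 512 sizes the p2m virtual area for
 * (512 << 30) / 4096 = 0x8000000 pfns; see xen_vmalloc_p2m_tree() below.
 */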

static DEFINE_SPINLOCK(p2m_update_lock);

static unsigned long *p2m_mid_missing_mfn;
static unsigned long *p2m_top_mfn;
static unsigned long **p2m_top_mfn_p;
static unsigned long *p2m_missing;
static unsigned long *p2m_identity;
static pte_t *p2m_missing_pte;
static pte_t *p2m_identity_pte;

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_P2M_PFN);
	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
}

static inline unsigned p2m_mid_index(unsigned long pfn)
{
	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_PER_PAGE;
}
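
/*
 * Worked example (a sketch, assuming 64-bit where P2M_PER_PAGE and
 * P2M_MID_PER_PAGE are both 512): pfn 0x12345 decomposes as
 *   p2m_top_index(0x12345) = 0x12345 / (512 * 512) = 0
 *   p2m_mid_index(0x12345) = (0x12345 / 512) % 512 = 145
 *   p2m_index(0x12345)     = 0x12345 % 512         = 325
 * i.e. the entry lives in leaf slot 325 of mid slot 145 of top slot 0.
 */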

static void p2m_top_mfn_init(unsigned long *top)
{
	unsigned i;

	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
		top[i] = virt_to_mfn(p2m_mid_missing_mfn);
}

static void p2m_top_mfn_p_init(unsigned long **top)
{
	unsigned i;

	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
		top[i] = p2m_mid_missing_mfn;
}

static void p2m_mid_mfn_init(unsigned long *mid, unsigned long *leaf)
{
	unsigned i;

	for (i = 0; i < P2M_MID_PER_PAGE; i++)
		mid[i] = virt_to_mfn(leaf);
}

static void p2m_init(unsigned long *p2m)
{
	unsigned i;

	for (i = 0; i < P2M_PER_PAGE; i++)
		p2m[i] = INVALID_P2M_ENTRY;
}

static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
{
	unsigned i;

	for (i = 0; i < P2M_PER_PAGE; i++)
		p2m[i] = IDENTITY_FRAME(pfn + i);
}

static void * __ref alloc_p2m_page(void)
{
	if (unlikely(!slab_is_available()))
		return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);

	return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
}

static void __ref free_p2m_page(void *p)
{
	if (unlikely(!slab_is_available())) {
		free_bootmem((unsigned long)p, PAGE_SIZE);
		return;
	}

	free_page((unsigned long)p);
}

/*
 * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
 *
 * This is called both at boot time, and after resuming from suspend:
 * - At boot time we're called rather early, and must use alloc_bootmem*()
 *   to allocate memory.
 *
 * - After resume we're called from within stop_machine, but the mfn
 *   tree should already be completely allocated.
 */
void __ref xen_build_mfn_list_list(void)
{
	unsigned long pfn, mfn;
	pte_t *ptep;
	unsigned int level, topidx, mididx;
	unsigned long *mid_mfn_p;

	if (xen_feature(XENFEAT_auto_translated_physmap) ||
	    xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
		return;

	/* Pre-initialize p2m_top_mfn to be completely missing */
	if (p2m_top_mfn == NULL) {
		p2m_mid_missing_mfn = alloc_p2m_page();
		p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);

		p2m_top_mfn_p = alloc_p2m_page();
		p2m_top_mfn_p_init(p2m_top_mfn_p);

		p2m_top_mfn = alloc_p2m_page();
		p2m_top_mfn_init(p2m_top_mfn);
	} else {
		/* Reinitialise, mfn's all change after migration */
		p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
	}

	for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN;
	     pfn += P2M_PER_PAGE) {
		topidx = p2m_top_index(pfn);
		mididx = p2m_mid_index(pfn);

		mid_mfn_p = p2m_top_mfn_p[topidx];
		ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn),
				      &level);
		BUG_ON(!ptep || level != PG_LEVEL_4K);
		mfn = pte_mfn(*ptep);
		ptep = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));

		/* Don't bother allocating any mfn mid levels if
		 * they're just missing, just update the stored mfn,
		 * since all could have changed over a migrate.
		 */
		if (ptep == p2m_missing_pte || ptep == p2m_identity_pte) {
			BUG_ON(mididx);
			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
			p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
			pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
			continue;
		}

		if (mid_mfn_p == p2m_mid_missing_mfn) {
			mid_mfn_p = alloc_p2m_page();
			p2m_mid_mfn_init(mid_mfn_p, p2m_missing);

			p2m_top_mfn_p[topidx] = mid_mfn_p;
		}

		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
		mid_mfn_p[mididx] = mfn;
	}
}

void xen_setup_mfn_list_list(void)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = ~0UL;
	else
		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
			virt_to_mfn(p2m_top_mfn);
	HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
	HYPERVISOR_shared_info->arch.p2m_generation = 0;
	HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
	HYPERVISOR_shared_info->arch.p2m_cr3 =
		xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long pfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list;
	xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE);

	for (pfn = xen_start_info->nr_pages; pfn < xen_p2m_size; pfn++)
		xen_p2m_addr[pfn] = INVALID_P2M_ENTRY;

	xen_max_p2m_pfn = xen_p2m_size;
}

#define P2M_TYPE_IDENTITY	0
#define P2M_TYPE_MISSING	1
#define P2M_TYPE_PFN		2
#define P2M_TYPE_UNKNOWN	3

static int xen_p2m_elem_type(unsigned long pfn)
{
	unsigned long mfn;

	if (pfn >= xen_p2m_size)
		return P2M_TYPE_IDENTITY;

	mfn = xen_p2m_addr[pfn];

	if (mfn == INVALID_P2M_ENTRY)
		return P2M_TYPE_MISSING;

	if (mfn & IDENTITY_FRAME_BIT)
		return P2M_TYPE_IDENTITY;

	return P2M_TYPE_PFN;
}
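
/*
 * For example (a sketch): an entry holding IDENTITY_FRAME(0xc0000) is
 * classified as P2M_TYPE_IDENTITY, INVALID_P2M_ENTRY as P2M_TYPE_MISSING,
 * and a plain mfn as P2M_TYPE_PFN; P2M_TYPE_UNKNOWN is only used as the
 * "abnormal" label in the debugfs dump below.
 */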

static void __init xen_rebuild_p2m_list(unsigned long *p2m)
{
	unsigned int i, chunk;
	unsigned long pfn;
	unsigned long *mfns;
	pte_t *ptep;
	pmd_t *pmdp;
	int type;

	p2m_missing = alloc_p2m_page();
	p2m_init(p2m_missing);
	p2m_identity = alloc_p2m_page();
	p2m_init(p2m_identity);

	p2m_missing_pte = alloc_p2m_page();
	paravirt_alloc_pte(&init_mm, __pa(p2m_missing_pte) >> PAGE_SHIFT);
	p2m_identity_pte = alloc_p2m_page();
	paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte(p2m_missing_pte + i,
			pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO));
		set_pte(p2m_identity_pte + i,
			pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO));
	}

	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
		/*
		 * Try to map missing/identity PMDs or p2m-pages if possible.
		 * We have to respect the structure of the mfn_list_list
		 * which will be built just afterwards.
		 * Chunk size to test is one p2m page if we are in the middle
		 * of a mfn_list_list mid page and the complete mid page area
		 * if we are at index 0 of the mid page. Please note that a
		 * mid page might cover more than one PMD, e.g. on 32 bit PAE
		 * kernels.
		 */
		chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ?
			P2M_PER_PAGE : P2M_PER_PAGE * P2M_MID_PER_PAGE;

		type = xen_p2m_elem_type(pfn);
		i = 0;
		if (type != P2M_TYPE_PFN)
			for (i = 1; i < chunk; i++)
				if (xen_p2m_elem_type(pfn + i) != type)
					break;
		if (i < chunk)
			/* Reset to minimal chunk size. */
			chunk = P2M_PER_PAGE;

		if (type == P2M_TYPE_PFN || i < chunk) {
			/* Use initial p2m page contents. */
#ifdef CONFIG_X86_64
			mfns = alloc_p2m_page();
			copy_page(mfns, xen_p2m_addr + pfn);
#else
			mfns = xen_p2m_addr + pfn;
#endif
			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));
			continue;
		}

		if (chunk == P2M_PER_PAGE) {
			/* Map complete missing or identity p2m-page. */
			mfns = (type == P2M_TYPE_MISSING) ?
				p2m_missing : p2m_identity;
			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO));
			continue;
		}

		/* Complete missing or identity PMD(s) can be mapped. */
		ptep = (type == P2M_TYPE_MISSING) ?
			p2m_missing_pte : p2m_identity_pte;
		for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
			pmdp = populate_extra_pmd(
				(unsigned long)(p2m + pfn) + i * PMD_SIZE);
			set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
		}
	}
}

void __init xen_vmalloc_p2m_tree(void)
{
	static struct vm_struct vm;
	unsigned long p2m_limit;

	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
	vm.flags = VM_ALLOC;
	vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
			PMD_SIZE * PMDS_PER_MID_PAGE);
	vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
	pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);

	xen_max_p2m_pfn = vm.size / sizeof(unsigned long);

	xen_rebuild_p2m_list(vm.addr);

	xen_p2m_addr = vm.addr;
	xen_p2m_size = xen_max_p2m_pfn;

	xen_inv_extra_mem();
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
	pte_t *ptep;
	unsigned int level;

	if (unlikely(pfn >= xen_p2m_size)) {
		if (pfn < xen_max_p2m_pfn)
			return xen_chk_extra_mem(pfn);

		return IDENTITY_FRAME(pfn);
	}

	ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
	BUG_ON(!ptep || level != PG_LEVEL_4K);

	/*
	 * The INVALID_P2M_ENTRY is filled in both p2m_*identity
	 * and in p2m_*missing, so returning the INVALID_P2M_ENTRY
	 * would be wrong.
	 */
	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
		return IDENTITY_FRAME(pfn);

	return xen_p2m_addr[pfn];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
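
/*
 * Usage sketch (illustrative only, not part of the original file): callers
 * normally go through the pfn_to_mfn() helper, which roughly does the
 * unmasking shown here for identity entries.
 */
static inline unsigned long example_pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn = get_phys_to_machine(pfn);

	/* Identity-mapped region: hand back the pfn itself. */
	if (unlikely(mfn & IDENTITY_FRAME_BIT))
		return mfn & ~IDENTITY_FRAME_BIT;

	return mfn;	/* a real mfn, or INVALID_P2M_ENTRY if missing */
}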

/*
 * Allocate new pmd(s). It is checked whether the old pmd is still in place.
 * If not, nothing is changed. This is okay as the only reason for allocating
 * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by an
 * individual pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
 */
static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
{
	pte_t *ptechk;
	pte_t *pte_newpg[PMDS_PER_MID_PAGE];
	pmd_t *pmdp;
	unsigned int level;
	unsigned long flags;
	unsigned long vaddr;
	int i;

	/* Do all allocations first to bail out in error case. */
	for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
		pte_newpg[i] = alloc_p2m_page();
		if (!pte_newpg[i]) {
			for (i--; i >= 0; i--)
				free_p2m_page(pte_newpg[i]);

			return NULL;
		}
	}

	vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1);

	for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
		copy_page(pte_newpg[i], pte_pg);
		paravirt_alloc_pte(&init_mm, __pa(pte_newpg[i]) >> PAGE_SHIFT);

		pmdp = lookup_pmd_address(vaddr);
		BUG_ON(!pmdp);

		spin_lock_irqsave(&p2m_update_lock, flags);

		ptechk = lookup_address(vaddr, &level);
		if (ptechk == pte_pg) {
			HYPERVISOR_shared_info->arch.p2m_generation++;
			wmb(); /* Tools are synchronizing via p2m_generation. */
			set_pmd(pmdp,
				__pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
			wmb(); /* Tools are synchronizing via p2m_generation. */
			HYPERVISOR_shared_info->arch.p2m_generation++;
			pte_newpg[i] = NULL;
		}

		spin_unlock_irqrestore(&p2m_update_lock, flags);

		if (pte_newpg[i]) {
			paravirt_release_pte(__pa(pte_newpg[i]) >> PAGE_SHIFT);
			free_p2m_page(pte_newpg[i]);
		}

		vaddr += PMD_SIZE;
	}

	return lookup_address(addr, &level);
}

/*
 * Fully allocate the p2m structure for a given pfn.  We need to check
 * that both the top and mid levels are allocated, and make sure the
 * parallel mfn tree is kept in sync.  We may race with other cpus, so
 * the new pages are installed with cmpxchg; if we lose the race then
 * simply free the page we allocated and use the one that's there.
 */
static bool alloc_p2m(unsigned long pfn)
{
	unsigned topidx;
	unsigned long *top_mfn_p, *mid_mfn;
	pte_t *ptep, *pte_pg;
	unsigned int level;
	unsigned long flags;
	unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
	unsigned long p2m_pfn;

	ptep = lookup_address(addr, &level);
	BUG_ON(!ptep || level != PG_LEVEL_4K);
	pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));

	if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
		/* PMD level is missing, allocate a new one */
		ptep = alloc_p2m_pmd(addr, pte_pg);
		if (!ptep)
			return false;
	}

	if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
		topidx = p2m_top_index(pfn);
		top_mfn_p = &p2m_top_mfn[topidx];
		mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);

		BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);

		if (mid_mfn == p2m_mid_missing_mfn) {
			/* Separately check the mid mfn level */
			unsigned long missing_mfn;
			unsigned long mid_mfn_mfn;
			unsigned long old_mfn;

			mid_mfn = alloc_p2m_page();
			if (!mid_mfn)
				return false;

			p2m_mid_mfn_init(mid_mfn, p2m_missing);

			missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
			mid_mfn_mfn = virt_to_mfn(mid_mfn);
			old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn);
			if (old_mfn != missing_mfn) {
				free_p2m_page(mid_mfn);
				mid_mfn = mfn_to_virt(old_mfn);
			} else {
				p2m_top_mfn_p[topidx] = mid_mfn;
			}
		}
	} else {
		mid_mfn = NULL;
	}

	p2m_pfn = pte_pfn(READ_ONCE(*ptep));
	if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
	    p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
		/* p2m leaf page is missing */
		unsigned long *p2m;

		p2m = alloc_p2m_page();
		if (!p2m)
			return false;

		if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))
			p2m_init(p2m);
		else
			p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1));

		spin_lock_irqsave(&p2m_update_lock, flags);

		if (pte_pfn(*ptep) == p2m_pfn) {
			HYPERVISOR_shared_info->arch.p2m_generation++;
			wmb(); /* Tools are synchronizing via p2m_generation. */
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(p2m)), PAGE_KERNEL));
			wmb(); /* Tools are synchronizing via p2m_generation. */
			HYPERVISOR_shared_info->arch.p2m_generation++;
			if (mid_mfn)
				mid_mfn[p2m_mid_index(pfn)] = virt_to_mfn(p2m);
			p2m = NULL;
		}

		spin_unlock_irqrestore(&p2m_update_lock, flags);

		if (p2m)
			free_p2m_page(p2m);
	}

	return true;
}

unsigned long __init set_phys_range_identity(unsigned long pfn_s,
				      unsigned long pfn_e)
{
	unsigned long pfn;

	if (unlikely(pfn_s >= xen_p2m_size))
		return 0;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
		return pfn_e - pfn_s;

	if (pfn_s > pfn_e)
		return 0;

	if (pfn_e > xen_p2m_size)
		pfn_e = xen_p2m_size;

	for (pfn = pfn_s; pfn < pfn_e; pfn++)
		xen_p2m_addr[pfn] = IDENTITY_FRAME(pfn);

	return pfn - pfn_s;
}

bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	pte_t *ptep;
	unsigned int level;

	/* don't track P2M changes in autotranslate guests */
	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
		return true;

	if (unlikely(pfn >= xen_p2m_size)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return true;
	}

	/*
	 * The interface requires atomic updates on p2m elements.
	 * xen_safe_write_ulong() uses __put_user(), which performs an
	 * atomic store via asm().
	 */
	if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
		return true;

	ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
	BUG_ON(!ptep || level != PG_LEVEL_4K);

	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_missing)))
		return mfn == INVALID_P2M_ENTRY;

	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
		return mfn == IDENTITY_FRAME(pfn);

	return false;
}

bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		if (!alloc_p2m(pfn))
			return false;

		return __set_phys_to_machine(pfn, mfn);
	}

	return true;
}
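
/*
 * Usage sketch (hypothetical values): after mapping a foreign frame,
 * record its backing mfn; a missing leaf page is allocated transparently
 * via alloc_p2m() on the retry path above:
 *
 *	if (!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))
 *		BUG();
 */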

int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i, ret = 0;
	pte_t *pte;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (kmap_ops) {
		ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
						kmap_ops, count);
		if (ret)
			goto out;
	}

	for (i = 0; i < count; i++) {
		unsigned long mfn, pfn;

		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		pfn = page_to_pfn(pages[i]);

		WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");

		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
			ret = -ENOMEM;
			goto out;
		}
	}

out:
	return ret;
}
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);

int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_unmap_grant_ref *kunmap_ops,
			      struct page **pages, unsigned int count)
{
	int i, ret = 0;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	for (i = 0; i < count; i++) {
		unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
		unsigned long pfn = page_to_pfn(pages[i]);

		if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
			ret = -EINVAL;
			goto out;
		}

		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
	if (kunmap_ops)
		ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
						kunmap_ops, count);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);

#ifdef CONFIG_XEN_DEBUG_FS
#include <linux/debugfs.h>
#include "debugfs.h"
static int p2m_dump_show(struct seq_file *m, void *v)
{
	static const char * const type_name[] = {
				[P2M_TYPE_IDENTITY] = "identity",
				[P2M_TYPE_MISSING] = "missing",
				[P2M_TYPE_PFN] = "pfn",
				[P2M_TYPE_UNKNOWN] = "abnormal"};
	unsigned long pfn, first_pfn;
	int type, prev_type;

	prev_type = xen_p2m_elem_type(0);
	first_pfn = 0;

	for (pfn = 0; pfn < xen_p2m_size; pfn++) {
		type = xen_p2m_elem_type(pfn);
		if (type != prev_type) {
			seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
				   type_name[prev_type]);
			prev_type = type;
			first_pfn = pfn;
		}
	}
	seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
		   type_name[prev_type]);
	return 0;
}
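
/*
 * Example output (values illustrative only):
 *  [0x0->0x9f000] pfn
 *  [0x9f000->0x100000] identity
 *  [0x100000->0x140000] missing
 */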

static int p2m_dump_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, p2m_dump_show, NULL);
}

static const struct file_operations p2m_dump_fops = {
	.open		= p2m_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *d_mmu_debug;

static int __init xen_p2m_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mmu_debug = debugfs_create_dir("mmu", d_xen);

	debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
	return 0;
}
fs_initcall(xen_p2m_debugfs);
#endif /* CONFIG_XEN_DEBUG_FS */