p2m.c
/*
 * Xen leaves the responsibility for maintaining p2m mappings to the
 * guests themselves, but it must also access and update the p2m array
 * during suspend/resume when all the pages are reallocated.
 *
 * The logical flat p2m table is mapped to a linear kernel memory area.
 * For accesses by Xen a three-level tree linked via mfns only is set up to
 * allow the address space to be sparse.
 *
 *               Xen
 *                |
 *          p2m_top_mfn
 *              /   \
 * p2m_mid_mfn p2m_mid_mfn
 *         /           /
 *  p2m p2m p2m ...
 *
 * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
 *
 * The p2m_top_mfn level is limited to 1 page, so the maximum representable
 * pseudo-physical address space is:
 *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
 *
 * P2M_PER_PAGE depends on the architecture, as an mfn is always
 * unsigned long (8 bytes on 64-bit, 4 bytes on 32-bit), leading to
 * 512 and 1024 entries respectively.
 *
 * In short, these structures contain the Machine Frame Number (MFN) of the PFN.
 *
 * However, not all entries are filled with MFNs. Any leaf, middle, or
 * top-level entry that is void is assumed to be "missing". So (for example)
 *  pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY.
 *
 * We have a dedicated page p2m_missing with all entries being
 * INVALID_P2M_ENTRY. This page may be referenced multiple times in the p2m
 * list/tree in case there are multiple areas with P2M_PER_PAGE invalid pfns.
 *
 * We also have the possibility of setting 1-1 mappings on certain regions, so
 * that:
 *  pfn_to_mfn(0xc0000)=0xc0000
 *
 * The benefit of this is that we can create mappings for non-RAM regions
 * (think PCI BARs, or ACPI spaces) easily, because the PFN value matches
 * the MFN.
 *
 * For this to work efficiently we have one new page p2m_identity. All entries
 * in p2m_identity are set to INVALID_P2M_ENTRY type (the Xen toolstack only
 * recognizes that and MFNs, no other fancy values).
 *
 * On lookup we spot that the entry points to p2m_identity and return the
 * identity value instead of dereferencing and returning INVALID_P2M_ENTRY.
 * If the entry points to an allocated page, we just proceed as before and
 * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in
 * the appropriate functions (pfn_to_mfn).
 *
 * The reason for having the IDENTITY_FRAME_BIT instead of just returning the
 * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a
 * non-identity pfn. To protect ourselves against this we elect to set (and
 * get) the IDENTITY_FRAME_BIT on all identity-mapped PFNs.
 */
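
/*
 * Illustrative sketch (not part of this file's code): how a consumer
 * applies the IDENTITY_FRAME_BIT convention described above. The helper
 * name example_pfn_to_mfn() is made up; the real unmasking is done by
 * pfn_to_mfn() in asm/xen/page.h.
 *
 *	static unsigned long example_pfn_to_mfn(unsigned long pfn)
 *	{
 *		unsigned long mfn = get_phys_to_machine(pfn);
 *
 *		if (mfn != INVALID_P2M_ENTRY)
 *			mfn &= ~(IDENTITY_FRAME_BIT | FOREIGN_FRAME_BIT);
 *
 *		return mfn;
 *	}
 *
 * For an identity-mapped pfn this returns the pfn itself; for a missing
 * one it returns INVALID_P2M_ENTRY unchanged.
 */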

#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/slab.h>

#include <asm/cache.h>
#include <asm/setup.h>
#include <asm/uaccess.h>

#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>

#include "p2m.h"
#include "multicalls.h"
#include "xen-ops.h"

#define PMDS_PER_MID_PAGE	(P2M_MID_PER_PAGE / PTRS_PER_PTE)

unsigned long *xen_p2m_addr __read_mostly;
EXPORT_SYMBOL_GPL(xen_p2m_addr);
unsigned long xen_p2m_size __read_mostly;
EXPORT_SYMBOL_GPL(xen_p2m_size);
unsigned long xen_max_p2m_pfn __read_mostly;
EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
#else
#define P2M_LIMIT 0
#endif

static DEFINE_SPINLOCK(p2m_update_lock);

static unsigned long *p2m_mid_missing_mfn;
static unsigned long *p2m_top_mfn;
static unsigned long **p2m_top_mfn_p;
static unsigned long *p2m_missing;
static unsigned long *p2m_identity;
static pte_t *p2m_missing_pte;
static pte_t *p2m_identity_pte;

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_P2M_PFN);
	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
}

static inline unsigned p2m_mid_index(unsigned long pfn)
{
	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_PER_PAGE;
}
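
/*
 * Worked example (64-bit, so P2M_PER_PAGE = P2M_MID_PER_PAGE = 512):
 * for pfn 0x12345 the three helpers above yield
 *
 *	p2m_top_index(0x12345) = 0x12345 / (512 * 512) = 0
 *	p2m_mid_index(0x12345) = (0x12345 / 512) % 512 = 0x91
 *	p2m_index(0x12345)     = 0x12345 % 512         = 0x145
 *
 * i.e. in the Xen-visible tree, p2m_top_mfn[0] names the mid page, slot
 * 0x91 of that mid page names the leaf, and the mfn sits at index 0x145
 * of the leaf.
 */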

static void p2m_top_mfn_init(unsigned long *top)
{
	unsigned i;

	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
		top[i] = virt_to_mfn(p2m_mid_missing_mfn);
}

static void p2m_top_mfn_p_init(unsigned long **top)
{
	unsigned i;

	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
		top[i] = p2m_mid_missing_mfn;
}

static void p2m_mid_mfn_init(unsigned long *mid, unsigned long *leaf)
{
	unsigned i;

	for (i = 0; i < P2M_MID_PER_PAGE; i++)
		mid[i] = virt_to_mfn(leaf);
}

static void p2m_init(unsigned long *p2m)
{
	unsigned i;

	for (i = 0; i < P2M_PER_PAGE; i++)
		p2m[i] = INVALID_P2M_ENTRY;
}

static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
{
	unsigned i;

	for (i = 0; i < P2M_PER_PAGE; i++)
		p2m[i] = IDENTITY_FRAME(pfn + i);
}

static void * __ref alloc_p2m_page(void)
{
	if (unlikely(!slab_is_available()))
		return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);

	return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
}

static void __ref free_p2m_page(void *p)
{
	if (unlikely(!slab_is_available())) {
		free_bootmem((unsigned long)p, PAGE_SIZE);
		return;
	}

	free_page((unsigned long)p);
}

/*
 * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
 *
 * This is called both at boot time, and after resuming from suspend:
 * - At boot time we're called rather early, and must use alloc_bootmem*()
 *   to allocate memory.
 *
 * - After resume we're called from within stop_machine, but the mfn
 *   tree should already be completely allocated.
 */
void __ref xen_build_mfn_list_list(void)
{
	unsigned long pfn, mfn;
	pte_t *ptep;
	unsigned int level, topidx, mididx;
	unsigned long *mid_mfn_p;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	/* Pre-initialize p2m_top_mfn to be completely missing */
	if (p2m_top_mfn == NULL) {
		p2m_mid_missing_mfn = alloc_p2m_page();
		p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);

		p2m_top_mfn_p = alloc_p2m_page();
		p2m_top_mfn_p_init(p2m_top_mfn_p);

		p2m_top_mfn = alloc_p2m_page();
		p2m_top_mfn_init(p2m_top_mfn);
	} else {
		/* Reinitialise, mfn's all change after migration */
		p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
	}

	for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN;
	     pfn += P2M_PER_PAGE) {
		topidx = p2m_top_index(pfn);
		mididx = p2m_mid_index(pfn);

		mid_mfn_p = p2m_top_mfn_p[topidx];
		ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn),
				      &level);
		BUG_ON(!ptep || level != PG_LEVEL_4K);
		mfn = pte_mfn(*ptep);
		ptep = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));

		/* Don't bother allocating any mfn mid levels if
		 * they're just missing, just update the stored mfn,
		 * since all could have changed over a migrate.
		 */
		if (ptep == p2m_missing_pte || ptep == p2m_identity_pte) {
			BUG_ON(mididx);
			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
			p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
			pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
			continue;
		}

		if (mid_mfn_p == p2m_mid_missing_mfn) {
			mid_mfn_p = alloc_p2m_page();
			p2m_mid_mfn_init(mid_mfn_p, p2m_missing);

			p2m_top_mfn_p[topidx] = mid_mfn_p;
		}

		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
		mid_mfn_p[mididx] = mfn;
	}
}

void xen_setup_mfn_list_list(void)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn);
	HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long pfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list;
	xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE);

	for (pfn = xen_start_info->nr_pages; pfn < xen_p2m_size; pfn++)
		xen_p2m_addr[pfn] = INVALID_P2M_ENTRY;

	xen_max_p2m_pfn = xen_p2m_size;
}

#define P2M_TYPE_IDENTITY	0
#define P2M_TYPE_MISSING	1
#define P2M_TYPE_PFN		2
#define P2M_TYPE_UNKNOWN	3

static int xen_p2m_elem_type(unsigned long pfn)
{
	unsigned long mfn;

	if (pfn >= xen_p2m_size)
		return P2M_TYPE_IDENTITY;

	mfn = xen_p2m_addr[pfn];

	if (mfn == INVALID_P2M_ENTRY)
		return P2M_TYPE_MISSING;

	if (mfn & IDENTITY_FRAME_BIT)
		return P2M_TYPE_IDENTITY;

	return P2M_TYPE_PFN;
}

static void __init xen_rebuild_p2m_list(unsigned long *p2m)
{
	unsigned int i, chunk;
	unsigned long pfn;
	unsigned long *mfns;
	pte_t *ptep;
	pmd_t *pmdp;
	int type;

	p2m_missing = alloc_p2m_page();
	p2m_init(p2m_missing);
	p2m_identity = alloc_p2m_page();
	p2m_init(p2m_identity);

	p2m_missing_pte = alloc_p2m_page();
	paravirt_alloc_pte(&init_mm, __pa(p2m_missing_pte) >> PAGE_SHIFT);
	p2m_identity_pte = alloc_p2m_page();
	paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte(p2m_missing_pte + i,
			pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO));
		set_pte(p2m_identity_pte + i,
			pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO));
	}

	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
		/*
		 * Try to map missing/identity PMDs or p2m-pages if possible.
		 * We have to respect the structure of the mfn_list_list
		 * which will be built just afterwards.
		 * Chunk size to test is one p2m page if we are in the middle
		 * of a mfn_list_list mid page and the complete mid page area
		 * if we are at index 0 of the mid page. Please note that a
		 * mid page might cover more than one PMD, e.g. on 32 bit PAE
		 * kernels.
		 */
		chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ?
			P2M_PER_PAGE : P2M_PER_PAGE * P2M_MID_PER_PAGE;

		type = xen_p2m_elem_type(pfn);
		i = 0;
		if (type != P2M_TYPE_PFN)
			for (i = 1; i < chunk; i++)
				if (xen_p2m_elem_type(pfn + i) != type)
					break;
		if (i < chunk)
			/* Reset to minimal chunk size. */
			chunk = P2M_PER_PAGE;

		if (type == P2M_TYPE_PFN || i < chunk) {
			/* Use initial p2m page contents. */
#ifdef CONFIG_X86_64
			mfns = alloc_p2m_page();
			copy_page(mfns, xen_p2m_addr + pfn);
#else
			mfns = xen_p2m_addr + pfn;
#endif
			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));
			continue;
		}

		if (chunk == P2M_PER_PAGE) {
			/* Map complete missing or identity p2m-page. */
			mfns = (type == P2M_TYPE_MISSING) ?
				p2m_missing : p2m_identity;
			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO));
			continue;
		}

		/* Complete missing or identity PMD(s) can be mapped. */
		ptep = (type == P2M_TYPE_MISSING) ?
			p2m_missing_pte : p2m_identity_pte;
		for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
			pmdp = populate_extra_pmd(
				(unsigned long)(p2m + pfn) + i * PMD_SIZE);
			set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
		}
	}
}
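
/*
 * Illustrative walk-through of the chunk logic above (64-bit numbers,
 * hypothetical layout): suppose pfns 0..0x3ffff are all identity. At
 * pfn 0 we are at index 0 of a mid page, so a whole mid page area
 * (512 * 512 = 0x40000 pfns) is tested at once; every element is
 * P2M_TYPE_IDENTITY, so complete PMD(s) are pointed at p2m_identity_pte
 * in one step instead of mapping 512 separate leaf pages. Had element
 * 0x1000 been a real PFN, the scan would have stopped early and chunk
 * would have been reset to one p2m page (512 pfns).
 */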

void __init xen_vmalloc_p2m_tree(void)
{
	static struct vm_struct vm;
	unsigned long p2m_limit;

	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
	vm.flags = VM_ALLOC;
	vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
			PMD_SIZE * PMDS_PER_MID_PAGE);
	vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
	pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);

	xen_max_p2m_pfn = vm.size / sizeof(unsigned long);

	xen_rebuild_p2m_list(vm.addr);

	xen_p2m_addr = vm.addr;
	xen_p2m_size = xen_max_p2m_pfn;

	xen_inv_extra_mem();
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
	pte_t *ptep;
	unsigned int level;

	if (unlikely(pfn >= xen_p2m_size)) {
		if (pfn < xen_max_p2m_pfn)
			return xen_chk_extra_mem(pfn);

		return IDENTITY_FRAME(pfn);
	}

	ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
	BUG_ON(!ptep || level != PG_LEVEL_4K);

	/*
	 * The INVALID_P2M_ENTRY is filled in both p2m_*identity
	 * and in p2m_*missing, so returning the INVALID_P2M_ENTRY
	 * would be wrong.
	 */
	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
		return IDENTITY_FRAME(pfn);

	return xen_p2m_addr[pfn];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
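
/*
 * Note the asymmetry this creates (illustrative, for a pfn whose leaf
 * is the shared p2m_identity page): the flat array itself holds
 * INVALID_P2M_ENTRY there, so
 *
 *	xen_p2m_addr[pfn]		yields INVALID_P2M_ENTRY, but
 *	get_phys_to_machine(pfn)	yields IDENTITY_FRAME(pfn).
 *
 * Callers must therefore go through the accessor (or pfn_to_mfn())
 * rather than peeking at the array directly.
 */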

/*
 * Allocate new pmd(s). It is checked whether the old pmd is still in place.
 * If not, nothing is changed. This is okay as the only reason for allocating
 * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by an
 * individual pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
 */
static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
{
	pte_t *ptechk;
	pte_t *pte_newpg[PMDS_PER_MID_PAGE];
	pmd_t *pmdp;
	unsigned int level;
	unsigned long flags;
	unsigned long vaddr;
	int i;

	/* Do all allocations first to bail out in error case. */
	for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
		pte_newpg[i] = alloc_p2m_page();
		if (!pte_newpg[i]) {
			for (i--; i >= 0; i--)
				free_p2m_page(pte_newpg[i]);

			return NULL;
		}
	}

	vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1);

	for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
		copy_page(pte_newpg[i], pte_pg);
		paravirt_alloc_pte(&init_mm, __pa(pte_newpg[i]) >> PAGE_SHIFT);

		pmdp = lookup_pmd_address(vaddr);
		BUG_ON(!pmdp);

		spin_lock_irqsave(&p2m_update_lock, flags);

		ptechk = lookup_address(vaddr, &level);
		if (ptechk == pte_pg) {
			set_pmd(pmdp,
				__pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
			pte_newpg[i] = NULL;
		}

		spin_unlock_irqrestore(&p2m_update_lock, flags);

		if (pte_newpg[i]) {
			paravirt_release_pte(__pa(pte_newpg[i]) >> PAGE_SHIFT);
			free_p2m_page(pte_newpg[i]);
		}

		vaddr += PMD_SIZE;
	}

	return lookup_address(addr, &level);
}

/*
 * Fully allocate the p2m structure for a given pfn.  We need to check
 * that both the top and mid levels are allocated, and make sure the
 * parallel mfn tree is kept in sync.  We may race with other cpus, so
 * the new pages are installed with cmpxchg; if we lose the race then
 * simply free the page we allocated and use the one that's there.
 */
static bool alloc_p2m(unsigned long pfn)
{
	unsigned topidx, mididx;
	unsigned long *top_mfn_p, *mid_mfn;
	pte_t *ptep, *pte_pg;
	unsigned int level;
	unsigned long flags;
	unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
	unsigned long p2m_pfn;

	topidx = p2m_top_index(pfn);
	mididx = p2m_mid_index(pfn);

	ptep = lookup_address(addr, &level);
	BUG_ON(!ptep || level != PG_LEVEL_4K);
	pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));

	if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
		/* PMD level is missing, allocate a new one */
		ptep = alloc_p2m_pmd(addr, pte_pg);
		if (!ptep)
			return false;
	}

	if (p2m_top_mfn) {
		top_mfn_p = &p2m_top_mfn[topidx];
		mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);

		BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);

		if (mid_mfn == p2m_mid_missing_mfn) {
			/* Separately check the mid mfn level */
			unsigned long missing_mfn;
			unsigned long mid_mfn_mfn;
			unsigned long old_mfn;

			mid_mfn = alloc_p2m_page();
			if (!mid_mfn)
				return false;

			p2m_mid_mfn_init(mid_mfn, p2m_missing);

			missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
			mid_mfn_mfn = virt_to_mfn(mid_mfn);
			old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn);
			if (old_mfn != missing_mfn) {
				free_p2m_page(mid_mfn);
				mid_mfn = mfn_to_virt(old_mfn);
			} else {
				p2m_top_mfn_p[topidx] = mid_mfn;
			}
		}
	} else {
		mid_mfn = NULL;
	}

	p2m_pfn = pte_pfn(READ_ONCE(*ptep));
	if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
	    p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
		/* p2m leaf page is missing */
		unsigned long *p2m;

		p2m = alloc_p2m_page();
		if (!p2m)
			return false;

		if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))
			p2m_init(p2m);
		else
			p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1));

		spin_lock_irqsave(&p2m_update_lock, flags);

		if (pte_pfn(*ptep) == p2m_pfn) {
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(p2m)), PAGE_KERNEL));
			if (mid_mfn)
				mid_mfn[mididx] = virt_to_mfn(p2m);
			p2m = NULL;
		}

		spin_unlock_irqrestore(&p2m_update_lock, flags);

		if (p2m)
			free_p2m_page(p2m);
	}

	return true;
}

unsigned long __init set_phys_range_identity(unsigned long pfn_s,
					     unsigned long pfn_e)
{
	unsigned long pfn;

	if (unlikely(pfn_s >= xen_p2m_size))
		return 0;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
		return pfn_e - pfn_s;

	if (pfn_s > pfn_e)
		return 0;

	if (pfn_e > xen_p2m_size)
		pfn_e = xen_p2m_size;

	for (pfn = pfn_s; pfn < pfn_e; pfn++)
		xen_p2m_addr[pfn] = IDENTITY_FRAME(pfn);

	return pfn - pfn_s;
}
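
/*
 * Illustrative use (hypothetical range): marking a non-RAM hole, say
 * pfns 0xa0-0xff (the legacy VGA/ROM area), as 1-1 so that afterwards
 * pfn_to_mfn(0xa0) == 0xa0:
 *
 *	set_phys_range_identity(0xa0, 0x100);
 *
 * The real callers live in arch/x86/xen/setup.c, which walks the e820
 * map and marks the non-RAM gaps this way.
 */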

bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	pte_t *ptep;
	unsigned int level;

	/* don't track P2M changes in autotranslate guests */
	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
		return true;

	if (unlikely(pfn >= xen_p2m_size)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return true;
	}

	if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
		return true;

	ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
	BUG_ON(!ptep || level != PG_LEVEL_4K);

	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_missing)))
		return mfn == INVALID_P2M_ENTRY;

	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
		return mfn == IDENTITY_FRAME(pfn);

	return false;
}

bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		if (!alloc_p2m(pfn))
			return false;

		return __set_phys_to_machine(pfn, mfn);
	}

	return true;
}
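
/*
 * Usage sketch (illustrative only, error handling elided): a backend
 * that has been given machine frame mfn for a ballooned-out pfn would
 * record the translation with
 *
 *	if (!set_phys_to_machine(pfn, mfn))
 *		return -ENOMEM;
 *
 * If the pfn's leaf is still a shared read-only page
 * (p2m_missing/p2m_identity), the first __set_phys_to_machine() write
 * fails, alloc_p2m() installs a private writable leaf, and the write
 * is retried once.
 */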

int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i, ret = 0;
	pte_t *pte;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (kmap_ops) {
		ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
						kmap_ops, count);
		if (ret)
			goto out;
	}

	for (i = 0; i < count; i++) {
		unsigned long mfn, pfn;

		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		pfn = page_to_pfn(pages[i]);

		WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");

		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
			ret = -ENOMEM;
			goto out;
		}
	}

out:
	return ret;
}
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);

int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_unmap_grant_ref *kunmap_ops,
			      struct page **pages, unsigned int count)
{
	int i, ret = 0;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	for (i = 0; i < count; i++) {
		unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
		unsigned long pfn = page_to_pfn(pages[i]);

		if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
			ret = -EINVAL;
			goto out;
		}

		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
	if (kunmap_ops)
		ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
						kunmap_ops, count);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);

#ifdef CONFIG_XEN_DEBUG_FS
#include <linux/debugfs.h>
#include "debugfs.h"
static int p2m_dump_show(struct seq_file *m, void *v)
{
	static const char * const type_name[] = {
				[P2M_TYPE_IDENTITY] = "identity",
				[P2M_TYPE_MISSING] = "missing",
				[P2M_TYPE_PFN] = "pfn",
				[P2M_TYPE_UNKNOWN] = "abnormal"};
	unsigned long pfn, first_pfn;
	int type, prev_type;

	prev_type = xen_p2m_elem_type(0);
	first_pfn = 0;

	for (pfn = 0; pfn < xen_p2m_size; pfn++) {
		type = xen_p2m_elem_type(pfn);
		if (type != prev_type) {
			seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
				   type_name[prev_type]);
			prev_type = type;
			first_pfn = pfn;
		}
	}
	seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
		   type_name[prev_type]);
	return 0;
}
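
/*
 * Example of the dump format (values hypothetical), as read from
 * /sys/kernel/debug/xen/mmu/p2m once the file is registered below:
 *
 *	 [0x0->0xa0] pfn
 *	 [0xa0->0x100] identity
 *	 [0x100->0x20000] pfn
 *	 [0x20000->0x40000] missing
 */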

static int p2m_dump_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, p2m_dump_show, NULL);
}

static const struct file_operations p2m_dump_fops = {
	.open		= p2m_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *d_mmu_debug;

static int __init xen_p2m_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mmu_debug = debugfs_create_dir("mmu", d_xen);

	debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
	return 0;
}
fs_initcall(xen_p2m_debugfs);
#endif /* CONFIG_XEN_DEBUG_FS */