/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when it is loaded into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>
#include <asm/linkage.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"

#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
		{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
		{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns, used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;

static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
	__page_aligned_bss;

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;
}
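
/*
 * Illustrative example: with 4K pages and 4-byte entries (32-bit),
 * P2M_ENTRIES_PER_PAGE is 1024, so pfn 0x12345 is found at
 * p2m_top[0x12345 / 1024][0x12345 % 1024], i.e. p2m_top[72][837].
 * On 64-bit the entries are 8 bytes and each p2m page covers 512 pfns.
 */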

/* Build the parallel p2m_top_mfn structures */
void xen_setup_mfn_list_list(void)
{
	unsigned pfn, idx;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn_list);
	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}
}

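/*
 * Look up the machine frame backing @pfn.  Out-of-range pfns and holes
 * (which all share the all-invalid p2m_missing page) read back as
 * INVALID_P2M_ENTRY.
 */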
unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);

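/*
 * Install a fresh p2m page for one block of pfns.  The new page starts
 * out all-invalid and is swapped in with cmpxchg() so that a racing
 * allocator simply frees its copy; the winner also publishes the page's
 * mfn for the save/restore machinery.
 */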
static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
{
	unsigned long *p;
	unsigned i;

	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(p == NULL);

	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
		free_page((unsigned long)p);
	else
		*mfnp = virt_to_mfn(p);
}

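/*
 * Record that @pfn is backed by machine frame @mfn.  Auto-translated
 * guests keep an identity p2m, so nothing is stored; writes into a
 * missing block allocate a real p2m page on demand.
 */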
void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		/* no need to allocate a page to store an invalid entry */
		if (mfn == INVALID_P2M_ENTRY)
			return;

		alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]);
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;
}

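/*
 * Translate a kernel virtual address into a machine address by walking
 * the live pagetable with lookup_address().  Unlike virt_to_machine(),
 * this works for any mapped address, including ioremapped ones.
 */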
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);
	unsigned offset = address & ~PAGE_MASK;

	BUG_ON(pte == NULL);

	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

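/*
 * Queue one mmu_update request in the current multicall batch.  If the
 * batch already ends in an mmu_update hypercall, extend it by bumping
 * its request count rather than emitting a new hypercall.
 */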
static void extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL)
		mcs.mc->args[1]++;
	else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}

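/*
 * Update a pmd entry via a (possibly batched) mmu_update hypercall,
 * which is required once the page holding it has been pinned read-only.
 */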
void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	/* If page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <mfn,flags> stored as-is, to permit clearing entries */
	xen_set_pte(pte, mfn_pte(mfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* updates to init_mm may be done without lock */
	if (mm == &init_mm)
		preempt_disable();

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:
	if (mm == &init_mm)
		preempt_enable();
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	return pte_mfn_to_pfn(pte.pte);
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}

pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	/* If page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

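/*
 * On PAE the two pte words can't be written atomically here, so write
 * pte_high first: _PAGE_PRESENT lives in pte_low, so when a new entry
 * replaces a non-present one it only becomes visible once both halves
 * are in place.
 */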
void xen_set_pte(pte_t *ptep, pte_t pte)
{
#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}

void xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	/* If page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pgd_hyper(ptr, val);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
  (Yet another) pagetable walker.  This one is intended for pinning a
  pagetable.  This means that it walks a pagetable and calls the
  callback function on each page it finds making up the page table,
  at every level.  It walks the entire pagetable, but it only bothers
  pinning pte pages which are below the passed limit.  In the normal case
  this will be TASK_SIZE, but at boot we need to pin up to
  FIXADDR_TOP.  But the important bit is that we don't pin beyond
  there, because then we start getting into Xen's ptes.
*/
static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, enum pt_level),
		    unsigned long limit)
{
	pgd_t *pgd = pgd_base;
	int flush = 0;
	unsigned long addr = 0;
	unsigned long pgd_next;

	BUG_ON(limit > FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	for (; addr != FIXADDR_TOP; pgd++, addr = pgd_next) {
		pud_t *pud;
		unsigned long pud_limit, pud_next;
		pgd_next = pud_limit = pgd_addr_end(addr, FIXADDR_TOP);

		if (!pgd_val(*pgd))
			continue;

		pud = pud_offset(pgd, 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(virt_to_page(pud), PT_PUD);

		for (; addr != pud_limit; pud++, addr = pud_next) {
			pmd_t *pmd;
			unsigned long pmd_limit;

			pud_next = pud_addr_end(addr, pud_limit);

			if (pud_next < limit)
				pmd_limit = pud_next;
			else
				pmd_limit = limit;

			if (pud_none(*pud))
				continue;

			pmd = pmd_offset(pud, 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(virt_to_page(pmd), PT_PMD);

			for (; addr != pmd_limit; pmd++) {
				addr += (PAGE_SIZE * PTRS_PER_PTE);
				if ((pmd_limit-1) < (addr-1)) {
					addr = pmd_limit;
					break;
				}

				if (pmd_none(*pmd))
					continue;

				flush |= (*func)(pmd_page(*pmd), PT_PTE);
			}
		}
	}

	flush |= (*func)(virt_to_page(pgd_base), PT_PGD);

	return flush;
}

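/*
 * With split pte locks enabled, take the lock protecting this pte page
 * so its contents can't change while we remap it read-only.  The lock
 * is handed back via do_unlock() once the multicall batch completes.
 */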
static spinlock_t *lock_pte(struct page *page)
{
	spinlock_t *ptl = NULL;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	ptl = __pte_lockptr(page);
	spin_lock(ptl);
#endif

	return ptl;
}

static void do_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

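/* Queue a pin/unpin mmuext op for @pfn in the current multicall batch */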
static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

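/*
 * Make one pagetable page read-only as part of pinning.  Returns
 * nonzero if the caller must flush: an unpinned highmem page can't be
 * remapped here and needs kmap_flush_unused() instead.
 */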
static int pin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		ptl = NULL;
		if (level == PT_PTE)
			ptl = lock_pte(page);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (level == PT_PTE)
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

		if (ptl) {
			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
	xen_mc_batch();

	if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
		/* re-enable interrupts for kmap_flush_unused */
		xen_mc_issue(0);
		kmap_flush_unused();
		xen_mc_batch();
	}

	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
	xen_mc_issue(0);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 */
void xen_mm_pin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			xen_pgd_pin((pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static __init int mark_pinned(struct page *page, enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}
void __init xen_mark_init_mm_pinned(void)
{
	pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}

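/*
 * Undo pin_page(): drop the L1 pin for pte pages, then remap the page
 * read-write in a batched update_va_mapping.  Unpinning never needs a
 * TLB flush, so this always returns 0.
 */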
static int unpin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);
	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		if (level == PT_PTE) {
			ptl = lock_pte(page);

			xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	pgd_walk(pgd, unpin_page, TASK_SIZE);

	xen_mc_issue(0);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			printk(KERN_DEBUG "unpinning pinned %p\n", page_address(page));
			xen_pgd_unpin((pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next->pgd);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

#ifdef CONFIG_X86_64
	active_mm = read_pda(active_mm);
#else
	active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
#endif

	if (active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void drop_mm_ref(struct mm_struct *mm)
{
	cpumask_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	mask = mm->cpu_vm_mask;

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because its in lazy mode, and it hasn't yet flushed
	   its set of pending hypercalls yet.  In this case, we can
	   look at its actual current cr3 value, and force it to flush
	   if needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpu_set(cpu, mask);
	}

	if (!cpus_empty(mask))
		smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (page_pinned(mm->pgd))
		xen_pgd_unpin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}