page_tables.c 36.8 KB
Newer Older
R
Rusty Russell 已提交
1 2
/*P:700
 * The pagetable code, on the other hand, still shows the scars of
3 4 5
 * previous encounters.  It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual to physical mapping, but we can neither trust
6
 * it nor use it: we verify and convert it here then point the CPU to the
R
Rusty Russell 已提交
7 8
 * converted Guest pages when running the Guest.
:*/
9

10
/* Copyright (C) Rusty Russell IBM Corporation 2013.
R
Rusty Russell 已提交
11 12
 * GPL v2 and any later version */
#include <linux/mm.h>
13
#include <linux/gfp.h>
R
Rusty Russell 已提交
14 15 16 17 18
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
19
#include <asm/uaccess.h>
R
Rusty Russell 已提交
20 21
#include "lg.h"

R
Rusty Russell 已提交
22 23
/*M:008
 * We hold reference to pages, which prevents them from being swapped.
24 25
 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
 * to swap out.  If we had this, and a shrinker callback to trim PTE pages, we
R
Rusty Russell 已提交
26 27
 * could probably consider launching Guests as non-root.
:*/
28

R
Rusty Russell 已提交
29 30 31
/*H:300
 * The Page Table Code
 *
R
Rusty Russell 已提交
32 33 34 35
 * We use two-level page tables for the Guest, or three-level with PAE.  If
 * you're not entirely comfortable with virtual addresses, physical addresses
 * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page
 * Table Handling" (with diagrams!).
R
Rusty Russell 已提交
36 37 38 39 40 41 42 43 44
 *
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables.  Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's.  (See what I mean about weird naming?  Since when do
 * shadows reflect anything?)
 *
 * Anyway, this is the most complicated part of the Host code.  There are seven
 * parts to this:
R
Rusty Russell 已提交
45 46 47
 *  (i) Looking up a page table entry when the Guest faults,
 *  (ii) Making sure the Guest stack is mapped,
 *  (iii) Setting up a page table entry when the Guest tells us one has changed,
R
Rusty Russell 已提交
48
 *  (iv) Switching page tables,
R
Rusty Russell 已提交
49
 *  (v) Flushing (throwing away) page tables,
R
Rusty Russell 已提交
50 51
 *  (vi) Mapping the Switcher when the Guest is about to run,
 *  (vii) Setting up the page tables initially.
R
Rusty Russell 已提交
52
:*/
R
Rusty Russell 已提交
53

R
Rusty Russell 已提交
54
/*
R
Rusty Russell 已提交
55 56
 * The Switcher uses the complete top PTE page.  That's 1024 PTE entries (4MB)
 * or 512 PTE entries with PAE (2MB).
R
Rusty Russell 已提交
57
 */
58
#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
R
Rusty Russell 已提交
59

R
Rusty Russell 已提交
60 61 62 63
/*
 * For PAE we need the PMD index as well. We use the last 2MB, so we
 * will need the last pmd entry of the last pmd page.
 */
M
Matias Zabaljauregui 已提交
64 65 66 67 68 69
#ifdef CONFIG_X86_PAE
#define CHECK_GPGD_MASK		_PAGE_PRESENT
#else
#define CHECK_GPGD_MASK		_PAGE_TABLE
#endif

R
Rusty Russell 已提交
70 71
/*H:320
 * The page table code is curly enough to need helper functions to keep it
R
Rusty Russell 已提交
72
 * clear and clean.  The kernel itself provides many of them; one advantage
73
 * of insisting that the Guest and Host use the same CONFIG_X86_PAE setting.
R
Rusty Russell 已提交
74
 *
75
 * There are two functions which return pointers to the shadow (aka "real")
R
Rusty Russell 已提交
76 77 78
 * page tables.
 *
 * spgd_addr() takes the virtual address and returns a pointer to the top-level
R
Rusty Russell 已提交
79 80
 * page directory entry (PGD) for that address.  Since we keep track of several
 * page tables, the "i" argument tells us which one we're interested in (it's
R
Rusty Russell 已提交
81 82
 * usually the current one).
 */
83
/*
 * Return a pointer to the shadow (real) top-level PGD entry covering vaddr,
 * in the i'th of the several shadow page tables we keep per Guest.
 */
static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
{
	/* pgd_index() picks the slot; pgdirs[i] is the table we want. */
	return &cpu->lg->pgdirs[i].pgdir[pgd_index(vaddr)];
}

M
Matias Zabaljauregui 已提交
91
#ifdef CONFIG_X86_PAE
R
Rusty Russell 已提交
92 93
/*
 * This routine then takes the PGD entry given above, which contains the
M
Matias Zabaljauregui 已提交
94
 * address of the PMD page.  It then returns a pointer to the PMD entry for the
R
Rusty Russell 已提交
95 96
 * given address.
 */
M
Matias Zabaljauregui 已提交
97 98 99 100 101 102 103 104 105 106 107 108 109
static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
	pmd_t *pmd_page;

	/* Calling this with a non-present PGD entry is a caller bug. */
	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));

	/* The PGD entry holds the page frame number of the PMD page. */
	pmd_page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
	return &pmd_page[pmd_index(vaddr)];
}
#endif

R
Rusty Russell 已提交
110 111
/*
 * This routine then takes the page directory entry returned above, which
R
Rusty Russell 已提交
112
 * contains the address of the page table entry (PTE) page.  It then returns a
R
Rusty Russell 已提交
113 114
 * pointer to the PTE entry for the given address.
 */
M
Matias Zabaljauregui 已提交
115
/*
 * Return a pointer to the shadow PTE entry for vaddr, given the shadow PGD
 * entry that covers it.  With PAE we go via the mid-level PMD; without PAE
 * the PGD entry points straight at the PTE page.
 */
static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
#ifdef CONFIG_X86_PAE
	pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
	pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);

	/* You should never call this if the PMD entry wasn't valid */
	BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
#else
	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
	/* You should never call this if the PGD entry wasn't valid */
	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
#endif

	return &page[pte_index(vaddr)];
}

R
Rusty Russell 已提交
132
/*
R
Rusty Russell 已提交
133
 * These functions are just like the above, except they access the Guest
R
Rusty Russell 已提交
134 135
 * page tables.  Hence they return a Guest address.
 */
136
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
R
Rusty Russell 已提交
137
{
138
	unsigned int index = vaddr >> (PGDIR_SHIFT);
139
	return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
R
Rusty Russell 已提交
140 141
}

M
Matias Zabaljauregui 已提交
142
#ifdef CONFIG_X86_PAE
R
Rusty Russell 已提交
143
/* Follow the PGD to the PMD. */
M
Matias Zabaljauregui 已提交
144
/* Given a Guest PGD entry, compute the Guest address of its PMD entry. */
static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
{
	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
	return (pgd_pfn(gpgd) << PAGE_SHIFT)
		+ pmd_index(vaddr) * sizeof(pmd_t);
}

R
Rusty Russell 已提交
151
/* Follow the PMD to the PTE. */
M
Matias Zabaljauregui 已提交
152
static unsigned long gpte_addr(struct lg_cpu *cpu,
R
Rusty Russell 已提交
153
			       pmd_t gpmd, unsigned long vaddr)
M
Matias Zabaljauregui 已提交
154
{
R
Rusty Russell 已提交
155
	unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;
M
Matias Zabaljauregui 已提交
156 157

	BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
R
Rusty Russell 已提交
158 159
	return gpage + pte_index(vaddr) * sizeof(pte_t);
}
M
Matias Zabaljauregui 已提交
160
#else
R
Rusty Russell 已提交
161
/* Follow the PGD to the PTE (no mid-level for !PAE). */
R
Rusty Russell 已提交
162 163 164 165 166 167
/* Without PAE the Guest PGD entry points straight at the PTE page. */
static unsigned long gpte_addr(struct lg_cpu *cpu,
				pgd_t gpgd, unsigned long vaddr)
{
	unsigned long pte_base = pgd_pfn(gpgd) << PAGE_SHIFT;

	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
	return pte_base + pte_index(vaddr) * sizeof(pte_t);
}
R
Rusty Russell 已提交
170
#endif
171 172
/*:*/

R
Rusty Russell 已提交
173
/*M:007
R
Rusty Russell 已提交
174 175 176
 * get_pfn is slow: we could probably try to grab batches of pages here as
 * an optimization (ie. pre-faulting).
:*/
R
Rusty Russell 已提交
177

R
Rusty Russell 已提交
178 179
/*H:350
 * This routine takes a page number given by the Guest and converts it to
R
Rusty Russell 已提交
180 181 182 183 184
 * an actual, physical page number.  It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag is set
 * and the page is read-only, or the write flag was set and the page was
 * shared so had to be copied, but we ran out of memory.
 *
185
 * This holds a reference to the page, so release_pte() is careful to put that
R
Rusty Russell 已提交
186 187
 * back.
 */
R
Rusty Russell 已提交
188 189 190
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
	struct page *page;
191 192 193 194 195

	/* gup me one page at this address please! */
	if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
		return page_to_pfn(page);

R
Rusty Russell 已提交
196
	/* This value indicates failure. */
197
	return -1UL;
R
Rusty Russell 已提交
198 199
}

R
Rusty Russell 已提交
200 201
/*H:340
 * Converting a Guest page table entry to a shadow (ie. real) page table
R
Rusty Russell 已提交
202 203
 * entry can be a little tricky.  The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
R
Rusty Russell 已提交
204 205
 * number.
 */
206
/*
 * Translate one Guest PTE into the shadow PTE the real CPU will use:
 * same flags (minus _PAGE_GLOBAL), but the Guest's page number replaced
 * by the real physical frame backing it inside the Launcher.
 */
static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
{
	unsigned long pfn, base, flags;

	/*
	 * The Guest sets the global flag, because it thinks that it is using
	 * PGE.  We only told it to use PGE so it would tell us whether it was
	 * flushing a kernel mapping or a userspace mapping.  We don't actually
	 * use the global bit, so throw it away.
	 */
	flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);

	/* The Guest's pages are offset inside the Launcher. */
	base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;

	/*
	 * We need a temporary "unsigned long" variable to hold the answer from
	 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
	 * fit in spte.pfn.  get_pfn() finds the real physical number of the
	 * page, given the virtual number.
	 */
	pfn = get_pfn(base + pte_pfn(gpte), write);
	if (pfn == -1UL) {
		kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
		/*
		 * When we destroy the Guest, we'll go through the shadow page
		 * tables and release_pte() them.  Make sure we don't think
		 * this one is valid!
		 */
		flags = 0;
	}
	/* Now we assemble our shadow PTE from the page number and flags. */
	return pfn_pte(pfn, __pgprot(flags));
}

R
Rusty Russell 已提交
241
/*H:460 And to complete the chain, release_pte() looks like this: */
242
static void release_pte(pte_t pte)
{
	/*
	 * get_user_pages_fast() (via get_pfn()) took a reference on this
	 * page; drop it again — but only for entries we actually mapped.
	 */
	if (pte_flags(pte) & _PAGE_PRESENT)
		put_page(pte_page(pte));
}
R
Rusty Russell 已提交
251
/*:*/
R
Rusty Russell 已提交
252

253
/*
 * Sanity-check a Guest PTE: no huge pages (_PAGE_PSE), and the frame must
 * lie below pfn_limit (i.e. not inside the Launcher binary).  A bad entry
 * kills the Guest and returns false.
 */
static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
{
	if (!(pte_flags(gpte) & _PAGE_PSE) &&
	    pte_pfn(gpte) < cpu->lg->pfn_limit)
		return true;

	kill_guest(cpu, "bad page table entry");
	return false;
}

263
/*
 * Sanity-check a Guest PGD entry: only flags in CHECK_GPGD_MASK are
 * allowed, and the frame must lie below pfn_limit.  A bad entry kills
 * the Guest and returns false.
 */
static bool check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
{
	if (!(pgd_flags(gpgd) & ~CHECK_GPGD_MASK) &&
	    pgd_pfn(gpgd) < cpu->lg->pfn_limit)
		return true;

	kill_guest(cpu, "bad page directory entry");
	return false;
}

M
Matias Zabaljauregui 已提交
273
#ifdef CONFIG_X86_PAE
274
/*
 * Sanity-check a Guest PMD entry (PAE only): only _PAGE_TABLE flags are
 * allowed, and the frame must lie below pfn_limit.  A bad entry kills
 * the Guest and returns false.
 */
static bool check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
{
	if (!(pmd_flags(gpmd) & ~_PAGE_TABLE) &&
	    pmd_pfn(gpmd) < cpu->lg->pfn_limit)
		return true;

	kill_guest(cpu, "bad page middle directory entry");
	return false;
}
#endif

285 286 287
/*H:331
 * This is the core routine to walk the shadow page tables and find the page
 * table entry for a specific address.
R
Rusty Russell 已提交
288
 *
289 290 291
 * If allocate is set, then we allocate any missing levels, setting the flags
 * on the new page directory and mid-level directories using the arguments
 * (which are copied from the Guest's page table entries).
R
Rusty Russell 已提交
292
 */
293 294
/*
 * NOTE(review): the pgd_flags/pmd_flags parameters share their names with
 * the pgd_flags()/pmd_flags() accessors invoked below; this only compiles
 * if those accessors are macros, not functions — confirm against the arch
 * headers before renaming anything here.
 *
 * Returns NULL if a needed level is absent and !allocate, or if allocation
 * fails (in which case the Guest has already been killed).
 */
static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate,
			int pgd_flags, int pmd_flags)
{
	pgd_t *spgd;
	/* Mid level for PAE. */
#ifdef CONFIG_X86_PAE
	pmd_t *spmd;
#endif

	/* Get top level entry. */
	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
	if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
		/* No shadow entry: allocate a new shadow PTE page. */
		unsigned long ptepage;

		/* If they didn't want us to allocate anything, stop. */
		if (!allocate)
			return NULL;

		ptepage = get_zeroed_page(GFP_KERNEL);
		/*
		 * This is not really the Guest's fault, but killing it is
		 * simple for this corner case.
		 */
		if (!ptepage) {
			kill_guest(cpu, "out of memory allocating pte page");
			return NULL;
		}
		/*
		 * And we copy the flags to the shadow PGD entry.  The page
		 * number in the shadow PGD is the page we just allocated.
		 */
		set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags));
	}

	/*
	 * Intel's Physical Address Extension actually uses three levels of
	 * page tables, so we need to look in the mid-level.
	 */
#ifdef CONFIG_X86_PAE
	/* Now look at the mid-level shadow entry. */
	spmd = spmd_addr(cpu, *spgd, vaddr);

	if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
		/* No shadow entry: allocate a new shadow PTE page. */
		unsigned long ptepage;

		/* If they didn't want us to allocate anything, stop. */
		if (!allocate)
			return NULL;

		ptepage = get_zeroed_page(GFP_KERNEL);

		/*
		 * This is not really the Guest's fault, but killing it is
		 * simple for this corner case.
		 */
		if (!ptepage) {
			kill_guest(cpu, "out of memory allocating pmd page");
			return NULL;
		}

		/*
		 * And we copy the flags to the shadow PMD entry.  The page
		 * number in the shadow PMD is the page we just allocated.
		 */
		set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags));
	}
#endif

	/* Get the pointer to the shadow PTE entry we're going to set. */
	return spte_addr(cpu, *spgd, vaddr);
}

R
Rusty Russell 已提交
367
/*H:330
R
Rusty Russell 已提交
368
 * (i) Looking up a page table entry when the Guest faults.
R
Rusty Russell 已提交
369 370 371 372 373 374 375
 *
 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 * come here.  That's because we only set up the shadow page tables lazily as
 * they're needed, so we get page faults all the time and quietly fix them up
 * and return to the Guest without it knowing.
 *
 * If we fixed up the fault (ie. we mapped the address), this routine returns
R
Rusty Russell 已提交
376 377
 * true.  Otherwise, it was a real fault and we need to tell the Guest.
 */
378
bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
R
Rusty Russell 已提交
379 380
{
	unsigned long gpte_ptr;
381 382
	pte_t gpte;
	pte_t *spte;
M
Matias Zabaljauregui 已提交
383
	pmd_t gpmd;
384
	pgd_t gpgd;
M
Matias Zabaljauregui 已提交
385

386 387 388 389
	/* We never demand page the Switcher, so trying is a mistake. */
	if (vaddr >= switcher_addr)
		return false;

R
Rusty Russell 已提交
390
	/* First step: get the top-level Guest page table entry. */
391 392 393 394 395 396 397 398
	if (unlikely(cpu->linear_pages)) {
		/* Faking up a linear mapping. */
		gpgd = __pgd(CHECK_GPGD_MASK);
	} else {
		gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
		/* Toplevel not present?  We can't map it in. */
		if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
			return false;
R
Rusty Russell 已提交
399

400 401 402
		/* 
		 * This kills the Guest if it has weird flags or tries to
		 * refer to a "physical" address outside the bounds.
R
Rusty Russell 已提交
403
		 */
404 405
		if (!check_gpgd(cpu, gpgd))
			return false;
R
Rusty Russell 已提交
406 407
	}

408 409 410
	/* This "mid-level" entry is only used for non-linear, PAE mode. */
	gpmd = __pmd(_PAGE_TABLE);

M
Matias Zabaljauregui 已提交
411
#ifdef CONFIG_X86_PAE
412
	if (likely(!cpu->linear_pages)) {
413 414 415 416
		gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
		/* Middle level not present?  We can't map it in. */
		if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
			return false;
M
Matias Zabaljauregui 已提交
417

418 419 420
		/* 
		 * This kills the Guest if it has weird flags or tries to
		 * refer to a "physical" address outside the bounds.
R
Rusty Russell 已提交
421
		 */
422 423
		if (!check_gpmd(cpu, gpmd))
			return false;
M
Matias Zabaljauregui 已提交
424
	}
R
Rusty Russell 已提交
425

R
Rusty Russell 已提交
426 427 428 429
	/*
	 * OK, now we look at the lower level in the Guest page table: keep its
	 * address, because we might update it later.
	 */
R
Rusty Russell 已提交
430 431
	gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
#else
R
Rusty Russell 已提交
432 433 434 435
	/*
	 * OK, now we look at the lower level in the Guest page table: keep its
	 * address, because we might update it later.
	 */
M
Matias Zabaljauregui 已提交
436
	gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
R
Rusty Russell 已提交
437
#endif
R
Rusty Russell 已提交
438

439 440 441 442 443 444 445
	if (unlikely(cpu->linear_pages)) {
		/* Linear?  Make up a PTE which points to same page. */
		gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
	} else {
		/* Read the actual PTE value. */
		gpte = lgread(cpu, gpte_ptr, pte_t);
	}
R
Rusty Russell 已提交
446

R
Rusty Russell 已提交
447
	/* If this page isn't in the Guest page tables, we can't page it in. */
448
	if (!(pte_flags(gpte) & _PAGE_PRESENT))
449
		return false;
R
Rusty Russell 已提交
450

R
Rusty Russell 已提交
451 452 453 454
	/*
	 * Check they're not trying to write to a page the Guest wants
	 * read-only (bit 2 of errcode == write).
	 */
455
	if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
456
		return false;
R
Rusty Russell 已提交
457

R
Rusty Russell 已提交
458
	/* User access to a kernel-only page? (bit 3 == user access) */
459
	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
460
		return false;
R
Rusty Russell 已提交
461

R
Rusty Russell 已提交
462 463 464 465
	/*
	 * Check that the Guest PTE flags are OK, and the page number is below
	 * the pfn_limit (ie. not mapping the Launcher binary).
	 */
466 467
	if (!check_gpte(cpu, gpte))
		return false;
R
Rusty Russell 已提交
468

R
Rusty Russell 已提交
469
	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
470
	gpte = pte_mkyoung(gpte);
R
Rusty Russell 已提交
471
	if (errcode & 2)
472
		gpte = pte_mkdirty(gpte);
R
Rusty Russell 已提交
473

R
Rusty Russell 已提交
474
	/* Get the pointer to the shadow PTE entry we're going to set. */
475 476 477
	spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), pmd_flags(gpmd));
	if (!spte)
		return false;
R
Rusty Russell 已提交
478 479 480 481 482

	/*
	 * If there was a valid shadow PTE entry here before, we release it.
	 * This can happen with a write to a previously read-only entry.
	 */
R
Rusty Russell 已提交
483 484
	release_pte(*spte);

R
Rusty Russell 已提交
485 486 487 488
	/*
	 * If this is a write, we insist that the Guest page is writable (the
	 * final arg to gpte_to_spte()).
	 */
489
	if (pte_dirty(gpte))
490
		*spte = gpte_to_spte(cpu, gpte, 1);
491
	else
R
Rusty Russell 已提交
492 493
		/*
		 * If this is a read, don't set the "writable" bit in the page
R
Rusty Russell 已提交
494
		 * table entry, even if the Guest says it's writable.  That way
R
Rusty Russell 已提交
495
		 * we will come back here when a write does actually occur, so
R
Rusty Russell 已提交
496 497
		 * we can update the Guest's _PAGE_DIRTY flag.
		 */
498
		set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
R
Rusty Russell 已提交
499

R
Rusty Russell 已提交
500 501 502 503
	/*
	 * Finally, we write the Guest PTE entry back: we've set the
	 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
	 */
504 505
	if (likely(!cpu->linear_pages))
		lgwrite(cpu, gpte_ptr, pte_t, gpte);
R
Rusty Russell 已提交
506

R
Rusty Russell 已提交
507 508
	/*
	 * The fault is fixed, the page table is populated, the mapping
R
Rusty Russell 已提交
509 510
	 * manipulated, the result returned and the code complete.  A small
	 * delay and a trace of alliteration are the only indications the Guest
R
Rusty Russell 已提交
511 512
	 * has that a page fault occurred at all.
	 */
513
	return true;
R
Rusty Russell 已提交
514 515
}

R
Rusty Russell 已提交
516 517
/*H:360
 * (ii) Making sure the Guest stack is mapped.
R
Rusty Russell 已提交
518
 *
R
Rusty Russell 已提交
519 520 521 522
 * Remember that direct traps into the Guest need a mapped Guest kernel stack.
 * pin_stack_pages() calls us here: we could simply call demand_page(), but as
 * we've seen that logic is quite long, and usually the stack pages are already
 * mapped, so it's overkill.
R
Rusty Russell 已提交
523 524
 *
 * This is a quick version which answers the question: is this virtual address
R
Rusty Russell 已提交
525 526
 * mapped by the shadow page tables, and is it writable?
 */
527
static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
R
Rusty Russell 已提交
528
{
529
	pte_t *spte;
R
Rusty Russell 已提交
530 531
	unsigned long flags;

532 533
	/* You can't put your stack in the Switcher! */
	if (vaddr >= switcher_addr)
534
		return false;
R
Rusty Russell 已提交
535

536 537 538
	/* If there's no shadow PTE, it's not writable. */
	spte = find_spte(cpu, vaddr, false, 0, 0);
	if (!spte)
M
Matias Zabaljauregui 已提交
539 540
		return false;

R
Rusty Russell 已提交
541 542 543 544
	/*
	 * Check the flags on the pte entry itself: it must be present and
	 * writable.
	 */
545
	flags = pte_flags(*spte);
R
Rusty Russell 已提交
546 547 548
	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}

R
Rusty Russell 已提交
549 550
/*
 * So, when pin_stack_pages() asks us to pin a page, we check if it's already
R
Rusty Russell 已提交
551
 * in the page tables, and if not, we call demand_page() with error code 2
R
Rusty Russell 已提交
552 553
 * (meaning "write").
 */
554
void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
{
	/* Already mapped writable?  Nothing to do. */
	if (page_writable(cpu, vaddr))
		return;

	/* Otherwise fault it in as a write (error code 2). */
	if (!demand_page(cpu, vaddr, 2))
		kill_guest(cpu, "bad stack page %#lx", vaddr);
}
R
Rusty Russell 已提交
559
/*:*/
R
Rusty Russell 已提交
560

M
Matias Zabaljauregui 已提交
561 562 563 564 565 566 567 568 569 570 571 572 573
#ifdef CONFIG_X86_PAE
/* Free one shadow PMD entry: drop every PTE it maps, then the PTE page. */
static void release_pmd(pmd_t *spmd)
{
	/* If the entry's not present, there's nothing to release. */
	if (pmd_flags(*spmd) & _PAGE_PRESENT) {
		unsigned int i;
		pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
		/* For each entry in the page, we might need to release it. */
		for (i = 0; i < PTRS_PER_PTE; i++)
			release_pte(ptepage[i]);
		/* Now we can free the page of PTEs */
		free_page((long)ptepage);
		/* And zero out the PMD entry so we never release it twice. */
		set_pmd(spmd, __pmd(0));
	}
}

/* Free one shadow PGD entry (PAE): release each PMD, then the PMD page. */
static void release_pgd(pgd_t *spgd)
{
	/* If the entry's not present, there's nothing to release. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		unsigned int i;
		pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);

		/* release_pmd() also frees each PMD's page of PTEs. */
		for (i = 0; i < PTRS_PER_PMD; i++)
			release_pmd(&pmdpage[i]);

		/* Now we can free the page of PMDs */
		free_page((long)pmdpage);
		/* And zero out the PGD entry so we never release it twice. */
		set_pgd(spgd, __pgd(0));
	}
}

#else /* !CONFIG_X86_PAE */
R
Rusty Russell 已提交
596 597 598 599 600
/*H:450
 * If we chase down the release_pgd() code, the non-PAE version looks like
 * this.  The PAE version is almost identical, but instead of calling
 * release_pte it calls release_pmd(), which looks much like this.
 */
601
static void release_pgd(pgd_t *spgd)
{
	/* If the entry's not present, there's nothing to release. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		unsigned int i;
		/*
		 * Converting the pfn to find the actual PTE page is easy: turn
		 * the page number into a physical address, then convert to a
		 * virtual address (easy for kernel pages like this one).
		 */
		pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
		/* For each entry in the page, we might need to release it. */
		for (i = 0; i < PTRS_PER_PTE; i++)
			release_pte(ptepage[i]);
		/* Now we can free the page of PTEs */
		free_page((long)ptepage);
		/* And zero out the PGD entry so we never release it twice. */
		*spgd = __pgd(0);
	}
}
M
Matias Zabaljauregui 已提交
621
#endif
R
Rusty Russell 已提交
622 623 624

/*H:445
 * We saw flush_user_mappings() twice: once from the flush_user_mappings()
R
Rusty Russell 已提交
625
 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
R
Rusty Russell 已提交
626 627
 * It simply releases every PTE page from 0 up to the Guest's kernel address.
 */
R
Rusty Russell 已提交
628 629 630
static void flush_user_mappings(struct lguest *lg, int idx)
{
	unsigned int i;
R
Rusty Russell 已提交
631
	/* Release every pgd entry up to the kernel's address. */
632
	for (i = 0; i < pgd_index(lg->kernel_address); i++)
633
		release_pgd(lg->pgdirs[idx].pgdir + i);
R
Rusty Russell 已提交
634 635
}

R
Rusty Russell 已提交
636 637
/*H:440
 * (v) Flushing (throwing away) page tables,
R
Rusty Russell 已提交
638 639
 *
 * The Guest has a hypercall to throw away the page tables: it's used when a
R
Rusty Russell 已提交
640 641
 * large number of mappings have been changed.
 */
642
/*
 * Hypercall entry point: the Guest asks us to throw away all userspace
 * mappings in its currently-active shadow page table.
 */
void guest_pagetable_flush_user(struct lg_cpu *cpu)
{
	/* Drop the userspace part of the current page table. */
	flush_user_mappings(cpu->lg, cpu->cpu_pgd);
}
R
Rusty Russell 已提交
647
/*:*/
R
Rusty Russell 已提交
648

649
/*
 * We walk down the Guest's own page tables to translate a Guest virtual
 * address into a Guest-physical address.  Kills the Guest (and returns
 * -1UL for the upper-level failures) if any level is not present.
 */
unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
{
	pgd_t gpgd;
	pte_t gpte;
#ifdef CONFIG_X86_PAE
	pmd_t gpmd;
#endif

	/* Still not set up?  Just map 1:1. */
	if (unlikely(cpu->linear_pages))
		return vaddr;

	/* First step: get the top-level Guest page table entry. */
	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
	/* Toplevel not present?  We can't map it in. */
	if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) {
		kill_guest(cpu, "Bad address %#lx", vaddr);
		return -1UL;
	}

#ifdef CONFIG_X86_PAE
	gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
	if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) {
		kill_guest(cpu, "Bad address %#lx", vaddr);
		return -1UL;
	}
	gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
#else
	gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
#endif
	/*
	 * NOTE(review): unlike the cases above, we fall through after this
	 * kill_guest() and still return a value computed from the (absent)
	 * PTE — presumably fine because the Guest is already doomed; confirm.
	 */
	if (!(pte_flags(gpte) & _PAGE_PRESENT))
		kill_guest(cpu, "Bad address %#lx", vaddr);

	return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
}

R
Rusty Russell 已提交
686 687
/*
 * We keep several page tables.  This is a simple routine to find the page
R
Rusty Russell 已提交
688
 * table (if any) corresponding to this top-level address the Guest has given
R
Rusty Russell 已提交
689 690
 * us.
 */
R
Rusty Russell 已提交
691 692 693 694
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
	unsigned int i;

	/* Scan for an allocated shadow that tracks this Guest toplevel. */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) {
		if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
			return i;
	}

	/* Not found: i == ARRAY_SIZE(lg->pgdirs), which callers check for. */
	return i;
}

R
Rusty Russell 已提交
700 701
/*H:435
 * And this is us, creating the new page directory.  If we really do
R
Rusty Russell 已提交
702
 * allocate a new one (and so the kernel parts are not there), we set
R
Rusty Russell 已提交
703 704
 * blank_pgdir.
 */
705
/*
 * Pick (or allocate) a shadow pgdir slot to track the Guest toplevel at
 * gpgdir.  Returns the slot index; *blank_pgdir is set to 1 when we handed
 * back a freshly-allocated (empty) page, so the caller knows no kernel
 * mappings exist yet.
 */
static unsigned int new_pgdir(struct lg_cpu *cpu,
			      unsigned long gpgdir,
			      int *blank_pgdir)
{
	unsigned int next;

	/*
	 * We pick one entry at random to throw out.  Choosing the Least
	 * Recently Used might be better, but this is easy.
	 */
	next = prandom_u32() % ARRAY_SIZE(cpu->lg->pgdirs);
	/* If it's never been allocated at all before, try now. */
	if (!cpu->lg->pgdirs[next].pgdir) {
		cpu->lg->pgdirs[next].pgdir =
					(pgd_t *)get_zeroed_page(GFP_KERNEL);
		/* If the allocation fails, just keep using the one we have */
		if (!cpu->lg->pgdirs[next].pgdir)
			next = cpu->cpu_pgd;
		else {
			/*
			 * This is a blank page, so there are no kernel
			 * mappings: caller must map the stack!
			 */
			*blank_pgdir = 1;
		}
	}
	/* Record which Guest toplevel this shadows. */
	cpu->lg->pgdirs[next].gpgdir = gpgdir;
	/* Release all the non-kernel mappings. */
	flush_user_mappings(cpu->lg, next);

	/* This hasn't run on any CPU at all. */
	cpu->lg->pgdirs[next].last_host_cpu = -1;

	return next;
}

742 743
/*H:501
 * We do need the Switcher code mapped at all times, so we allocate that
744 745 746 747 748 749 750
 * part of the Guest page table here.  We map the Switcher code immediately,
 * but defer mapping of the guest register page and IDT/LDT etc page until
 * just before we run the guest in map_switcher_in_guest().
 *
 * We *could* do this setup in map_switcher_in_guest(), but at that point
 * we've interrupts disabled, and allocating pages like that is fraught: we
 * can't sleep if we need to free up some memory.
751 752 753 754 755 756
 */
static bool allocate_switcher_mapping(struct lg_cpu *cpu)
{
	int i;

	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
757 758 759
		pte_t *pte = find_spte(cpu, switcher_addr + i * PAGE_SIZE, true,
				       CHECK_GPGD_MASK, _PAGE_TABLE);
		if (!pte)
760
			return false;
761 762 763 764 765 766 767 768 769 770 771 772 773 774

		/*
		 * Map the switcher page if not already there.  It might
		 * already be there because we call allocate_switcher_mapping()
		 * in guest_set_pgd() just in case it did discard our Switcher
		 * mapping, but it probably didn't.
		 */
		if (i == 0 && !(pte_flags(*pte) & _PAGE_PRESENT)) {
			/* Get a reference to the Switcher page. */
			get_page(lg_switcher_pages[0]);
			/* Create a read-only, exectuable, kernel-style PTE */
			set_pte(pte,
				mk_pte(lg_switcher_pages[0], PAGE_KERNEL_RX));
		}
775
	}
776
	cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped = true;
777 778 779
	return true;
}

R
Rusty Russell 已提交
780 781
/*H:470
 * Finally, a routine which throws away everything: all PGD entries in all
R
Rusty Russell 已提交
782
 * the shadow page tables, including the Guest's kernel mappings.  This is used
R
Rusty Russell 已提交
783 784
 * when we destroy the Guest.
 */
R
Rusty Russell 已提交
785 786 787 788
static void release_all_pagetables(struct lguest *lg)
{
	unsigned int i, j;

R
Rusty Russell 已提交
789
	/* Every shadow pagetable this Guest has */
790 791 792
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) {
		if (!lg->pgdirs[i].pgdir)
			continue;
M
Matias Zabaljauregui 已提交
793

794 795 796
		/* Every PGD entry. */
		for (j = 0; j < PTRS_PER_PGD; j++)
			release_pgd(lg->pgdirs[i].pgdir + j);
797
		lg->pgdirs[i].switcher_mapped = false;
798
		lg->pgdirs[i].last_host_cpu = -1;
799
	}
R
Rusty Russell 已提交
800 801
}

R
Rusty Russell 已提交
802 803
/*
 * We also throw away everything when a Guest tells us it's changed a kernel
R
Rusty Russell 已提交
804
 * mapping.  Since kernel mappings are in every page table, it's easiest to
R
Rusty Russell 已提交
805
 * throw them all away.  This traps the Guest in amber for a while as
R
Rusty Russell 已提交
806 807
 * everything faults back in, but it's rare.
 */
808
void guest_pagetable_clear_all(struct lg_cpu *cpu)
R
Rusty Russell 已提交
809
{
810
	release_all_pagetables(cpu->lg);
R
Rusty Russell 已提交
811
	/* We need the Guest kernel stack mapped again. */
812
	pin_stack_pages(cpu);
813 814 815
	/* And we need Switcher allocated. */
	if (!allocate_switcher_mapping(cpu))
		kill_guest(cpu, "Cannot populate switcher mapping");
R
Rusty Russell 已提交
816
}
817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850

/*H:430
 * (iv) Switching page tables
 *
 * Now we've seen all the page table setting and manipulation, let's see
 * what happens when the Guest changes page tables (ie. changes the top-level
 * pgdir).  This occurs on almost every context switch.
 */
void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
{
	int newpgdir, repin = 0;

	/*
	 * The very first time they call this, we're actually running without
	 * any page tables; we've been making it up.  Throw them away now.
	 */
	if (unlikely(cpu->linear_pages)) {
		release_all_pagetables(cpu->lg);
		cpu->linear_pages = false;
		/* Force allocation of a new pgdir. */
		newpgdir = ARRAY_SIZE(cpu->lg->pgdirs);
	} else {
		/* Look to see if we have this one already. */
		newpgdir = find_pgdir(cpu->lg, pgtable);
	}

	/*
	 * If not, we allocate or mug an existing one: if it's a fresh one,
	 * repin gets set to 1.
	 */
	if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
		newpgdir = new_pgdir(cpu, pgtable, &repin);
	/* Change the current pgd index to the new one. */
	cpu->cpu_pgd = newpgdir;
851 852 853 854
	/*
	 * If it was completely blank, we map in the Guest kernel stack and
	 * the Switcher.
	 */
855 856
	if (repin)
		pin_stack_pages(cpu);
857

858 859 860 861
	if (!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped) {
		if (!allocate_switcher_mapping(cpu))
			kill_guest(cpu, "Cannot populate switcher mapping");
	}
862
}
/*:*/

/*M:009
 * Since we throw away all mappings when a kernel mapping changes, our
 * performance sucks for guests using highmem.  In fact, a guest with
 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
 * usually slower than a Guest with less memory.
 *
 * This, of course, cannot be fixed.  It would take some kind of... well, I
 * don't know, but the term "puissant code-fu" comes to mind.
:*/

/*H:420
 * This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it demand_page() will put the new entry in.  We need to do this anyway:
 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway.  This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 */
static void __guest_set_pte(struct lg_cpu *cpu, int idx,
		       unsigned long vaddr, pte_t gpte)
{
	/* Look up the matching shadow page directory entry. */
	pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
#ifdef CONFIG_X86_PAE
	pmd_t *spmd;
#endif

	/* If the top level isn't present, there's no entry to update. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
#ifdef CONFIG_X86_PAE
		/* With PAE there's a mid-level (PMD) to check as well. */
		spmd = spmd_addr(cpu, *spgd, vaddr);
		if (pmd_flags(*spmd) & _PAGE_PRESENT) {
#endif
			/* Otherwise, start by releasing the existing entry. */
			pte_t *spte = spte_addr(cpu, *spgd, vaddr);
			release_pte(*spte);

			/*
			 * If they're setting this entry as dirty or accessed,
			 * we might as well put that entry they've given us in
			 * now.  This shaves 10% off a copy-on-write
			 * micro-benchmark.
			 */
			if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
				/* If check_gpte() rejects it, just bail. */
				if (!check_gpte(cpu, gpte))
					return;
				set_pte(spte,
					gpte_to_spte(cpu, gpte,
						pte_flags(gpte) & _PAGE_DIRTY));
			} else {
				/*
				 * Otherwise kill it and we can demand_page()
				 * it in later.
				 */
				set_pte(spte, __pte(0));
			}
#ifdef CONFIG_X86_PAE
		}
#endif
	}
}

R
Rusty Russell 已提交
934 935
/*H:410
 * Updating a PTE entry is a little trickier.
R
Rusty Russell 已提交
936 937 938 939 940 941 942
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few).  Each of these have
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes.  So when the page table above that address changes, we update
 * all the page tables, not just the current one.  This is rare.
 *
943
 * The benefit is that when we have to track a new page table, we can keep all
R
Rusty Russell 已提交
944 945
 * the kernel mappings.  This speeds up context switch immensely.
 */
946
void guest_set_pte(struct lg_cpu *cpu,
947
		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
R
Rusty Russell 已提交
948
{
949 950 951 952 953 954
	/* We don't let you remap the Switcher; we need it to get back! */
	if (vaddr >= switcher_addr) {
		kill_guest(cpu, "attempt to set pte into Switcher pages");
		return;
	}

R
Rusty Russell 已提交
955 956 957 958
	/*
	 * Kernel mappings must be changed on all top levels.  Slow, but doesn't
	 * happen often.
	 */
959
	if (vaddr >= cpu->lg->kernel_address) {
R
Rusty Russell 已提交
960
		unsigned int i;
961 962
		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
			if (cpu->lg->pgdirs[i].pgdir)
963
				__guest_set_pte(cpu, i, vaddr, gpte);
R
Rusty Russell 已提交
964
	} else {
R
Rusty Russell 已提交
965
		/* Is this page table one we have a shadow for? */
966 967
		int pgdir = find_pgdir(cpu->lg, gpgdir);
		if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
R
Rusty Russell 已提交
968
			/* If so, do the update. */
969
			__guest_set_pte(cpu, pgdir, vaddr, gpte);
R
Rusty Russell 已提交
970 971 972
	}
}

R
Rusty Russell 已提交
973
/*H:400
R
Rusty Russell 已提交
974
 * (iii) Setting up a page table entry when the Guest tells us one has changed.
R
Rusty Russell 已提交
975 976 977 978 979 980 981 982 983 984
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed.  When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
985
 * So with that in mind here's our code to update a (top-level) PGD entry:
R
Rusty Russell 已提交
986
 */
987
void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
R
Rusty Russell 已提交
988 989 990
{
	int pgdir;

991 992 993
	if (idx > PTRS_PER_PGD) {
		kill_guest(&lg->cpus[0], "Attempt to set pgd %u/%u",
			   idx, PTRS_PER_PGD);
R
Rusty Russell 已提交
994
		return;
995
	}
R
Rusty Russell 已提交
996

R
Rusty Russell 已提交
997
	/* If they're talking about a page table we have a shadow for... */
998
	pgdir = find_pgdir(lg, gpgdir);
999
	if (pgdir < ARRAY_SIZE(lg->pgdirs)) {
R
Rusty Russell 已提交
1000
		/* ... throw it away. */
1001
		release_pgd(lg->pgdirs[pgdir].pgdir + idx);
1002 1003 1004 1005 1006
		/* That might have been the Switcher mapping, remap it. */
		if (!allocate_switcher_mapping(&lg->cpus[0])) {
			kill_guest(&lg->cpus[0],
				   "Cannot populate switcher mapping");
		}
1007
		lg->pgdirs[pgdir].last_host_cpu = -1;
1008
	}
R
Rusty Russell 已提交
1009
}

#ifdef CONFIG_X86_PAE
/*
 * For setting a mid-level (PMD) entry, we just throw everything away —
 * every shadow pagetable — and let it all fault back in.  It's easy.
 */
void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
{
	guest_pagetable_clear_all(&lg->cpus[0]);
}
#endif
R
Rusty Russell 已提交
1018

R
Rusty Russell 已提交
1019 1020
/*H:500
 * (vii) Setting up the page tables initially.
R
Rusty Russell 已提交
1021
 *
1022 1023 1024 1025
 * When a Guest is first created, set initialize a shadow page table which
 * we will populate on future faults.  The Guest doesn't have any actual
 * pagetables yet, so we set linear_pages to tell demand_page() to fake it
 * for the moment.
1026 1027 1028
 *
 * We do need the Switcher to be mapped at all times, so we allocate that
 * part of the Guest page table here.
R
Rusty Russell 已提交
1029
 */
1030
int init_guest_pagetable(struct lguest *lg)
R
Rusty Russell 已提交
1031
{
1032 1033
	struct lg_cpu *cpu = &lg->cpus[0];
	int allocated = 0;
1034

1035 1036 1037
	/* lg (and lg->cpus[]) starts zeroed: this allocates a new pgdir */
	cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
	if (!allocated)
R
Rusty Russell 已提交
1038
		return -ENOMEM;
R
Rusty Russell 已提交
1039

1040 1041
	/* We start with a linear mapping until the initialize. */
	cpu->linear_pages = true;
1042 1043 1044 1045 1046 1047 1048

	/* Allocate the page tables for the Switcher. */
	if (!allocate_switcher_mapping(cpu)) {
		release_all_pagetables(lg);
		return -ENOMEM;
	}

R
Rusty Russell 已提交
1049 1050 1051
	return 0;
}

R
Rusty Russell 已提交
1052
/*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
1053
void page_table_guest_data_init(struct lg_cpu *cpu)
1054
{
1055 1056 1057 1058 1059 1060 1061
	/*
	 * We tell the Guest that it can't use the virtual addresses
	 * used by the Switcher.  This trick is equivalent to 4GB -
	 * switcher_addr.
	 */
	u32 top = ~switcher_addr + 1;

1062
	/* We get the kernel address: above this is all kernel memory. */
1063
	if (get_user(cpu->lg->kernel_address,
1064
		     &cpu->lg->lguest_data->kernel_address)
R
Rusty Russell 已提交
1065
		/*
1066 1067
		 * We tell the Guest that it can't use the top virtual
		 * addresses (used by the Switcher).
R
Rusty Russell 已提交
1068
		 */
1069
	    || put_user(top, &cpu->lg->lguest_data->reserve_mem)) {
1070
		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
1071 1072
		return;
	}
1073

R
Rusty Russell 已提交
1074 1075
	/*
	 * In flush_user_mappings() we loop from 0 to
1076
	 * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
R
Rusty Russell 已提交
1077 1078
	 * Switcher mappings, so check that now.
	 */
1079
	if (cpu->lg->kernel_address >= switcher_addr)
1080 1081
		kill_guest(cpu, "bad kernel address %#lx",
				 cpu->lg->kernel_address);
1082 1083
}

R
Rusty Russell 已提交
1084
/* When a Guest dies, our cleanup is fairly simple. */
R
Rusty Russell 已提交
1085 1086 1087 1088
void free_guest_pagetable(struct lguest *lg)
{
	unsigned int i;

R
Rusty Russell 已提交
1089
	/* Throw away all page table pages. */
R
Rusty Russell 已提交
1090
	release_all_pagetables(lg);
R
Rusty Russell 已提交
1091
	/* Now free the top levels: free_page() can handle 0 just fine. */
R
Rusty Russell 已提交
1092 1093 1094 1095
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		free_page((long)lg->pgdirs[i].pgdir);
}

1096 1097
/*H:481
 * This clears the Switcher mappings for cpu #i.
R
Rusty Russell 已提交
1098
 */
1099
static void remove_switcher_percpu_map(struct lg_cpu *cpu, unsigned int i)
R
Rusty Russell 已提交
1100
{
1101 1102
	unsigned long base = switcher_addr + PAGE_SIZE + i * PAGE_SIZE*2;
	pte_t *pte;
R
Rusty Russell 已提交
1103

1104 1105 1106 1107
	/* Clear the mappings for both pages. */
	pte = find_spte(cpu, base, false, 0, 0);
	release_pte(*pte);
	set_pte(pte, __pte(0));
M
Matias Zabaljauregui 已提交
1108

1109 1110 1111
	pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
	release_pte(*pte);
	set_pte(pte, __pte(0));
R
Rusty Russell 已提交
1112 1113
}

/*H:480
 * (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this Host CPU need to be visible in the
 * Guest (and not the pages for other CPUs).
 *
 * The pages for the pagetables have all been allocated before, in
 * allocate_switcher_mapping(): we just need to make sure the actual PTEs are
 * up-to-date for the CPU we're about to run on.
 */
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
	unsigned long base;
	struct page *percpu_switcher_page, *regs_page;
	pte_t *pte;
	struct pgdir *pgdir = &cpu->lg->pgdirs[cpu->cpu_pgd];

	/* Switcher page should always be mapped by now! */
	BUG_ON(!pgdir->switcher_mapped);

	/*
	 * Remember that we have two pages for each Host CPU, so we can run a
	 * Guest on each CPU without them interfering.  We need to make sure
	 * those pages are mapped correctly in the Guest, but since we usually
	 * run on the same CPU, we cache that, and only update the mappings
	 * when we move.
	 */
	if (pgdir->last_host_cpu == raw_smp_processor_id())
		return;

	/* -1 means unknown, so we remove every possible CPU's mappings. */
	if (pgdir->last_host_cpu == -1) {
		unsigned int i;
		for_each_possible_cpu(i)
			remove_switcher_percpu_map(cpu, i);
	} else {
		/* We know exactly which CPU mapping to remove. */
		remove_switcher_percpu_map(cpu, pgdir->last_host_cpu);
	}

	/*
	 * When we're running the Guest, we want the Guest's "regs" page to
	 * appear where the first Switcher page for this CPU is.  This is an
	 * optimization: when the Switcher saves the Guest registers, it saves
	 * them into the first page of this CPU's "struct lguest_pages": if we
	 * make sure the Guest's register page is already mapped there, we
	 * don't have to copy them out again.
	 *
	 * NOTE(review): this stride assumes sizeof(struct lguest_pages) ==
	 * 2*PAGE_SIZE, matching remove_switcher_percpu_map()'s PAGE_SIZE*2
	 * — confirm against the struct definition.
	 */
	/* Find the shadow PTE for this regs page. */
	base = switcher_addr + PAGE_SIZE
		+ raw_smp_processor_id() * sizeof(struct lguest_pages);
	pte = find_spte(cpu, base, false, 0, 0);
	regs_page = pfn_to_page(__pa(cpu->regs_page) >> PAGE_SHIFT);
	get_page(regs_page);
	set_pte(pte, mk_pte(regs_page, __pgprot(__PAGE_KERNEL & ~_PAGE_GLOBAL)));

	/*
	 * We map the second page of the struct lguest_pages read-only in
	 * the Guest: the IDT, GDT and other things it's not supposed to
	 * change.
	 */
	pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
	percpu_switcher_page
		= lg_switcher_pages[1 + raw_smp_processor_id()*2 + 1];
	get_page(percpu_switcher_page);
	set_pte(pte, mk_pte(percpu_switcher_page,
			    __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)));

	/* Cache which Host CPU's pages this pgdir now maps. */
	pgdir->last_host_cpu = raw_smp_processor_id();
}

/*H:490
 * We've made it through the page table code.  Perhaps our tired brains are
 * still processing the details, or perhaps we're simply glad it's over.
 *
 * If nothing else, note that all this complexity in juggling shadow page tables
 * in sync with the Guest's page tables is for one reason: for most Guests this
 * page table dance determines how bad performance will be.  This is why Xen
 * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD
 * have implemented shadow page table support directly into hardware.
 *
 * There is just one file remaining in the Host.
 */