page_tables.c

/*P:700 The pagetable code, on the other hand, still shows the scars of
 * previous encounters.  It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual to physical mapping, but we can neither trust
 * it nor use it: we verify and convert it here then point the CPU to the
 * converted Guest pages when running the Guest. :*/

/* Copyright (C) Rusty Russell IBM Corporation 2006.
 * GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include "lg.h"

/*M:008 We hold a reference to pages, which prevents them from being swapped.
 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
 * to swap out.  If we had this, and a shrinker callback to trim PTE pages, we
 * could probably consider launching Guests as non-root. :*/

/*H:300
 * The Page Table Code
 *
 * We use two-level page tables for the Guest.  If you're not entirely
 * comfortable with virtual addresses, physical addresses and page tables then
 * I recommend you review arch/x86/lguest/boot.c's "Page Table Handling" (with
 * diagrams!).
 *
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables.  Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's.  (See what I mean about weird naming?  Since when do
 * shadows reflect anything?)
 *
 * Anyway, this is the most complicated part of the Host code.  There are seven
 * parts to this:
 *  (i) Looking up a page table entry when the Guest faults,
 *  (ii) Making sure the Guest stack is mapped,
 *  (iii) Setting up a page table entry when the Guest tells us one has changed,
 *  (iv) Switching page tables,
 *  (v) Flushing (throwing away) page tables,
 *  (vi) Mapping the Switcher when the Guest is about to run,
 *  (vii) Setting up the page tables initially.
 :*/
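
/* A worked example may help here (illustration only: the numbers are made
 * up).  With two-level x86 paging, a 32-bit virtual address splits into a
 * 10-bit PGD index, a 10-bit PTE index and a 12-bit offset.  For 0xC0101234:
 *
 *   pgd index = 0xC0101234 >> 22           = 768
 *   pte index = (0xC0101234 >> 12) & 0x3FF = 257
 *   offset    = 0xC0101234 & 0xFFF         = 0x234
 *
 * Both the Guest's page tables and our shadows are walked with exactly this
 * arithmetic. */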


/* 1024 entries in a page table page map 1024 pages: 4MB.  The Switcher is
 * conveniently placed at the top 4MB, so it uses a separate, complete PTE
 * page.  */
#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)

/* We actually need a separate PTE page for each CPU.  Remember that after the
 * Switcher code itself comes two pages for each CPU, and we don't want this
 * CPU's guest to see the pages of any other CPU. */
static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)
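
/* A sketch of what one CPU's Switcher PTE page maps (assuming the Switcher
 * code fits in a single page, which populate_switcher_pte_page() below says
 * is currently true):
 *
 *   entry 0:         the Switcher code page (shared by every CPU)
 *   entry 1 + cpu*2: this CPU's "struct lguest_pages" register page
 *   entry 2 + cpu*2: this CPU's read-only state page
 *
 * All other entries are left empty, so a Guest running on this CPU can never
 * see another CPU's pages. */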

/*H:320 The page table code is curly enough to need helper functions to keep it
 * clear and clean.
 *
 * There are two functions which return pointers to the shadow (aka "real")
 * page tables.
 *
 * spgd_addr() takes the virtual address and returns a pointer to the top-level
 * page directory entry (PGD) for that address.  Since we keep track of several
 * page tables, the "i" argument tells us which one we're interested in (it's
 * usually the current one). */
static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
{
	unsigned int index = pgd_index(vaddr);

	/* We kill any Guest trying to touch the Switcher addresses. */
	if (index >= SWITCHER_PGD_INDEX) {
		kill_guest(cpu, "attempt to access switcher pages");
		index = 0;
	}
	/* Return a pointer to the index'th pgd entry for the i'th page table. */
	return &cpu->lg->pgdirs[i].pgdir[index];
}

/* This routine then takes the page directory entry returned above, which
 * contains the address of the page table entry (PTE) page.  It then returns a
 * pointer to the PTE entry for the given address. */
static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr)
{
	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
	/* You should never call this if the PGD entry wasn't valid */
	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
	return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE];
}

/* These two functions are just like the above two, except they access the
 * Guest page tables.  Hence they return a Guest address. */
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
{
	unsigned int index = vaddr >> (PGDIR_SHIFT);
	return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
}

static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
{
	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
	return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t);
}
/*:*/

/*M:014 get_pfn is slow: we could probably try to grab batches of pages here as
 * an optimization (ie. pre-faulting). :*/

/*H:350 This routine takes a page number given by the Guest and converts it to
 * an actual, physical page number.  It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag is set
 * and the page is read-only, or the write flag was set and the page was
 * shared so had to be copied, but we ran out of memory.
 *
 * This holds a reference to the page, so release_pte() is careful to put that
 * back. */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
	struct page *page;

	/* gup me one page at this address please! */
	if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
		return page_to_pfn(page);

	/* This value indicates failure. */
	return -1UL;
}

/*H:340 Converting a Guest page table entry to a shadow (ie. real) page table
 * entry can be a little tricky.  The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
 * number. */
static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
{
	unsigned long pfn, base, flags;

	/* The Guest sets the global flag, because it thinks that it is using
	 * PGE.  We only told it to use PGE so it would tell us whether it was
	 * flushing a kernel mapping or a userspace mapping.  We don't actually
	 * use the global bit, so throw it away. */
	flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);

	/* The Guest's pages are offset inside the Launcher. */
	base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;

	/* We need a temporary "unsigned long" variable to hold the answer from
	 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
	 * fit in spte.pfn.  get_pfn() finds the real physical number of the
	 * page, given the virtual number. */
	pfn = get_pfn(base + pte_pfn(gpte), write);
	if (pfn == -1UL) {
		kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
		/* When we destroy the Guest, we'll go through the shadow page
		 * tables and release_pte() them.  Make sure we don't think
		 * this one is valid! */
		flags = 0;
	}
	/* Now we assemble our shadow PTE from the page number and flags. */
	return pfn_pte(pfn, __pgprot(flags));
}
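
/* To make that concrete (with made-up numbers): if the Launcher mapped Guest
 * memory at mem_base 0x8000000, then base above is 0x8000000 / 4096 = 0x8000,
 * so a Guest PTE naming guest page 5 looks up Launcher virtual page 0x8005,
 * and get_pfn() asks Linux for the real physical page backing it.  That real
 * page number, plus the Guest's flags (minus _PAGE_GLOBAL), is what the CPU
 * finally sees. */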

/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(pte_t pte)
{
	/* Remember that get_user_pages_fast() took a reference to the page, in
	 * get_pfn()?  We have to put it back now. */
	if (pte_flags(pte) & _PAGE_PRESENT)
		put_page(pfn_to_page(pte_pfn(pte)));
}
/*:*/

static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
{
	if ((pte_flags(gpte) & _PAGE_PSE) ||
	    pte_pfn(gpte) >= cpu->lg->pfn_limit)
		kill_guest(cpu, "bad page table entry");
}

static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
{
	if ((pgd_flags(gpgd) & ~_PAGE_TABLE) ||
	   (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
		kill_guest(cpu, "bad page directory entry");
}

/*H:330
 * (i) Looking up a page table entry when the Guest faults.
 *
 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 * come here.  That's because we only set up the shadow page tables lazily as
 * they're needed, so we get page faults all the time and quietly fix them up
 * and return to the Guest without it knowing.
 *
 * If we fixed up the fault (ie. we mapped the address), this routine returns
 * true.  Otherwise, it was a real fault and we need to tell the Guest. */
int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
{
	pgd_t gpgd;
	pgd_t *spgd;
	unsigned long gpte_ptr;
	pte_t gpte;
	pte_t *spte;

	/* First step: get the top-level Guest page table entry. */
	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
	/* Toplevel not present?  We can't map it in. */
	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
		return 0;

	/* Now look at the matching shadow entry. */
	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
	if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
		/* No shadow entry: allocate a new shadow PTE page. */
		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
		/* This is not really the Guest's fault, but killing it is
		 * simple for this corner case. */
		if (!ptepage) {
			kill_guest(cpu, "out of memory allocating pte page");
			return 0;
		}
		/* We check that the Guest pgd is OK. */
		check_gpgd(cpu, gpgd);
		/* And we copy the flags to the shadow PGD entry.  The page
		 * number in the shadow PGD is the page we just allocated. */
		*spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd));
	}

	/* OK, now we look at the lower level in the Guest page table: keep its
	 * address, because we might update it later. */
	gpte_ptr = gpte_addr(gpgd, vaddr);
	gpte = lgread(cpu, gpte_ptr, pte_t);

	/* If this page isn't in the Guest page tables, we can't page it in. */
	if (!(pte_flags(gpte) & _PAGE_PRESENT))
		return 0;

	/* Check they're not trying to write to a page the Guest wants
	 * read-only (errcode & 2 means a write fault). */
	if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
		return 0;

	/* User access to a kernel-only page? (errcode & 4 means user access) */
	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
		return 0;

	/* Check that the Guest PTE flags are OK, and the page number is below
	 * the pfn_limit (ie. not mapping the Launcher binary). */
	check_gpte(cpu, gpte);

	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
	gpte = pte_mkyoung(gpte);
	if (errcode & 2)
		gpte = pte_mkdirty(gpte);

	/* Get the pointer to the shadow PTE entry we're going to set. */
	spte = spte_addr(*spgd, vaddr);
	/* If there was a valid shadow PTE entry here before, we release it.
	 * This can happen with a write to a previously read-only entry. */
	release_pte(*spte);

	/* If this is a write, we insist that the Guest page is writable (the
	 * final arg to gpte_to_spte()). */
	if (pte_dirty(gpte))
		*spte = gpte_to_spte(cpu, gpte, 1);
	else
		/* If this is a read, don't set the "writable" bit in the page
		 * table entry, even if the Guest says it's writable.  That way
		 * we will come back here when a write does actually occur, so
		 * we can update the Guest's _PAGE_DIRTY flag. */
		*spte = gpte_to_spte(cpu, pte_wrprotect(gpte), 0);

	/* Finally, we write the Guest PTE entry back: we've set the
	 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
	lgwrite(cpu, gpte_ptr, pte_t, gpte);

	/* The fault is fixed, the page table is populated, the mapping
	 * manipulated, the result returned and the code complete.  A small
	 * delay and a trace of alliteration are the only indications the Guest
	 * has that a page fault occurred at all. */
	return 1;
}
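
/* For reference: demand_page()'s errcode follows the x86 page fault error
 * code layout.  Bit 0 (value 1) is set if the faulting page was present (a
 * protection fault rather than a missing page), bit 1 (value 2) if it was a
 * write, and bit 2 (value 4) if the access came from userspace.  That's why
 * pin_page() below can fake a write fault simply by passing 2. */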

/*H:360
 * (ii) Making sure the Guest stack is mapped.
 *
 * Remember that direct traps into the Guest need a mapped Guest kernel stack.
 * pin_stack_pages() calls us here: we could simply call demand_page(), but as
 * we've seen that logic is quite long, and usually the stack pages are already
 * mapped, so it's overkill.
 *
 * This is a quick version which answers the question: is this virtual address
 * mapped by the shadow page tables, and is it writable? */
static int page_writable(struct lg_cpu *cpu, unsigned long vaddr)
{
	pgd_t *spgd;
	unsigned long flags;

	/* Look at the current top level entry: is it present? */
	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
	if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
		return 0;

	/* Check the flags on the pte entry itself: it must be present and
	 * writable. */
	flags = pte_flags(*(spte_addr(*spgd, vaddr)));

	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}

/* So, when pin_stack_pages() asks us to pin a page, we check if it's already
 * in the page tables, and if not, we call demand_page() with error code 2
 * (meaning "write"). */
void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
{
	if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
		kill_guest(cpu, "bad stack page %#lx", vaddr);
}

/*H:450 If we chase down the release_pgd() code, it looks like this: */
static void release_pgd(struct lguest *lg, pgd_t *spgd)
{
	/* If the entry's not present, there's nothing to release. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		unsigned int i;
		/* Converting the pfn to find the actual PTE page is easy: turn
		 * the page number into a physical address, then convert to a
		 * virtual address (easy for kernel pages like this one). */
		pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
		/* For each entry in the page, we might need to release it. */
		for (i = 0; i < PTRS_PER_PTE; i++)
			release_pte(ptepage[i]);
		/* Now we can free the page of PTEs */
		free_page((long)ptepage);
		/* And zero out the PGD entry so we never release it twice. */
		*spgd = __pgd(0);
	}
}

/*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings()
 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
 * It simply releases every PTE page from 0 up to the Guest's kernel address. */
static void flush_user_mappings(struct lguest *lg, int idx)
{
	unsigned int i;
	/* Release every pgd entry up to the kernel's address. */
	for (i = 0; i < pgd_index(lg->kernel_address); i++)
		release_pgd(lg, lg->pgdirs[idx].pgdir + i);
}

/*H:440 (v) Flushing (throwing away) page tables,
 *
 * The Guest has a hypercall to throw away the page tables: it's used when a
 * large number of mappings have been changed. */
void guest_pagetable_flush_user(struct lg_cpu *cpu)
{
	/* Drop the userspace part of the current page table. */
	flush_user_mappings(cpu->lg, cpu->cpu_pgd);
}
/*:*/

/* We walk down the guest page tables to get a guest-physical address */
unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
{
	pgd_t gpgd;
	pte_t gpte;

	/* First step: get the top-level Guest page table entry. */
	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
	/* Toplevel not present?  We can't map it in. */
	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
		kill_guest(cpu, "Bad address %#lx", vaddr);

	gpte = lgread(cpu, gpte_addr(gpgd, vaddr), pte_t);
	if (!(pte_flags(gpte) & _PAGE_PRESENT))
		kill_guest(cpu, "Bad address %#lx", vaddr);

	return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
}
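
/* For illustration: if the walk above found a gpte with page number 0x1234
 * and the offset bits of vaddr are 0x567, the result is
 * 0x1234 * 4096 | 0x567 = 0x1234567: the page number times the page size,
 * plus the offset within the page. */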

/* We keep several page tables.  This is a simple routine to find the page
 * table (if any) corresponding to this top-level address the Guest has given
 * us. */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
	unsigned int i;
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
			break;
	return i;
}

/*H:435 And this is us, creating the new page directory.  If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir. */
static unsigned int new_pgdir(struct lg_cpu *cpu,
			      unsigned long gpgdir,
			      int *blank_pgdir)
{
	unsigned int next;

	/* We pick one entry at random to throw out.  Choosing the Least
	 * Recently Used might be better, but this is easy. */
	next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
	/* If it's never been allocated at all before, try now. */
	if (!cpu->lg->pgdirs[next].pgdir) {
		cpu->lg->pgdirs[next].pgdir =
					(pgd_t *)get_zeroed_page(GFP_KERNEL);
		/* If the allocation fails, just keep using the one we have */
		if (!cpu->lg->pgdirs[next].pgdir)
			next = cpu->cpu_pgd;
		else
			/* This is a blank page, so there are no kernel
			 * mappings: caller must map the stack! */
			*blank_pgdir = 1;
	}
	/* Record which Guest toplevel this shadows. */
	cpu->lg->pgdirs[next].gpgdir = gpgdir;
	/* Release all the non-kernel mappings. */
	flush_user_mappings(cpu->lg, next);

	return next;
}

/*H:430 (iv) Switching page tables
 *
 * Now we've seen all the page table setting and manipulation, let's see what
 * happens when the Guest changes page tables (ie. changes the top-level
 * pgdir).  This occurs on almost every context switch. */
void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
{
	int newpgdir, repin = 0;

	/* Look to see if we have this one already. */
	newpgdir = find_pgdir(cpu->lg, pgtable);
	/* If not, we allocate or mug an existing one: if it's a fresh one,
	 * repin gets set to 1. */
	if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
		newpgdir = new_pgdir(cpu, pgtable, &repin);
	/* Change the current pgd index to the new one. */
	cpu->cpu_pgd = newpgdir;
	/* If it was completely blank, we map in the Guest kernel stack */
	if (repin)
		pin_stack_pages(cpu);
}

/*H:470 Finally, a routine which throws away everything: all PGD entries in all
 * the shadow page tables, including the Guest's kernel mappings.  This is used
 * when we destroy the Guest. */
static void release_all_pagetables(struct lguest *lg)
{
	unsigned int i, j;

	/* Every shadow pagetable this Guest has */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].pgdir)
			/* Every PGD entry except the Switcher at the top */
			for (j = 0; j < SWITCHER_PGD_INDEX; j++)
				release_pgd(lg, lg->pgdirs[i].pgdir + j);
}

/* We also throw away everything when a Guest tells us it's changed a kernel
 * mapping.  Since kernel mappings are in every page table, it's easiest to
 * throw them all away.  This traps the Guest in amber for a while as
 * everything faults back in, but it's rare. */
void guest_pagetable_clear_all(struct lg_cpu *cpu)
{
	release_all_pagetables(cpu->lg);
	/* We need the Guest kernel stack mapped again. */
	pin_stack_pages(cpu);
}
/*:*/
/*M:009 Since we throw away all mappings when a kernel mapping changes, our
 * performance sucks for guests using highmem.  In fact, a guest with
 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
 * usually slower than a Guest with less memory.
 *
 * This, of course, cannot be fixed.  It would take some kind of... well, I
 * don't know, but the term "puissant code-fu" comes to mind. :*/

/*H:420 This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it demand_page() will put the new entry in.  We need to do this anyway:
 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway.  This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 */
static void do_set_pte(struct lg_cpu *cpu, int idx,
		       unsigned long vaddr, pte_t gpte)
{
	/* Look up the matching shadow page directory entry. */
	pgd_t *spgd = spgd_addr(cpu, idx, vaddr);

	/* If the top level isn't present, there's no entry to update. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		/* Otherwise, we start by releasing the existing entry. */
		pte_t *spte = spte_addr(*spgd, vaddr);
		release_pte(*spte);

		/* If they're setting this entry as dirty or accessed, we might
		 * as well put that entry they've given us in now.  This shaves
		 * 10% off a copy-on-write micro-benchmark. */
		if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
			check_gpte(cpu, gpte);
			*spte = gpte_to_spte(cpu, gpte,
					     pte_flags(gpte) & _PAGE_DIRTY);
		} else
			/* Otherwise kill it and we can demand_page() it in
			 * later. */
			*spte = __pte(0);
	}
}

/*H:410 Updating a PTE entry is a little trickier.
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few).  Each of these has
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes.  So when the page table above that address changes, we update
 * all the page tables, not just the current one.  This is rare.
 *
 * The benefit is that when we have to track a new page table, we can keep all
 * the kernel mappings.  This speeds up context switch immensely. */
void guest_set_pte(struct lg_cpu *cpu,
		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
{
	/* Kernel mappings must be changed on all top levels.  Slow, but doesn't
	 * happen often. */
	if (vaddr >= cpu->lg->kernel_address) {
		unsigned int i;
		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
			if (cpu->lg->pgdirs[i].pgdir)
				do_set_pte(cpu, i, vaddr, gpte);
	} else {
		/* Is this page table one we have a shadow for? */
		int pgdir = find_pgdir(cpu->lg, gpgdir);
		if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
			/* If so, do the update. */
			do_set_pte(cpu, pgdir, vaddr, gpte);
	}
}

/*H:400
 * (iii) Setting up a page table entry when the Guest tells us one has changed.
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed.  When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
 * So with that in mind here's our code to update a (top-level) PGD entry:
 */
void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
	int pgdir;

	/* The kernel seems to try to initialize this early on: we ignore its
	 * attempts to map over the Switcher. */
	if (idx >= SWITCHER_PGD_INDEX)
		return;

	/* If they're talking about a page table we have a shadow for... */
	pgdir = find_pgdir(lg, gpgdir);
	if (pgdir < ARRAY_SIZE(lg->pgdirs))
		/* ... throw it away. */
		release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
}

/*H:500 (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, the Launcher tells us where the toplevel of
 * its first page table is.  We set some things up here: */
int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
{
	/* We start on the first shadow page table, and give it a blank PGD
	 * page. */
	lg->pgdirs[0].gpgdir = pgtable;
	lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
	if (!lg->pgdirs[0].pgdir)
		return -ENOMEM;
	lg->cpus[0].cpu_pgd = 0;
	return 0;
}

/* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
void page_table_guest_data_init(struct lg_cpu *cpu)
{
	/* We get the kernel address: above this is all kernel memory. */
	if (get_user(cpu->lg->kernel_address,
		     &cpu->lg->lguest_data->kernel_address)
	    /* We tell the Guest that it can't use the top 4MB of virtual
	     * addresses used by the Switcher. */
	    || put_user(4U*1024*1024, &cpu->lg->lguest_data->reserve_mem)
	    || put_user(cpu->lg->pgdirs[0].gpgdir, &cpu->lg->lguest_data->pgdir))
		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);

	/* In flush_user_mappings() we loop from 0 to
	 * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
	 * Switcher mappings, so check that now. */
	if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
		kill_guest(cpu, "bad kernel address %#lx",
				 cpu->lg->kernel_address);
}

/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
	unsigned int i;

	/* Throw away all page table pages. */
	release_all_pagetables(lg);
	/* Now free the top levels: free_page() can handle 0 just fine. */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		free_page((long)lg->pgdirs[i].pgdir);
}

/*H:480 (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be visible in the
 * Guest (and not the pages for other CPUs).  We have the appropriate PTE pages
 * for each CPU already set up, we just need to hook them in now we know which
 * Guest is about to run on this CPU. */
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
	pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
	pgd_t switcher_pgd;
	pte_t regs_pte;
	unsigned long pfn;

	/* Make the last PGD entry for this Guest point to the Switcher's PTE
	 * page for this CPU (with appropriate flags). */
	switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL);

	cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;

	/* We also change the Switcher PTE page.  When we're running the Guest,
	 * we want the Guest's "regs" page to appear where the first Switcher
	 * page for this CPU is.  This is an optimization: when the Switcher
	 * saves the Guest registers, it saves them into the first page of this
	 * CPU's "struct lguest_pages": if we make sure the Guest's register
	 * page is already mapped there, we don't have to copy them out
	 * again. */
	pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
	regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL));
	switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte;
}
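
/* The index arithmetic on that last line deserves a worked (hypothetical)
 * example.  If the Switcher code takes one page at the top 4MB, CPU 0's
 * "pages" sits at 0xFFC01000, so
 *
 *   (unsigned long)pages / PAGE_SIZE % PTRS_PER_PTE = 0xFFC01 % 1024 = 1
 *
 * i.e. slot 1 of the PTE page: the first page after the Switcher code, which
 * is exactly where populate_switcher_pte_page() below puts each CPU's regs
 * page. */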
/*:*/

static void free_switcher_pte_pages(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		free_page((long)switcher_pte_page(i));
}

/*H:520 Setting up the Switcher PTE page for given CPU is fairly easy, given
 * the CPU number and the "struct page"s for the Switcher code itself.
 *
 * Currently the Switcher is less than a page long, so "pages" is always 1. */
static __init void populate_switcher_pte_page(unsigned int cpu,
					      struct page *switcher_page[],
					      unsigned int pages)
{
	unsigned int i;
	pte_t *pte = switcher_pte_page(cpu);

	/* The first entries are easy: they map the Switcher code. */
	for (i = 0; i < pages; i++) {
		pte[i] = mk_pte(switcher_page[i],
				__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
	}

	/* The only other thing we map is this CPU's pair of pages. */
	i = pages + cpu*2;

	/* First page (Guest registers) is writable from the Guest */
	pte[i] = pfn_pte(page_to_pfn(switcher_page[i]),
			 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW));

	/* The second page contains the "struct lguest_ro_state", and is
	 * read-only. */
	pte[i+1] = pfn_pte(page_to_pfn(switcher_page[i+1]),
			   __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
}

/* We've made it through the page table code.  Perhaps our tired brains are
 * still processing the details, or perhaps we're simply glad it's over.
 *
 * If nothing else, note that all this complexity in juggling shadow page tables
 * in sync with the Guest's page tables is for one reason: for most Guests this
 * page table dance determines how bad performance will be.  This is why Xen
 * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD
 * have implemented shadow page table support directly into hardware.
 *
 * There is just one file remaining in the Host. */

R
 * the Switcher PTE page for each CPU. */
R
{
	unsigned int i;

	for_each_possible_cpu(i) {
		switcher_pte_page(i) = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!switcher_pte_page(i)) {
			free_switcher_pte_pages();
			return -ENOMEM;
		}
		populate_switcher_pte_page(i, switcher_page, pages);
	}
	return 0;
}
/*:*/

/* Cleaning up simply involves freeing the PTE page for each CPU. */
void free_pagetables(void)
{
	free_switcher_pte_pages();
}