/*P:700 The pagetable code, on the other hand, still shows the scars of
 * previous encounters.  It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual to physical mapping, but we can neither trust
 * it nor use it: we verify and convert it here then point the CPU to the
 * converted Guest pages when running the Guest. :*/

/* Copyright (C) Rusty Russell IBM Corporation 2006.
 * GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/bootparam.h>
#include "lg.h"

/*M:008 We hold a reference to pages, which prevents them from being swapped.
 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
 * to swap out.  If we had this, and a shrinker callback to trim PTE pages, we
 * could probably consider launching Guests as non-root. :*/

/*H:300
 * The Page Table Code
 *
 * We use two-level page tables for the Guest.  If you're not entirely
 * comfortable with virtual addresses, physical addresses and page tables then
 * I recommend you review arch/x86/lguest/boot.c's "Page Table Handling" (with
 * diagrams!).
 *
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables.  Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's.  (See what I mean about weird naming?  Since when do
 * shadows reflect anything?)
 *
 * Anyway, this is the most complicated part of the Host code.  There are seven
 * parts to this:
 *  (i) Looking up a page table entry when the Guest faults,
 *  (ii) Making sure the Guest stack is mapped,
 *  (iii) Setting up a page table entry when the Guest tells us one has changed,
 *  (iv) Switching page tables,
 *  (v) Flushing (throwing away) page tables,
 *  (vi) Mapping the Switcher when the Guest is about to run,
 *  (vii) Setting up the page tables initially.
 :*/
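
/* As a quick illustration of the two-level scheme (a hedged sketch: this
 * helper is not part of lguest and is never called), on the 32-bit, non-PAE
 * x86 setup this file assumes, a virtual address splits into a 10-bit PGD
 * index, a 10-bit PTE index and a 12-bit page offset.  The real code uses
 * pgd_index()/pte_index() for this; the helper only makes the split concrete. */
static inline void example_split_vaddr(unsigned long vaddr,
				       unsigned int *pgd_idx,
				       unsigned int *pte_idx,
				       unsigned int *offset)
{
	*pgd_idx = vaddr >> PGDIR_SHIFT;			/* top 10 bits */
	*pte_idx = (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);	/* next 10 bits */
	*offset  = vaddr & ~PAGE_MASK;				/* low 12 bits */
}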


/* 1024 entries in a page table page maps 1024 pages: 4MB.  The Switcher is
 * conveniently placed at the top 4MB, so it uses a separate, complete PTE
 * page.  */
#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
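/* To spell out the arithmetic behind that comment: with 4k pages and 1024
 * entries per PTE page, one PGD entry covers 1024 * 4096 bytes = 4MB.  On this
 * non-PAE layout PTRS_PER_PGD is 1024, so SWITCHER_PGD_INDEX is 1023 and the
 * Switcher's slot covers virtual addresses 0xFFC00000 and up. */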

/* We actually need a separate PTE page for each CPU.  Remember that after the
 * Switcher code itself comes two pages for each CPU, and we don't want this
 * CPU's guest to see the pages of any other CPU. */
static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)

/*H:320 The page table code is curly enough to need helper functions to keep it
 * clear and clean.
 *
 * There are two functions which return pointers to the shadow (aka "real")
 * page tables.
 *
 * spgd_addr() takes the virtual address and returns a pointer to the top-level
 * page directory entry (PGD) for that address.  Since we keep track of several
 * page tables, the "i" argument tells us which one we're interested in (it's
 * usually the current one). */
static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
{
	unsigned int index = pgd_index(vaddr);

	/* We kill any Guest trying to touch the Switcher addresses. */
	if (index >= SWITCHER_PGD_INDEX) {
		kill_guest(cpu, "attempt to access switcher pages");
		index = 0;
	}
	/* Return a pointer to the index'th pgd entry for the i'th page table. */
	return &cpu->lg->pgdirs[i].pgdir[index];
}

/* This routine then takes the page directory entry returned above, which
 * contains the address of the page table entry (PTE) page.  It then returns a
 * pointer to the PTE entry for the given address. */
static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr)
{
	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
	/* You should never call this if the PGD entry wasn't valid */
	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
	return &page[pte_index(vaddr)];
}

/* These two functions are just like the above two, except they access the
 * Guest page tables.  Hence they return a Guest address. */
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
{
	unsigned int index = vaddr >> (PGDIR_SHIFT);
	return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
}

static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
{
	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
	return gpage + pte_index(vaddr) * sizeof(pte_t);
}
/*:*/

/*M:014 get_pfn is slow: we could probably try to grab batches of pages here as
 * an optimization (ie. pre-faulting). :*/

/*H:350 This routine takes a page number given by the Guest and converts it to
 * an actual, physical page number.  It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag is set
 * and the page is read-only, or the write flag was set and the page was
 * shared so had to be copied, but we ran out of memory.
 *
 * This holds a reference to the page, so release_pte() is careful to put that
 * back. */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
	struct page *page;

	/* gup me one page at this address please! */
	if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
		return page_to_pfn(page);

	/* This value indicates failure. */
	return -1UL;
}

/*H:340 Converting a Guest page table entry to a shadow (ie. real) page table
 * entry can be a little tricky.  The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
 * number. */
static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
{
	unsigned long pfn, base, flags;

	/* The Guest sets the global flag, because it thinks that it is using
	 * PGE.  We only told it to use PGE so it would tell us whether it was
	 * flushing a kernel mapping or a userspace mapping.  We don't actually
	 * use the global bit, so throw it away. */
	flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);

	/* The Guest's pages are offset inside the Launcher. */
	base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;

	/* We need a temporary "unsigned long" variable to hold the answer from
	 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
	 * fit in spte.pfn.  get_pfn() finds the real physical number of the
	 * page, given the virtual number. */
	pfn = get_pfn(base + pte_pfn(gpte), write);
	if (pfn == -1UL) {
		kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
		/* When we destroy the Guest, we'll go through the shadow page
		 * tables and release_pte() them.  Make sure we don't think
		 * this one is valid! */
		flags = 0;
	}
	/* Now we assemble our shadow PTE from the page number and flags. */
	return pfn_pte(pfn, __pgprot(flags));
}
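/* A worked example of the conversion above (illustrative numbers only): if the
 * Guest's PTE says page 5 with _PAGE_PRESENT|_PAGE_RW, that Guest page really
 * lives at Launcher virtual page (mem_base/PAGE_SIZE + 5).  get_pfn() pins that
 * page and hands back its real physical frame number, and the shadow PTE we
 * return is that physical frame combined with the Guest's own flags (minus
 * _PAGE_GLOBAL). */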

/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(pte_t pte)
{
	/* Remember that get_user_pages_fast() took a reference to the page, in
	 * get_pfn()?  We have to put it back now. */
	if (pte_flags(pte) & _PAGE_PRESENT)
		put_page(pte_page(pte));
}
/*:*/

static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
{
	if ((pte_flags(gpte) & _PAGE_PSE) ||
	    pte_pfn(gpte) >= cpu->lg->pfn_limit)
		kill_guest(cpu, "bad page table entry");
}

static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
{
	if ((pgd_flags(gpgd) & ~_PAGE_TABLE) ||
	   (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
		kill_guest(cpu, "bad page directory entry");
}

/*H:330
 * (i) Looking up a page table entry when the Guest faults.
 *
 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 * come here.  That's because we only set up the shadow page tables lazily as
 * they're needed, so we get page faults all the time and quietly fix them up
 * and return to the Guest without it knowing.
 *
 * If we fixed up the fault (ie. we mapped the address), this routine returns
 * true.  Otherwise, it was a real fault and we need to tell the Guest. */
bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
{
	pgd_t gpgd;
	pgd_t *spgd;
	unsigned long gpte_ptr;
	pte_t gpte;
	pte_t *spte;

	/* First step: get the top-level Guest page table entry. */
	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
	/* Toplevel not present?  We can't map it in. */
	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
		return false;

	/* Now look at the matching shadow entry. */
	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
	if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
		/* No shadow entry: allocate a new shadow PTE page. */
		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
		/* This is not really the Guest's fault, but killing it is
		 * simple for this corner case. */
		if (!ptepage) {
			kill_guest(cpu, "out of memory allocating pte page");
			return false;
		}
		/* We check that the Guest pgd is OK. */
		check_gpgd(cpu, gpgd);
		/* And we copy the flags to the shadow PGD entry.  The page
		 * number in the shadow PGD is the page we just allocated. */
		*spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd));
	}

	/* OK, now we look at the lower level in the Guest page table: keep its
	 * address, because we might update it later. */
	gpte_ptr = gpte_addr(gpgd, vaddr);
	gpte = lgread(cpu, gpte_ptr, pte_t);

	/* If this page isn't in the Guest page tables, we can't page it in. */
	if (!(pte_flags(gpte) & _PAGE_PRESENT))
		return false;

	/* Check they're not trying to write to a page the Guest wants
	 * read-only (bit 2 of errcode == write). */
	if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
		return false;

	/* User access to a kernel-only page? (bit 3 == user access) */
	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
		return false;

	/* Check that the Guest PTE flags are OK, and the page number is below
	 * the pfn_limit (ie. not mapping the Launcher binary). */
	check_gpte(cpu, gpte);

	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
	gpte = pte_mkyoung(gpte);
	if (errcode & 2)
		gpte = pte_mkdirty(gpte);

	/* Get the pointer to the shadow PTE entry we're going to set. */
	spte = spte_addr(*spgd, vaddr);
	/* If there was a valid shadow PTE entry here before, we release it.
	 * This can happen with a write to a previously read-only entry. */
	release_pte(*spte);

	/* If this is a write, we insist that the Guest page is writable (the
	 * final arg to gpte_to_spte()). */
	if (pte_dirty(gpte))
		*spte = gpte_to_spte(cpu, gpte, 1);
	else
		/* If this is a read, don't set the "writable" bit in the page
		 * table entry, even if the Guest says it's writable.  That way
		 * we will come back here when a write does actually occur, so
		 * we can update the Guest's _PAGE_DIRTY flag. */
		native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));

	/* Finally, we write the Guest PTE entry back: we've set the
	 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
	lgwrite(cpu, gpte_ptr, pte_t, gpte);

	/* The fault is fixed, the page table is populated, the mapping
	 * manipulated, the result returned and the code complete.  A small
	 * delay and a trace of alliteration are the only indications the Guest
	 * has that a page fault occurred at all. */
	return true;
}

/*H:360
 * (ii) Making sure the Guest stack is mapped.
 *
 * Remember that direct traps into the Guest need a mapped Guest kernel stack.
 * pin_stack_pages() calls us here: we could simply call demand_page(), but as
 * we've seen that logic is quite long, and usually the stack pages are already
 * mapped, so it's overkill.
 *
 * This is a quick version which answers the question: is this virtual address
 * mapped by the shadow page tables, and is it writable? */
static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
{
	pgd_t *spgd;
	unsigned long flags;

	/* Look at the current top level entry: is it present? */
	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
	if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
		return false;

	/* Check the flags on the pte entry itself: it must be present and
	 * writable. */
	flags = pte_flags(*(spte_addr(*spgd, vaddr)));

	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}

/* So, when pin_stack_pages() asks us to pin a page, we check if it's already
 * in the page tables, and if not, we call demand_page() with error code 2
 * (meaning "write"). */
void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
{
	if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
		kill_guest(cpu, "bad stack page %#lx", vaddr);
}

/*H:450 If we chase down the release_pgd() code, it looks like this: */
static void release_pgd(pgd_t *spgd)
{
	/* If the entry's not present, there's nothing to release. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		unsigned int i;
		/* Converting the pfn to find the actual PTE page is easy: turn
		 * the page number into a physical address, then convert to a
		 * virtual address (easy for kernel pages like this one). */
		pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
		/* For each entry in the page, we might need to release it. */
		for (i = 0; i < PTRS_PER_PTE; i++)
			release_pte(ptepage[i]);
		/* Now we can free the page of PTEs */
		free_page((long)ptepage);
		/* And zero out the PGD entry so we never release it twice. */
		*spgd = __pgd(0);
	}
}

/*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings()
 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
 * It simply releases every PTE page from 0 up to the Guest's kernel address. */
static void flush_user_mappings(struct lguest *lg, int idx)
{
	unsigned int i;
	/* Release every pgd entry up to the kernel's address. */
	for (i = 0; i < pgd_index(lg->kernel_address); i++)
		release_pgd(lg->pgdirs[idx].pgdir + i);
}

/*H:440 (v) Flushing (throwing away) page tables,
 *
 * The Guest has a hypercall to throw away the page tables: it's used when a
 * large number of mappings have been changed. */
void guest_pagetable_flush_user(struct lg_cpu *cpu)
{
	/* Drop the userspace part of the current page table. */
	flush_user_mappings(cpu->lg, cpu->cpu_pgd);
}
/*:*/

/* We walk down the guest page tables to get a guest-physical address */
unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
{
	pgd_t gpgd;
	pte_t gpte;

	/* First step: get the top-level Guest page table entry. */
	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
	/* Toplevel not present?  We can't map it in. */
	if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) {
		kill_guest(cpu, "Bad address %#lx", vaddr);
		return -1UL;
	}

	gpte = lgread(cpu, gpte_addr(gpgd, vaddr), pte_t);
	if (!(pte_flags(gpte) & _PAGE_PRESENT))
		kill_guest(cpu, "Bad address %#lx", vaddr);

	return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
}
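/* For instance (illustrative numbers): if the Guest PTE for vaddr holds frame
 * number 0x1234 and vaddr's offset within its page is 0x56, guest_pa() returns
 * 0x1234 * 4096 + 0x56 = 0x1234056 -- a Guest-physical address, not a real
 * host-physical one. */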

/* We keep several page tables.  This is a simple routine to find the page
 * table (if any) corresponding to this top-level address the Guest has given
 * us. */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
	unsigned int i;
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
			break;
	return i;
}

/*H:435 And this is us, creating the new page directory.  If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir. */
static unsigned int new_pgdir(struct lg_cpu *cpu,
			      unsigned long gpgdir,
			      int *blank_pgdir)
{
	unsigned int next;

	/* We pick one entry at random to throw out.  Choosing the Least
	 * Recently Used might be better, but this is easy. */
	next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
	/* If it's never been allocated at all before, try now. */
	if (!cpu->lg->pgdirs[next].pgdir) {
		cpu->lg->pgdirs[next].pgdir =
					(pgd_t *)get_zeroed_page(GFP_KERNEL);
		/* If the allocation fails, just keep using the one we have */
		if (!cpu->lg->pgdirs[next].pgdir)
			next = cpu->cpu_pgd;
		else
			/* This is a blank page, so there are no kernel
			 * mappings: caller must map the stack! */
			*blank_pgdir = 1;
	}
	/* Record which Guest toplevel this shadows. */
	cpu->lg->pgdirs[next].gpgdir = gpgdir;
	/* Release all the non-kernel mappings. */
	flush_user_mappings(cpu->lg, next);

	return next;
}

/*H:430 (iv) Switching page tables
 *
 * Now we've seen all the page table setting and manipulation, let's see
 * what happens when the Guest changes page tables (ie. changes the top-level
 * pgdir).  This occurs on almost every context switch. */
void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
{
	int newpgdir, repin = 0;

	/* Look to see if we have this one already. */
	newpgdir = find_pgdir(cpu->lg, pgtable);
	/* If not, we allocate or mug an existing one: if it's a fresh one,
	 * repin gets set to 1. */
	if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
		newpgdir = new_pgdir(cpu, pgtable, &repin);
	/* Change the current pgd index to the new one. */
	cpu->cpu_pgd = newpgdir;
	/* If it was completely blank, we map in the Guest kernel stack */
	if (repin)
		pin_stack_pages(cpu);
}

/*H:470 Finally, a routine which throws away everything: all PGD entries in all
 * the shadow page tables, including the Guest's kernel mappings.  This is used
 * when we destroy the Guest. */
static void release_all_pagetables(struct lguest *lg)
{
	unsigned int i, j;

	/* Every shadow pagetable this Guest has */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].pgdir)
			/* Every PGD entry except the Switcher at the top */
			for (j = 0; j < SWITCHER_PGD_INDEX; j++)
				release_pgd(lg->pgdirs[i].pgdir + j);
}

/* We also throw away everything when a Guest tells us it's changed a kernel
 * mapping.  Since kernel mappings are in every page table, it's easiest to
 * throw them all away.  This traps the Guest in amber for a while as
 * everything faults back in, but it's rare. */
void guest_pagetable_clear_all(struct lg_cpu *cpu)
{
	release_all_pagetables(cpu->lg);
	/* We need the Guest kernel stack mapped again. */
	pin_stack_pages(cpu);
}
/*:*/
/*M:009 Since we throw away all mappings when a kernel mapping changes, our
 * performance sucks for guests using highmem.  In fact, a guest with
 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
 * usually slower than a Guest with less memory.
 *
 * This, of course, cannot be fixed.  It would take some kind of... well, I
 * don't know, but the term "puissant code-fu" comes to mind. :*/

/*H:420 This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it demand_page() will put the new entry in.  We need to do this anyway:
 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway.  This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 */
static void do_set_pte(struct lg_cpu *cpu, int idx,
		       unsigned long vaddr, pte_t gpte)
{
	/* Look up the matching shadow page directory entry. */
	pgd_t *spgd = spgd_addr(cpu, idx, vaddr);

	/* If the top level isn't present, there's no entry to update. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		/* Otherwise, we start by releasing the existing entry. */
		pte_t *spte = spte_addr(*spgd, vaddr);
		release_pte(*spte);

		/* If they're setting this entry as dirty or accessed, we might
		 * as well put that entry they've given us in now.  This shaves
		 * 10% off a copy-on-write micro-benchmark. */
		if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
			check_gpte(cpu, gpte);
			*spte = gpte_to_spte(cpu, gpte,
					     pte_flags(gpte) & _PAGE_DIRTY);
		} else
			/* Otherwise kill it and we can demand_page() it in
			 * later. */
			*spte = __pte(0);
	}
}

/*H:410 Updating a PTE entry is a little trickier.
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few).  Each of these have
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes.  So when the page table above that address changes, we update
 * all the page tables, not just the current one.  This is rare.
 *
 * The benefit is that when we have to track a new page table, we can keep all
 * the kernel mappings.  This speeds up context switch immensely. */
void guest_set_pte(struct lg_cpu *cpu,
		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
{
	/* Kernel mappings must be changed on all top levels.  Slow, but doesn't
	 * happen often. */
	if (vaddr >= cpu->lg->kernel_address) {
		unsigned int i;
		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
			if (cpu->lg->pgdirs[i].pgdir)
				do_set_pte(cpu, i, vaddr, gpte);
	} else {
		/* Is this page table one we have a shadow for? */
		int pgdir = find_pgdir(cpu->lg, gpgdir);
		if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
			/* If so, do the update. */
			do_set_pte(cpu, pgdir, vaddr, gpte);
	}
}

/*H:400
 * (iii) Setting up a page table entry when the Guest tells us one has changed.
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed.  When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
 * So with that in mind here's our code to update a (top-level) PGD entry:
 */
void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
	int pgdir;

	/* The kernel seems to try to initialize this early on: we ignore its
	 * attempts to map over the Switcher. */
	if (idx >= SWITCHER_PGD_INDEX)
		return;

	/* If they're talking about a page table we have a shadow for... */
	pgdir = find_pgdir(lg, gpgdir);
	if (pgdir < ARRAY_SIZE(lg->pgdirs))
		/* ... throw it away. */
		release_pgd(lg->pgdirs[pgdir].pgdir + idx);
}

/* Once we know how much memory we have we can construct simple identity
 * mappings (where virtual == physical) and linear mappings
 * which will get the Guest far enough into the boot to create its own.
 *
 * We lay them out of the way, just below the initrd (which is why we need to
 * know its size here). */
static unsigned long setup_pagetables(struct lguest *lg,
				      unsigned long mem,
				      unsigned long initrd_size)
{
	pgd_t __user *pgdir;
	pte_t __user *linear;
	unsigned int mapped_pages, i, linear_pages, phys_linear;
	unsigned long mem_base = (unsigned long)lg->mem_base;

	/* We have mapped_pages frames to map, so we need
	 * linear_pages page tables to map them. */
	mapped_pages = mem / PAGE_SIZE;
	linear_pages = (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE;

	/* We put the toplevel page directory page at the top of memory. */
	pgdir = (pgd_t *)(mem + mem_base - initrd_size - PAGE_SIZE);

	/* Now we use the next linear_pages pages as pte pages */
	linear = (void *)pgdir - linear_pages * PAGE_SIZE;

	/* Linear mapping is easy: put every page's address into the
	 * mapping in order. */
	for (i = 0; i < mapped_pages; i++) {
		pte_t pte;
		pte = pfn_pte(i, __pgprot(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER));
		if (copy_to_user(&linear[i], &pte, sizeof(pte)) != 0)
			return -EFAULT;
	}

	/* The top level points to the linear page table pages above.
	 * We setup the identity and linear mappings here. */
	phys_linear = (unsigned long)linear - mem_base;
	for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) {
		pgd_t pgd;
		pgd = __pgd((phys_linear + i * sizeof(pte_t)) |
			    (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));

		if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd))
		    || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET)
					   + i / PTRS_PER_PTE],
				    &pgd, sizeof(pgd)))
			return -EFAULT;
	}

	/* We return the top level (guest-physical) address: remember where
	 * this is. */
	return (unsigned long)pgdir - mem_base;
}
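
/* The resulting Guest-physical layout (assuming the usual Launcher setup, with
 * the initrd loaded at the very top of memory) looks roughly like this:
 *
 *   mem - initrd_size          : start of the initrd
 *   mem - initrd_size - 4096   : the single toplevel pgdir page
 *   ... linear_pages PTE pages sitting immediately below the pgdir ...
 *
 * Everything below that is ordinary Guest RAM, mapped both at virtual address
 * 0 (identity) and at PAGE_OFFSET (linear), since both PGD ranges point at the
 * same linear PTE pages. */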

/*H:500 (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, the Launcher tells us where the toplevel of
 * its first page table is.  We set some things up here: */
int init_guest_pagetable(struct lguest *lg)
{
	u64 mem;
	u32 initrd_size;
	struct boot_params __user *boot = (struct boot_params *)lg->mem_base;

	/* Get the Guest memory size and the ramdisk size from the boot header
	 * located at lg->mem_base (Guest address 0). */
	if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem))
	    || get_user(initrd_size, &boot->hdr.ramdisk_size))
		return -EFAULT;

	/* We start on the first shadow page table, and give it a blank PGD
	 * page. */
	lg->pgdirs[0].gpgdir = setup_pagetables(lg, mem, initrd_size);
	if (IS_ERR_VALUE(lg->pgdirs[0].gpgdir))
		return lg->pgdirs[0].gpgdir;
	lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
	if (!lg->pgdirs[0].pgdir)
		return -ENOMEM;
	lg->cpus[0].cpu_pgd = 0;
	return 0;
}

/* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
void page_table_guest_data_init(struct lg_cpu *cpu)
{
	/* We get the kernel address: above this is all kernel memory. */
	if (get_user(cpu->lg->kernel_address,
		     &cpu->lg->lguest_data->kernel_address)
	    /* We tell the Guest that it can't use the top 4MB of virtual
	     * addresses used by the Switcher. */
	    || put_user(4U*1024*1024, &cpu->lg->lguest_data->reserve_mem)
	    || put_user(cpu->lg->pgdirs[0].gpgdir, &cpu->lg->lguest_data->pgdir))
		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);

	/* In flush_user_mappings() we loop from 0 to
	 * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
	 * Switcher mappings, so check that now. */
	if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
		kill_guest(cpu, "bad kernel address %#lx",
				 cpu->lg->kernel_address);
}

/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
	unsigned int i;

	/* Throw away all page table pages. */
	release_all_pagetables(lg);
	/* Now free the top levels: free_page() can handle 0 just fine. */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		free_page((long)lg->pgdirs[i].pgdir);
}

/*H:480 (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be visible in the
 * Guest (and not the pages for other CPUs).  We have the appropriate PTE pages
 * for each CPU already set up, we just need to hook them in now we know which
 * Guest is about to run on this CPU. */
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
	pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
	pgd_t switcher_pgd;
	pte_t regs_pte;
	unsigned long pfn;

	/* Make the last PGD entry for this Guest point to the Switcher's PTE
	 * page for this CPU (with appropriate flags). */
	switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC);

	cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;

	/* We also change the Switcher PTE page.  When we're running the Guest,
	 * we want the Guest's "regs" page to appear where the first Switcher
	 * page for this CPU is.  This is an optimization: when the Switcher
	 * saves the Guest registers, it saves them into the first page of this
	 * CPU's "struct lguest_pages": if we make sure the Guest's register
	 * page is already mapped there, we don't have to copy them out
	 * again. */
	pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
	native_set_pte(&regs_pte, pfn_pte(pfn, PAGE_KERNEL));
	native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)],
			regs_pte);
}
/*:*/

static void free_switcher_pte_pages(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		free_page((long)switcher_pte_page(i));
}

/*H:520 Setting up the Switcher PTE page for a given CPU is fairly easy, given
 * the CPU number and the "struct page"s for the Switcher code itself.
 *
 * Currently the Switcher is less than a page long, so "pages" is always 1. */
static __init void populate_switcher_pte_page(unsigned int cpu,
					      struct page *switcher_page[],
					      unsigned int pages)
{
	unsigned int i;
	pte_t *pte = switcher_pte_page(cpu);

	/* The first entries are easy: they map the Switcher code. */
	for (i = 0; i < pages; i++) {
		native_set_pte(&pte[i], mk_pte(switcher_page[i],
				__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
	}

	/* The only other thing we map is this CPU's pair of pages. */
	i = pages + cpu*2;

	/* First page (Guest registers) is writable from the Guest */
	native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
			 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));

	/* The second page contains the "struct lguest_ro_state", and is
	 * read-only. */
	native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
			   __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
}
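
/* So for a hypothetical CPU 1 with a one-page Switcher (pages == 1), the PTE
 * page filled in above ends up as: entry 0 = the Switcher code (read-only),
 * entries 1 and 2 left unmapped (those slots correspond to CPU 0's pair),
 * entry 3 = CPU 1's Guest register page (writable), and entry 4 = CPU 1's
 * read-only "struct lguest_ro_state". */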

/* We've made it through the page table code.  Perhaps our tired brains are
 * still processing the details, or perhaps we're simply glad it's over.
 *
 * If nothing else, note that all this complexity in juggling shadow page tables
 * in sync with the Guest's page tables is for one reason: for most Guests this
 * page table dance determines how bad performance will be.  This is why Xen
 * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD
 * have implemented shadow page table support directly into hardware.
 *
 * There is just one file remaining in the Host. */

R
 * the Switcher PTE page for each CPU. */
__init int init_pagetables(struct page **switcher_page, unsigned int pages)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		switcher_pte_page(i) = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!switcher_pte_page(i)) {
			free_switcher_pte_pages();
			return -ENOMEM;
		}
		populate_switcher_pte_page(i, switcher_page, pages);
	}
	return 0;
}
/*:*/

/* Cleaning up simply involves freeing the PTE page for each CPU. */
void free_pagetables(void)
{
	free_switcher_pte_pages();
}