/*P:010
 * A hypervisor allows multiple Operating Systems to run on a single machine.
 * To quote David Wheeler: "Any problem in computer science can be solved with
 * another layer of indirection."
 *
 * We keep things simple in two ways.  First, we start with a normal Linux
 * kernel and insert a module (lg.ko) which allows us to run other Linux
 * kernels the same way we'd run processes.  We call the first kernel the Host,
 * and the others the Guests.  The program which sets up and configures Guests
 * (such as the example in Documentation/lguest/lguest.c) is called the
 * Launcher.
 *
 * Secondly, we only run specially modified Guests, not normal kernels: setting
 * CONFIG_LGUEST_GUEST to "y" compiles this file into the kernel so it knows
 * how to be a Guest at boot time.  This means that you can use the same kernel
 * you boot normally (ie. as a Host) as a Guest.
 *
 * These Guests know that they cannot do privileged operations, such as disable
 * interrupts, and that they have to ask the Host to do such things explicitly.
 * This file consists of all the replacements for such low-level native
 * hardware operations: these special Guest versions call the Host.
 *
 * So how does the kernel know it's a Guest?  We'll see that later, but let's
 * just say that we end up here, where we replace the native functions in the
 * various "paravirt" structures with our Guest versions, then boot like
 * normal. :*/

/*
 * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/start_kernel.h>
#include <linux/string.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/lguest.h>
#include <linux/lguest_launcher.h>
#include <linux/virtio_console.h>
#include <linux/pm.h>
#include <asm/apic.h>
#include <asm/lguest.h>
#include <asm/paravirt.h>
#include <asm/param.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/e820.h>
#include <asm/mce.h>
#include <asm/io.h>
#include <asm/i387.h>
#include <asm/stackprotector.h>
#include <asm/reboot.h>		/* for struct machine_ops */

/*G:010 Welcome to the Guest!
 *
 * The Guest in our tale is a simple creature: identical to the Host but
 * behaving in simplified but equivalent ways.  In particular, the Guest is the
 * same kernel as the Host (or at least, built from the same source code). :*/

struct lguest_data lguest_data = {
	.hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF },
	.noirq_start = (u32)lguest_noirq_start,
	.noirq_end = (u32)lguest_noirq_end,
	.kernel_address = PAGE_OFFSET,
	.blocked_interrupts = { 1 }, /* Block timer interrupts */
	.syscall_vec = SYSCALL_VECTOR,
};

/*G:037 async_hcall() is pretty simple: I'm quite proud of it really.  We have
 * a ring buffer of stored hypercalls which the Host will run through next time
 * we do a normal hypercall.  Each entry in the ring has 5 slots for the
 * hypercall arguments, and a "hcall_status" word which is 0 if the call is
 * ready to go, and 255 once the Host has finished with it.
 *
 * If we come around to a slot which hasn't been finished, then the table is
 * full and we just make the hypercall directly.  This has the nice side
 * effect of causing the Host to run all the stored calls in the ring buffer
 * which empties it for next time! */
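
/* Purely as an illustration (a sketch of the layout, not extra code), the
 * ring described above looks like this:
 *
 *	hcalls[i]       = { .arg0 = call, .arg1 .. .arg4 = its arguments }
 *	hcall_status[i] = 0xFF  -> slot is free, the Guest may fill it
 *	hcall_status[i] = 0     -> slot is ready, the Host will run it and
 *	                           set it back to 0xFF when done
 *
 * async_hcall() below walks this ring with its static "next_call" index. */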
static void async_hcall(unsigned long call, unsigned long arg1,
			unsigned long arg2, unsigned long arg3,
			unsigned long arg4)
{
	/* Note: This code assumes we're uniprocessor. */
	static unsigned int next_call;
	unsigned long flags;

	/* Disable interrupts if not already disabled: we don't want an
	 * interrupt handler making a hypercall while we're already doing
	 * one! */
	local_irq_save(flags);
	if (lguest_data.hcall_status[next_call] != 0xFF) {
		/* Table full, so do normal hcall which will flush table. */
		kvm_hypercall4(call, arg1, arg2, arg3, arg4);
	} else {
		lguest_data.hcalls[next_call].arg0 = call;
		lguest_data.hcalls[next_call].arg1 = arg1;
		lguest_data.hcalls[next_call].arg2 = arg2;
		lguest_data.hcalls[next_call].arg3 = arg3;
		lguest_data.hcalls[next_call].arg4 = arg4;
		/* Arguments must all be written before we mark it to go */
		wmb();
		lguest_data.hcall_status[next_call] = 0;
		if (++next_call == LHCALL_RING_SIZE)
			next_call = 0;
	}
	local_irq_restore(flags);
}

/*G:035 Notice the lazy_hcall() functions below, rather than hcall().  This is
 * our first real optimization trick!
 *
 * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
 * them as a batch when lazy_mode is eventually turned off.  Because hypercalls
 * are reasonably expensive, batching them up makes sense.  For example, a
 * large munmap might update dozens of page table entries: that code calls
 * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
 * lguest_leave_lazy_mmu_mode().
 *
 * So, when we're in lazy mode, we call async_hcall() to store the call for
 * future processing: */
static void lazy_hcall1(unsigned long call,
		       unsigned long arg1)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		kvm_hypercall1(call, arg1);
	else
		async_hcall(call, arg1, 0, 0, 0);
}

static void lazy_hcall2(unsigned long call,
		       unsigned long arg1,
		       unsigned long arg2)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		kvm_hypercall2(call, arg1, arg2);
	else
		async_hcall(call, arg1, arg2, 0, 0);
}

static void lazy_hcall3(unsigned long call,
		       unsigned long arg1,
		       unsigned long arg2,
		       unsigned long arg3)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		kvm_hypercall3(call, arg1, arg2, arg3);
	else
		async_hcall(call, arg1, arg2, arg3, 0);
}

#ifdef CONFIG_X86_PAE
static void lazy_hcall4(unsigned long call,
		       unsigned long arg1,
		       unsigned long arg2,
		       unsigned long arg3,
		       unsigned long arg4)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		kvm_hypercall4(call, arg1, arg2, arg3, arg4);
	else
		async_hcall(call, arg1, arg2, arg3, arg4);
}
#endif
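
/* To make the batching concrete, here is a rough sketch (the real call sites
 * live in generic mm code, so treat the exact shape as illustrative) of how a
 * pagetable-heavy path drives the lazy machinery:
 *
 *	arch_enter_lazy_mmu_mode();		// -> paravirt_enter_lazy_mmu()
 *	for (each pte in the range)
 *		set_pte_at(mm, addr, ptep, pte);// -> lazy_hcall*() queues work
 *	arch_leave_lazy_mmu_mode();		// -> lguest_leave_lazy_mmu_mode()
 *						//    flushes the stored calls
 */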

/* When lazy mode is turned off reset the per-cpu lazy mode variable and then
 * issue the do-nothing hypercall to flush any stored calls. */
static void lguest_leave_lazy_mmu_mode(void)
{
	kvm_hypercall0(LHCALL_FLUSH_ASYNC);
	paravirt_leave_lazy_mmu();
}

static void lguest_end_context_switch(struct task_struct *next)
{
	kvm_hypercall0(LHCALL_FLUSH_ASYNC);
	paravirt_end_context_switch(next);
}

/*G:032
 * After that diversion we return to our first native-instruction
 * replacements: four functions for interrupt control.
 *
 * The simplest way of implementing these would be to have "turn interrupts
 * off" and "turn interrupts on" hypercalls.  Unfortunately, this is too slow:
 * these are by far the most commonly called functions of those we override.
 *
 * So instead we keep an "irq_enabled" field inside our "struct lguest_data",
 * which the Guest can update with a single instruction.  The Host knows to
 * check there before it tries to deliver an interrupt.
 */

/* save_flags() is expected to return the processor state (ie. "flags").  The
 * flags word contains all kind of stuff, but in practice Linux only cares
 * about the interrupt flag.  Our "save_flags()" just returns that. */
static unsigned long save_fl(void)
{
	return lguest_data.irq_enabled;
}

/* Interrupts go off... */
static void irq_disable(void)
{
	lguest_data.irq_enabled = 0;
}

/* Let's pause a moment.  Remember how I said these are called so often?
 * Jeremy Fitzhardinge optimized them so hard early in 2009 that he had to
 * break some rules.  In particular, these functions are assumed to save their
 * own registers if they need to: normal C functions assume they can trash the
 * eax register.  To use normal C functions, we use
 * PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the
 * C function, then restores it. */
PV_CALLEE_SAVE_REGS_THUNK(save_fl);
PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
/*:*/

/* These are in i386_head.S */
extern void lg_irq_enable(void);
extern void lg_restore_fl(unsigned long flags);

/*M:003 Note that we don't check for outstanding interrupts when we re-enable
 * them (or when we unmask an interrupt).  This seems to work for the moment,
 * since interrupts are rare and we'll just get the interrupt on the next timer
 * tick, but now that we can run with CONFIG_NO_HZ, we should revisit this.
 * One way would be to put the "irq_enabled" field in a page by itself, and
 * have the Host write-protect it when an interrupt comes in when irqs are
 * disabled.  There will then be a page fault as soon as interrupts are
 * re-enabled.
 *
 * A better method is to implement soft interrupt disable generally for x86:
 * instead of disabling interrupts, we set a flag.  If an interrupt does come
 * in, we then disable them for real.  This is uncommon, so we could simply use
 * a hypercall for interrupt control and not worry about efficiency.  A sketch
 * of that idea follows. :*/
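
/* Purely illustrative pseudo-code for that soft-disable idea (nothing here
 * implements it; the names are made up for the sketch):
 *
 *	static bool soft_disabled, irq_pending;
 *
 *	void soft_irq_disable(void) { soft_disabled = true; }
 *	void soft_irq_enable(void)
 *	{
 *		soft_disabled = false;
 *		if (irq_pending)	// one arrived while "disabled"
 *			deliver_pending_irq();
 *	}
 *	// and in the interrupt entry path:
 *	//	if (soft_disabled) { irq_pending = true; hard_disable(); return; }
 */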

/*G:034
 * The Interrupt Descriptor Table (IDT).
 *
 * The IDT tells the processor what to do when an interrupt comes in.  Each
 * entry in the table is a 64-bit descriptor: this holds the privilege level,
 * address of the handler, and... well, who cares?  The Guest just asks the
 * Host to make the change anyway, because the Host controls the real IDT.
 */
static void lguest_write_idt_entry(gate_desc *dt,
				   int entrynum, const gate_desc *g)
{
	/* The gate_desc structure is 8 bytes long: we hand it to the Host in
	 * two 32-bit chunks.  The whole 32-bit kernel used to hand descriptors
	 * around like this; typesafety wasn't a big concern in Linux's early
	 * years. */
	u32 *desc = (u32 *)g;
	/* Keep the local copy up to date. */
	native_write_idt_entry(dt, entrynum, g);
	/* Tell Host about this new entry. */
	kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]);
}

/* Changing to a different IDT is very rare: we keep the IDT up-to-date every
 * time it is written, so we can simply loop through all entries and tell the
 * Host about them. */
static void lguest_load_idt(const struct desc_ptr *desc)
{
	unsigned int i;
	struct desc_struct *idt = (void *)desc->address;

	for (i = 0; i < (desc->size+1)/8; i++)
		kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b);
}

/*
 * The Global Descriptor Table.
 *
 * The Intel architecture defines another table, called the Global Descriptor
 * Table (GDT).  You tell the CPU where it is (and its size) using the "lgdt"
 * instruction, and then several other instructions refer to entries in the
 * table.  There are three entries which the Switcher needs, so the Host simply
 * controls the entire thing and the Guest asks it to make changes using the
 * LOAD_GDT hypercall.
 *
 * This is exactly like the IDT code.
 */
static void lguest_load_gdt(const struct desc_ptr *desc)
{
	unsigned int i;
	struct desc_struct *gdt = (void *)desc->address;

	for (i = 0; i < (desc->size+1)/8; i++)
		kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b);
}

/* For a single GDT entry which changes, we do the lazy thing: alter our GDT,
 * then tell the Host to reload the entire thing.  This operation is so rare
 * that this naive implementation is reasonable. */
static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
				   const void *desc, int type)
{
	native_write_gdt_entry(dt, entrynum, desc, type);
	/* Tell Host about this new entry. */
	kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, entrynum,
		       dt[entrynum].a, dt[entrynum].b);
}

/* OK, I lied.  There are three "thread local storage" GDT entries which change
 * on every context switch (these three entries are how glibc implements
 * __thread variables).  So we have a hypercall specifically for this case. */
static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/* There's one problem which normal hardware doesn't have: the Host
	 * can't handle us removing entries we're currently using.  So we clear
	 * the GS register here: if it's needed it'll be reloaded anyway. */
	lazy_load_gs(0);
	lazy_hcall2(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu);
}

/*G:038 That's enough excitement for now, back to ploughing through each of
 * the different pv_ops structures (we're about 1/3 of the way through).
 *
 * This is the Local Descriptor Table, another weird Intel thingy.  Linux only
 * uses this for some strange applications like Wine.  We don't do anything
 * here, so they'll get an informative and friendly Segmentation Fault. */
static void lguest_set_ldt(const void *addr, unsigned entries)
{
}

/* This loads a GDT entry into the "Task Register": that entry points to a
 * structure called the Task State Segment.  Some comments scattered through
 * the kernel code indicate that this was used for task switching in ages past,
 * along with blood sacrifice and astrology.
 *
 * Now there's nothing interesting in here that we don't get told elsewhere.
 * But the native version uses the "ltr" instruction, which makes the Host
 * complain to the Guest about a Segmentation Fault and it'll oops.  So we
 * override the native version with a do-nothing version. */
static void lguest_load_tr_desc(void)
{
}

/* The "cpuid" instruction is a way of querying both the CPU identity
 * (manufacturer, model, etc) and its features.  It was introduced before the
 * Pentium in 1993 and keeps getting extended by Intel, AMD and others.
 * As you might imagine, after a decade and a half of this treatment, it is
 * now a giant ball of hair.  Its entry in the current Intel manual runs to
 * 28 pages.
 *
 * This instruction even has its own Wikipedia entry.  The Wikipedia entry
 * has been translated into 4 languages.  I am not making this up!
 *
 * We could get funky here and identify ourselves as "GenuineLguest", but
 * instead we just use the real "cpuid" instruction.  Then I pretty much turned
 * off feature bits until the Guest booted.  (Don't say that: you'll damage
 * lguest sales!)  Shut up, inner voice!  (Hey, just pointing out that this is
 * hardly future proof.)  No one's listening!  They don't like you anyway,
 * parenthetic weirdo!
 *
 * Replacing the cpuid so we can turn features off is great for the kernel, but
 * anyone (including userspace) can just use the raw "cpuid" instruction and
 * the Host won't even notice since it isn't privileged.  So we try not to get
 * too worked up about it. */
static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
			 unsigned int *cx, unsigned int *dx)
{
	int function = *ax;

	native_cpuid(ax, bx, cx, dx);
	switch (function) {
	case 0: /* ID and highest CPUID.  Futureproof a little by sticking to
		 * older ones. */
		if (*ax > 5)
			*ax = 5;
		break;
	case 1:	/* Basic feature request. */
		/* We only allow kernel to see SSE3, CMPXCHG16B and SSSE3 */
		*cx &= 0x00002201;
		/* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU, PAE. */
		*dx &= 0x07808151;
		/* The Host can do a nice optimization if it knows that the
		 * kernel mappings (addresses above 0xC0000000 or whatever
		 * PAGE_OFFSET is set to) haven't changed.  But Linux calls
		 * flush_tlb_user() for both user and kernel mappings unless
		 * the Page Global Enable (PGE) feature bit is set. */
		*dx |= 0x00002000;
		/* We also lie, and say we're family id 5.  6 or greater
		 * leads to a rdmsr in early_init_intel which we can't handle.
		 * Family ID is returned as bits 8-12 in ax. */
		*ax &= 0xFFFFF0FF;
		*ax |= 0x00000500;
		break;
	case 0x80000000:
		/* Futureproof this a little: if they ask how much extended
		 * processor information there is, limit it to known fields. */
		if (*ax > 0x80000008)
			*ax = 0x80000008;
		break;
	case 0x80000001:
		/* Here we should fix nx cap depending on host. */
		/* For this version of PAE, we just clear NX bit. */
		*dx &= ~(1 << 20);
		break;
	}
}

/* Intel has four control registers, imaginatively named cr0, cr2, cr3 and cr4.
 * I assume there's a cr1, but it hasn't bothered us yet, so we'll not bother
 * it.  The Host needs to know when the Guest wants to change them, so we have
 * a whole series of functions like read_cr0() and write_cr0().
 *
 * We start with cr0.  cr0 allows you to turn on and off all kinds of basic
 * features, but Linux only really cares about one: the horrifically-named Task
 * Switched (TS) bit at bit 3 (ie. 8).
 *
 * What does the TS bit do?  Well, it causes the CPU to trap (interrupt 7) if
 * the floating point unit is used.  Which allows us to restore FPU state
 * lazily after a task switch, and Linux uses that gratefully, but wouldn't a
 * name like "FPUTRAP bit" be a little less cryptic?
 *
 * We store cr0 locally because the Host never changes it.  The Guest sometimes
 * wants to read it and we'd prefer not to bother the Host unnecessarily. */
static unsigned long current_cr0;
static void lguest_write_cr0(unsigned long val)
{
	lazy_hcall1(LHCALL_TS, val & X86_CR0_TS);
	current_cr0 = val;
}

static unsigned long lguest_read_cr0(void)
{
	return current_cr0;
}

/* Intel provided a special instruction to clear the TS bit for people too cool
 * to use write_cr0() to do it.  This "clts" instruction is faster, because all
 * the vowels have been optimized out. */
static void lguest_clts(void)
{
	lazy_hcall1(LHCALL_TS, 0);
	current_cr0 &= ~X86_CR0_TS;
}

/* cr2 is the virtual address of the last page fault, which the Guest only ever
 * reads.  The Host kindly writes this into our "struct lguest_data", so we
 * just read it out of there. */
static unsigned long lguest_read_cr2(void)
{
	return lguest_data.cr2;
}

/* See lguest_set_pte() below. */
static bool cr3_changed = false;

/* cr3 is the current toplevel pagetable page: the principle is the same as
 * cr0.  Keep a local copy, and tell the Host when it changes.  The only
 * difference is that our local copy is in lguest_data because the Host needs
 * to set it upon our initial hypercall. */
static void lguest_write_cr3(unsigned long cr3)
{
	lguest_data.pgdir = cr3;
	lazy_hcall1(LHCALL_NEW_PGTABLE, cr3);
	cr3_changed = true;
}

static unsigned long lguest_read_cr3(void)
{
	return lguest_data.pgdir;
}

/* cr4 is used to enable and disable PGE, but we don't care. */
static unsigned long lguest_read_cr4(void)
{
	return 0;
}

static void lguest_write_cr4(unsigned long val)
{
}

/*
 * Page Table Handling.
 *
 * Now would be a good time to take a rest and grab a coffee or similarly
 * relaxing stimulant.  The easy parts are behind us, and the trek gradually
 * winds uphill from here.
 *
 * Quick refresher: memory is divided into "pages" of 4096 bytes each.  The CPU
 * maps virtual addresses to physical addresses using "page tables".  We could
 * use one huge index of 1 million entries: each address is 4 bytes, so that's
 * 1024 pages just to hold the page tables.   But since most virtual addresses
 * are unused, we use a two level index which saves space.  The cr3 register
 * contains the physical address of the top level "page directory" page, which
 * contains physical addresses of up to 1024 second-level pages.  Each of these
 * second level pages contains up to 1024 physical addresses of actual pages,
 * or Page Table Entries (PTEs).
 *
 * Here's a diagram, where arrows indicate physical addresses:
 *
 * cr3 ---> +---------+
 *	    |  	   --------->+---------+
 *	    |	      |	     | PADDR1  |
 *	  Top-level   |	     | PADDR2  |
 *	  (PMD) page  |	     | 	       |
 *	    |	      |	   Lower-level |
 *	    |	      |	   (PTE) page  |
 *	    |	      |	     |	       |
 *	      ....    	     	 ....
 *
 * So to convert a virtual address to a physical address, we look up the top
 * level, which points us to the second level, which gives us the physical
 * address of that page.  If the top level entry was not present, or the second
 * level entry was not present, then the virtual address is invalid (we
 * say "the page was not mapped").
 *
 * Put another way, a 32-bit virtual address is divided up like so:
 *
 *  1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 * |<---- 10 bits ---->|<---- 10 bits ---->|<------ 12 bits ------>|
 *    Index into top     Index into second      Offset within page
 *  page directory page    pagetable page
 *
 * The kernel spends a lot of time changing both the top-level page directory
 * and lower-level pagetable pages.  The Guest doesn't know physical addresses,
 * so while it maintains these page tables exactly like normal, it also needs
 * to keep the Host informed whenever it makes a change: the Host will create
 * the real page tables based on the Guests'.
 */
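
/* A quick worked example of that split (illustrative only; the arithmetic is
 * for the two-level, non-PAE layout pictured above):
 *
 *	unsigned long vaddr = 0xC0801234;
 *	pgd index = vaddr >> 22;		-> top 10 bits	= 0x302
 *	pte index = (vaddr >> 12) & 0x3FF;	-> next 10 bits	= 0x001
 *	offset    = vaddr & 0xFFF;		-> low 12 bits	= 0x234
 *
 * With CONFIG_X86_PAE there is a tiny extra top level and the split becomes
 * 2 + 9 + 9 + 12 bits instead, but the principle is identical. */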

/* The Guest calls this to set a second-level entry (pte), ie. to map a page
 * into a process' address space.  We set the entry then tell the Host the
 * toplevel and address this corresponds to.  The Guest uses one pagetable per
 * process, so we need to tell the Host which one we're changing (mm->pgd). */
static void lguest_pte_update(struct mm_struct *mm, unsigned long addr,
			       pte_t *ptep)
{
#ifdef CONFIG_X86_PAE
	lazy_hcall4(LHCALL_SET_PTE, __pa(mm->pgd), addr,
		    ptep->pte_low, ptep->pte_high);
#else
	lazy_hcall3(LHCALL_SET_PTE, __pa(mm->pgd), addr, ptep->pte_low);
#endif
}

static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	native_set_pte(ptep, pteval);
	lguest_pte_update(mm, addr, ptep);
}

/* The Guest calls lguest_set_pud to set a top-level entry and lguest_set_pmd
 * to set a middle-level entry when PAE is activated.
 * Again, we set the entry then tell the Host which page we changed,
 * and the index of the entry we changed. */
#ifdef CONFIG_X86_PAE
static void lguest_set_pud(pud_t *pudp, pud_t pudval)
{
	native_set_pud(pudp, pudval);

	/* 32 bytes aligned pdpt address and the index. */
	lazy_hcall2(LHCALL_SET_PGD, __pa(pudp) & 0xFFFFFFE0,
		   (__pa(pudp) & 0x1F) / sizeof(pud_t));
}

static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	native_set_pmd(pmdp, pmdval);
	lazy_hcall2(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK,
		   (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
}
#else

/* The Guest calls lguest_set_pmd to set a top-level entry when PAE is not
 * activated. */
static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	native_set_pmd(pmdp, pmdval);
	lazy_hcall2(LHCALL_SET_PGD, __pa(pmdp) & PAGE_MASK,
		   (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
}
#endif

/* There are a couple of legacy places where the kernel sets a PTE, but we
 * don't know the top level any more.  This is useless for us, since we don't
 * know which pagetable is changing or what address, so we just tell the Host
 * to forget all of them.  Fortunately, this is very rare.
 *
 * ... except in early boot when the kernel sets up the initial pagetables,
 * which makes booting astonishingly slow: 1.83 seconds!  So we don't even tell
 * the Host anything changed until we've done the first page table switch,
 * which brings boot back to 0.25 seconds. */
static void lguest_set_pte(pte_t *ptep, pte_t pteval)
{
	native_set_pte(ptep, pteval);
	if (cr3_changed)
		lazy_hcall1(LHCALL_FLUSH_TLB, 1);
}

#ifdef CONFIG_X86_PAE
static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte_atomic(ptep, pte);
	if (cr3_changed)
		lazy_hcall1(LHCALL_FLUSH_TLB, 1);
}

void lguest_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	native_pte_clear(mm, addr, ptep);
	lguest_pte_update(mm, addr, ptep);
}

void lguest_pmd_clear(pmd_t *pmdp)
{
	lguest_set_pmd(pmdp, __pmd(0));
}
#endif

/* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
 * native page table operations.  On native hardware you can set a new page
 * table entry whenever you want, but if you want to remove one you have to do
 * a TLB flush (a TLB is a little cache of page table entries kept by the CPU).
 *
 * So the lguest_set_pte_at() and lguest_set_pmd() functions above are only
 * called when a valid entry is written, not when it's removed (ie. marked not
 * present).  Instead, this is where we come when the Guest wants to remove a
 * page table entry: we tell the Host to set that entry to 0 (ie. the present
 * bit is zero). */
static void lguest_flush_tlb_single(unsigned long addr)
{
	/* Simply set it to zero: if it was not, it will fault back in. */
	lazy_hcall3(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0);
}

/* This is what happens after the Guest has removed a large number of entries.
 * This tells the Host that any of the page table entries for userspace might
 * have changed, ie. virtual addresses below PAGE_OFFSET. */
static void lguest_flush_tlb_user(void)
{
	lazy_hcall1(LHCALL_FLUSH_TLB, 0);
}

/* This is called when the kernel page tables have changed.  That's not very
 * common (unless the Guest is using highmem, which makes the Guest extremely
 * slow), so it's worth separating this from the user flushing above. */
static void lguest_flush_tlb_kernel(void)
{
	lazy_hcall1(LHCALL_FLUSH_TLB, 1);
}

/*
 * The Unadvanced Programmable Interrupt Controller.
 *
 * This is an attempt to implement the simplest possible interrupt controller.
 * I spent some time looking through routines like set_irq_chip_and_handler,
 * set_irq_chip_and_handler_name, set_irq_chip_data and set_phasers_to_stun and
 * I *think* this is as simple as it gets.
 *
 * We can tell the Host what interrupts we want blocked ready for using the
 * lguest_data.interrupts bitmap, so disabling (aka "masking") them is as
 * simple as setting a bit.  We don't actually "ack" interrupts as such, we
 * just mask and unmask them.  I wonder if we should be cleverer?
 */
static void disable_lguest_irq(unsigned int irq)
{
	set_bit(irq, lguest_data.blocked_interrupts);
}

static void enable_lguest_irq(unsigned int irq)
{
	clear_bit(irq, lguest_data.blocked_interrupts);
}

/* This structure describes the lguest IRQ controller. */
static struct irq_chip lguest_irq_controller = {
	.name		= "lguest",
	.mask		= disable_lguest_irq,
	.mask_ack	= disable_lguest_irq,
	.unmask		= enable_lguest_irq,
};

/* This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
 * interrupt (except 128, which is used for system calls), and then tells the
 * Linux infrastructure that each interrupt is controlled by our level-based
 * lguest interrupt controller. */
static void __init lguest_init_IRQ(void)
{
	unsigned int i;

	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
		/* Some systems map "vectors" to interrupts weirdly.  Lguest has
		 * a straightforward 1 to 1 mapping, so force that here. */
		__get_cpu_var(vector_irq)[i] = i - FIRST_EXTERNAL_VECTOR;
		if (i != SYSCALL_VECTOR)
			set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
	}
	/* This call is required to set up for 4k stacks, where we have
	 * separate stacks for hard and soft interrupts. */
	irq_ctx_init(smp_processor_id());
}

void lguest_setup_irq(unsigned int irq)
{
	irq_to_desc_alloc_node(irq, 0);
	set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
				      handle_level_irq, "level");
}

/*
 * Time.
 *
 * It would be far better for everyone if the Guest had its own clock, but
 * until then the Host gives us the time on every interrupt.
 */
static unsigned long lguest_get_wallclock(void)
{
	return lguest_data.time.tv_sec;
}

/* The TSC is an Intel thing called the Time Stamp Counter.  The Host tells us
 * what speed it runs at, or 0 if it's unusable as a reliable clock source.
 * This matches what we want here: if we return 0 from this function, the x86
 * TSC clock will give up and not register itself. */
static unsigned long lguest_tsc_khz(void)
{
	return lguest_data.tsc_khz;
}

/* If we can't use the TSC, the kernel falls back to our lower-priority
 * "lguest_clock", where we read the time value given to us by the Host. */
static cycle_t lguest_clock_read(struct clocksource *cs)
{
	unsigned long sec, nsec;

	/* Since the time is in two parts (seconds and nanoseconds), we risk
	 * reading it just as it's changing from 99 & 0.999999999 to 100 and 0,
	 * and getting 99 and 0.  As Linux tends to come apart under the stress
	 * of time travel, we must be careful: */
	do {
		/* First we read the seconds part. */
		sec = lguest_data.time.tv_sec;
		/* This read memory barrier tells the compiler and the CPU that
		 * this can't be reordered: we have to complete the above
		 * before going on. */
		rmb();
		/* Now we read the nanoseconds part. */
		nsec = lguest_data.time.tv_nsec;
		/* Make sure we've done that. */
		rmb();
		/* Now if the seconds part has changed, try again. */
	} while (unlikely(lguest_data.time.tv_sec != sec));

	/* Our lguest clock is in real nanoseconds. */
	return sec*1000000000ULL + nsec;
}

/* This is the fallback clocksource: lower priority than the TSC clocksource. */
static struct clocksource lguest_clock = {
	.name		= "lguest",
	.rating		= 200,
	.read		= lguest_clock_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 1 << 22,
	.shift		= 22,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
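
/* A note on those mult/shift values (my reading of the clocksource core, so
 * treat it as a sketch): raw counter deltas are converted to nanoseconds
 * roughly as
 *
 *	ns = (delta * mult) >> shift;
 *
 * Since lguest_clock_read() already returns nanoseconds, picking
 * mult = 1 << 22 and shift = 22 makes that conversion a no-op. */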

/* We also need a "struct clock_event_device": Linux asks us to set it to go
 * off some time in the future.  Actually, James Morris figured all this out, I
 * just applied the patch. */
static int lguest_clockevent_set_next_event(unsigned long delta,
                                           struct clock_event_device *evt)
{
	/* FIXME: I don't think this can ever happen, but James tells me he had
	 * to put this code in.  Maybe we should remove it now.  Anyone? */
	if (delta < LG_CLOCK_MIN_DELTA) {
		if (printk_ratelimit())
			printk(KERN_DEBUG "%s: small delta %lu ns\n",
			       __func__, delta);
		return -ETIME;
	}

	/* Please wake us this far in the future. */
	kvm_hypercall1(LHCALL_SET_CLOCKEVENT, delta);
	return 0;
}

static void lguest_clockevent_set_mode(enum clock_event_mode mode,
                                      struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/* A 0 argument shuts the clock down. */
		kvm_hypercall0(LHCALL_SET_CLOCKEVENT);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/* This is what we expect. */
		break;
	case CLOCK_EVT_MODE_PERIODIC:
		BUG();
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

/* This describes our primitive timer chip. */
static struct clock_event_device lguest_clockevent = {
	.name                   = "lguest",
	.features               = CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event         = lguest_clockevent_set_next_event,
	.set_mode               = lguest_clockevent_set_mode,
	.rating                 = INT_MAX,
	.mult                   = 1,
	.shift                  = 0,
	.min_delta_ns           = LG_CLOCK_MIN_DELTA,
	.max_delta_ns           = LG_CLOCK_MAX_DELTA,
};

/* This is the Guest timer interrupt handler (hardware interrupt 0).  We just
 * call the clockevent infrastructure and it does whatever needs doing. */
static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
{
	unsigned long flags;

	/* Don't interrupt us while this is running. */
	local_irq_save(flags);
	lguest_clockevent.event_handler(&lguest_clockevent);
	local_irq_restore(flags);
}

/* At some point in the boot process, we get asked to set up our timing
 * infrastructure.  The kernel doesn't expect timer interrupts before this, but
 * we cleverly initialized the "blocked_interrupts" field of "struct
 * lguest_data" so that timer interrupts were blocked until now. */
static void lguest_time_init(void)
{
	/* Set up the timer interrupt (0) to go to our simple timer routine */
	set_irq_handler(0, lguest_time_irq);

	clocksource_register(&lguest_clock);

	/* We can't set cpumask in the initializer: damn C limitations!  Set it
	 * here and register our timer device. */
	lguest_clockevent.cpumask = cpumask_of(0);
	clockevents_register_device(&lguest_clockevent);

	/* Finally, we unblock the timer interrupt. */
	enable_lguest_irq(0);
}

/*
 * Miscellaneous bits and pieces.
 *
 * Here is an oddball collection of functions which the Guest needs for things
 * to work.  They're pretty simple.
 */

/* The Guest needs to tell the Host what stack it expects traps to use.  For
 * native hardware, this is part of the Task State Segment mentioned above in
 * lguest_load_tr_desc(), but to help hypervisors there's this special call.
 *
 * We tell the Host the segment we want to use (__KERNEL_DS is the kernel data
 * segment), the privilege level (we're privilege level 1, the Host is 0 and
 * will not tolerate us trying to use that), the stack pointer, and the number
 * of pages in the stack. */
static void lguest_load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	lazy_hcall3(LHCALL_SET_STACK, __KERNEL_DS | 0x1, thread->sp0,
		   THREAD_SIZE / PAGE_SIZE);
}

/* Let's just say, I wouldn't do debugging under a Guest. */
static void lguest_set_debugreg(int regno, unsigned long value)
{
	/* FIXME: Implement */
}

/* There are times when the kernel wants to make sure that no memory writes are
 * caught in the cache (that they've all reached real hardware devices).  This
 * doesn't matter for the Guest which has virtual hardware.
 *
 * On the Pentium 4 and above, cpuid() indicates that the Cache Line Flush
 * (clflush) instruction is available and the kernel uses that.  Otherwise, it
 * uses the older "Write Back and Invalidate Cache" (wbinvd) instruction.
 * Unlike clflush, wbinvd can only be run at privilege level 0.  So we can
 * ignore clflush, but replace wbinvd.
 */
static void lguest_wbinvd(void)
{
}

/* If the Guest expects to have an Advanced Programmable Interrupt Controller,
 * we play dumb by ignoring writes and returning 0 for reads.  So it's no
 * longer Programmable nor Controlling anything, and I don't think 8 lines of
 * code qualifies for Advanced.  It will also never interrupt anything.  It
 * does, however, allow us to get through the Linux boot code. */
#ifdef CONFIG_X86_LOCAL_APIC
static void lguest_apic_write(u32 reg, u32 v)
{
}

static u32 lguest_apic_read(u32 reg)
{
	return 0;
}

static u64 lguest_apic_icr_read(void)
{
	return 0;
}

static void lguest_apic_icr_write(u32 low, u32 id)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}

static void lguest_apic_wait_icr_idle(void)
{
	return;
}

static u32 lguest_apic_safe_wait_icr_idle(void)
{
	return 0;
}

static void set_lguest_basic_apic_ops(void)
{
	apic->read = lguest_apic_read;
	apic->write = lguest_apic_write;
	apic->icr_read = lguest_apic_icr_read;
	apic->icr_write = lguest_apic_icr_write;
	apic->wait_icr_idle = lguest_apic_wait_icr_idle;
	apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle;
};
#endif

/* STOP!  Until an interrupt comes in. */
static void lguest_safe_halt(void)
{
	kvm_hypercall0(LHCALL_HALT);
}

/* The SHUTDOWN hypercall takes a string to describe what's happening, and
 * an argument which says whether this is to restart (reboot) the Guest or not.
 *
 * Note that the Host always prefers that the Guest speak in physical addresses
 * rather than virtual addresses, so we use __pa() here. */
static void lguest_power_off(void)
{
	kvm_hypercall2(LHCALL_SHUTDOWN, __pa("Power down"),
					LGUEST_SHUTDOWN_POWEROFF);
}

/*
 * Panicking.
 *
 * Don't.  But if you did, this is what happens.
 */
static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
{
	kvm_hypercall2(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF);
	/* The hcall won't return, but to keep gcc happy, we're "done". */
	return NOTIFY_DONE;
}

static struct notifier_block paniced = {
	.notifier_call = lguest_panic
};

/* Setting up memory is fairly easy. */
static __init char *lguest_memory_setup(void)
{
	/* We do this here and not earlier because lockcheck used to barf if we
	 * did it before start_kernel().  I think we fixed that, so it'd be
	 * nice to move it back to lguest_init.  Patch welcome... */
	atomic_notifier_chain_register(&panic_notifier_list, &paniced);

	/* The Linux bootloader header contains an "e820" memory map: the
	 * Launcher populated the first entry with our memory limit. */
	e820_add_region(boot_params.e820_map[0].addr,
			  boot_params.e820_map[0].size,
			  boot_params.e820_map[0].type);

	/* This string is for the boot messages. */
	return "LGUEST";
}

/* We will eventually use the virtio console device to produce console output,
 * but before that is set up we use LHCALL_NOTIFY on normal memory to produce
 * console output. */
static __init int early_put_chars(u32 vtermno, const char *buf, int count)
{
	char scratch[17];
	unsigned int len = count;

	/* We use a nul-terminated string, so we have to make a copy.  Icky,
	 * huh? */
	if (len > sizeof(scratch) - 1)
		len = sizeof(scratch) - 1;
	scratch[len] = '\0';
	memcpy(scratch, buf, len);
	kvm_hypercall1(LHCALL_NOTIFY, __pa(scratch));

	/* This routine returns the number of bytes actually written. */
	return len;
}

/* Rebooting also tells the Host we're finished, but the RESTART flag tells the
 * Launcher to reboot us. */
static void lguest_restart(char *reason)
{
	kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
}

/*G:050
 * Patching (Powerfully Placating Performance Pedants)
 *
 * We have already seen that pv_ops structures let us replace simple native
 * instructions with calls to the appropriate back end all throughout the
 * kernel.  This allows the same kernel to run as a Guest and as a native
 * kernel, but it's slow because of all the indirect branches.
 *
 * Remember that David Wheeler quote about "Any problem in computer science can
 * be solved with another layer of indirection"?  The rest of that quote is
 * "... But that usually will create another problem."  This is the first of
 * those problems.
 *
 * Our current solution is to allow the paravirt back end to optionally patch
 * over the indirect calls to replace them with something more efficient.  We
 * patch two of the simplest of the most commonly called functions: disable
 * interrupts and save interrupts.  We usually have 6 or 10 bytes to patch
 * into: the Guest versions of these operations are small enough that we can
 * fit comfortably.
 *
 * First we need assembly templates of each of the patchable Guest operations,
 * and these are in i386_head.S. */
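
/* To give a feel for what the patching buys us (a sketch from memory, not a
 * literal copy of i386_head.S): instead of an indirect call like
 *
 *	call *pv_irq_ops.irq_disable
 *
 * the patcher copies the tiny template straight into the call site, roughly
 *
 *	movl $0, lguest_data+LGUEST_DATA_irq_enabled		# "cli"
 *	movl lguest_data+LGUEST_DATA_irq_enabled, %eax		# "pushf"
 *
 * so the hottest paths never take the indirect branch at all. */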

/*G:060 We construct a table from the assembler templates: */
static const struct lguest_insns
{
	const char *start, *end;
} lguest_insns[] = {
	[PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
	[PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
};

/* Now our patch routine is fairly simple (based on the native one in
 * paravirt.c).  If we have a replacement, we copy it in and return how much of
 * the available space we used. */
static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
			     unsigned long addr, unsigned len)
{
	unsigned int insn_len;

	/* Don't do anything special if we don't have a replacement */
	if (type >= ARRAY_SIZE(lguest_insns) || !lguest_insns[type].start)
		return paravirt_patch_default(type, clobber, ibuf, addr, len);

	insn_len = lguest_insns[type].end - lguest_insns[type].start;

	/* Similarly if we can't fit replacement (shouldn't happen, but let's
	 * be thorough). */
	if (len < insn_len)
		return paravirt_patch_default(type, clobber, ibuf, addr, len);

	/* Copy in our instructions. */
	memcpy(ibuf, lguest_insns[type].start, insn_len);
	return insn_len;
}

/*G:029 Once we get to lguest_init(), we know we're a Guest.  The various
 * pv_ops structures in the kernel provide points for (almost) every routine we
 * have to override to avoid privileged instructions. */
__init void lguest_init(void)
{
	/* We're under lguest, paravirt is enabled, and we're running at
	 * privilege level 1, not 0 as normal. */
	pv_info.name = "lguest";
	pv_info.paravirt_enabled = 1;
	pv_info.kernel_rpl = 1;
	pv_info.shared_kernel_pmd = 1;

	/* We set up all the lguest overrides for sensitive operations.  These
	 * are detailed with the operations themselves. */

	/* interrupt-related operations */
	pv_irq_ops.init_IRQ = lguest_init_IRQ;
	pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
	pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl);
	pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
	pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable);
	pv_irq_ops.safe_halt = lguest_safe_halt;

	/* init-time operations */
	pv_init_ops.memory_setup = lguest_memory_setup;
	pv_init_ops.patch = lguest_patch;

	/* Intercepts of various cpu instructions */
	pv_cpu_ops.load_gdt = lguest_load_gdt;
	pv_cpu_ops.cpuid = lguest_cpuid;
	pv_cpu_ops.load_idt = lguest_load_idt;
	pv_cpu_ops.iret = lguest_iret;
	pv_cpu_ops.load_sp0 = lguest_load_sp0;
	pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
	pv_cpu_ops.set_ldt = lguest_set_ldt;
	pv_cpu_ops.load_tls = lguest_load_tls;
	pv_cpu_ops.set_debugreg = lguest_set_debugreg;
	pv_cpu_ops.clts = lguest_clts;
	pv_cpu_ops.read_cr0 = lguest_read_cr0;
	pv_cpu_ops.write_cr0 = lguest_write_cr0;
	pv_cpu_ops.read_cr4 = lguest_read_cr4;
	pv_cpu_ops.write_cr4 = lguest_write_cr4;
	pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
	pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
	pv_cpu_ops.wbinvd = lguest_wbinvd;
	pv_cpu_ops.start_context_switch = paravirt_start_context_switch;
	pv_cpu_ops.end_context_switch = lguest_end_context_switch;

	/* pagetable management */
	pv_mmu_ops.write_cr3 = lguest_write_cr3;
	pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user;
	pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single;
	pv_mmu_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
	pv_mmu_ops.set_pte = lguest_set_pte;
	pv_mmu_ops.set_pte_at = lguest_set_pte_at;
	pv_mmu_ops.set_pmd = lguest_set_pmd;
#ifdef CONFIG_X86_PAE
	pv_mmu_ops.set_pte_atomic = lguest_set_pte_atomic;
	pv_mmu_ops.pte_clear = lguest_pte_clear;
	pv_mmu_ops.pmd_clear = lguest_pmd_clear;
	pv_mmu_ops.set_pud = lguest_set_pud;
#endif
	pv_mmu_ops.read_cr2 = lguest_read_cr2;
	pv_mmu_ops.read_cr3 = lguest_read_cr3;
	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
	pv_mmu_ops.pte_update = lguest_pte_update;
	pv_mmu_ops.pte_update_defer = lguest_pte_update;

#ifdef CONFIG_X86_LOCAL_APIC
	/* apic read/write intercepts */
	set_lguest_basic_apic_ops();
#endif

	/* time operations */
	pv_time_ops.get_wallclock = lguest_get_wallclock;
	pv_time_ops.time_init = lguest_time_init;
	pv_time_ops.get_tsc_khz = lguest_tsc_khz;

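	/* A small illustration of what those assignments buy us (my sketch, not
	 * code from this file): once pv_mmu_ops.write_cr3 points at
	 * lguest_write_cr3(), a generic line of kernel code such as
	 *
	 *	write_cr3(__pa(swapper_pg_dir));
	 *
	 * no longer executes a privileged "mov ..., %cr3" directly: it goes
	 * through the paravirt indirection and lands in lguest_write_cr3()
	 * above, which tells the Host via LHCALL_NEW_PGTABLE. */
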
	/* Now is a good time to look at the implementations of these functions
	 * before returning to the rest of lguest_init(). */

	/*G:070 Now we've seen all the paravirt_ops, we return to
	 * lguest_init() where the rest of the fairly chaotic boot setup
	 * occurs. */

	/* The stack protector is a weird thing where gcc places a canary
	 * value on the stack and then checks it on return.  This file is
	 * compiled with -fno-stack-protector, so we got this far without
	 * problems.  The value of the canary is kept at offset 20 from the
	 * %gs register, so we need to set that up before calling C functions
	 * in other files. */
	setup_stack_canary_segment(0);
	/* We could just call load_stack_canary_segment(), but we might as
	 * well call switch_to_new_gdt() which loads the whole table and sets
	 * up the per-cpu segment descriptor register %fs as well. */
	switch_to_new_gdt(0);

	/* As described in head_32.S, we map the first 128M of memory. */
	max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT;

	/* The Host<->Guest Switcher lives at the top of our address space, and
	 * the Host told us how big it is when we made the LGUEST_INIT
	 * hypercall: it put the answer in lguest_data.reserve_mem. */
	reserve_top_address(lguest_data.reserve_mem);

	/* If we don't initialize the lock dependency checker now, it crashes
	 * paravirt_disable_iospace. */
	lockdep_init();

	/* The IDE code spends about 3 seconds probing for disks: if we reserve
	 * all the I/O ports up front it can't get them and so doesn't probe.
	 * Other device drivers are similar (but less severe).  This cuts the
	 * kernel boot time on my machine from 4.1 seconds to 0.45 seconds. */
	paravirt_disable_iospace();

	/* This is messy CPU setup stuff which the native boot code does before
	 * start_kernel, so we have to do, too: */
	cpu_detect(&new_cpu_data);
	/* head.S usually sets up the first capability word, so do it here. */
	new_cpu_data.x86_capability[0] = cpuid_edx(1);

	/* Math is always hard! */
	new_cpu_data.hard_math = 1;

	/* We don't have features.  We have puppies!  Puppies! */
#ifdef CONFIG_X86_MCE
	mce_disabled = 1;
#endif
#ifdef CONFIG_ACPI
	acpi_disabled = 1;
	acpi_ht = 0;
#endif

	/* We set the preferred console to "hvc".  This is the "hypervisor
	 * virtual console" driver written by the PowerPC people, which we also
	 * adapted for lguest's use. */
	add_preferred_console("hvc", 0, NULL);

	/* Register our very early console. */
	virtio_cons_early_init(early_put_chars);

	/* Last of all, we set the power management poweroff hook to point to
	 * the Guest routine to power off, and the reboot hook to our restart
	 * routine. */
	pm_power_off = lguest_power_off;
	machine_ops.restart = lguest_restart;

	/* Now we're set up, call i386_start_kernel() in head32.c and we proceed
	 * to boot as normal.  It never returns. */
	i386_start_kernel();
}
/*
 * This marks the end of stage II of our journey, The Guest.
 *
 * It is now time for us to explore the layer of virtual drivers and complete
 * our understanding of the Guest in "make Drivers".
 */