/*P:010
 * A hypervisor allows multiple Operating Systems to run on a single machine.
 * To quote David Wheeler: "Any problem in computer science can be solved with
 * another layer of indirection."
 *
 * We keep things simple in two ways.  First, we start with a normal Linux
 * kernel and insert a module (lg.ko) which allows us to run other Linux
 * kernels the same way we'd run processes.  We call the first kernel the Host,
 * and the others the Guests.  The program which sets up and configures Guests
 * (such as the example in Documentation/virtual/lguest/lguest.c) is called the
 * Launcher.
 *
 * Secondly, we only run specially modified Guests, not normal kernels: setting
 * CONFIG_LGUEST_GUEST to "y" compiles this file into the kernel so it knows
 * how to be a Guest at boot time.  This means that you can use the same kernel
 * you boot normally (ie. as a Host) as a Guest.
 *
 * These Guests know that they cannot do privileged operations, such as disable
 * interrupts, and that they have to ask the Host to do such things explicitly.
 * This file consists of all the replacements for such low-level native
 * hardware operations: these special Guest versions call the Host.
 *
 * So how does the kernel know it's a Guest?  We'll see that later, but let's
 * just say that we end up here where we replace the native functions various
 * "paravirt" structures with our Guest versions, then boot like normal.
:*/

/*
 * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/start_kernel.h>
#include <linux/string.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/lguest.h>
#include <linux/lguest_launcher.h>
#include <linux/virtio_console.h>
#include <linux/pm.h>
#include <linux/export.h>
#include <asm/apic.h>
#include <asm/lguest.h>
#include <asm/paravirt.h>
#include <asm/param.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/e820.h>
#include <asm/mce.h>
#include <asm/io.h>
#include <asm/i387.h>
#include <asm/stackprotector.h>
#include <asm/reboot.h>		/* for struct machine_ops */
#include <asm/kvm_para.h>

/*G:010
 * Welcome to the Guest!
 *
 * The Guest in our tale is a simple creature: identical to the Host but
 * behaving in simplified but equivalent ways.  In particular, the Guest is the
 * same kernel as the Host (or at least, built from the same source code).
:*/

struct lguest_data lguest_data = {
	.hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF },
	.noirq_start = (u32)lguest_noirq_start,
	.noirq_end = (u32)lguest_noirq_end,
	.kernel_address = PAGE_OFFSET,
	.blocked_interrupts = { 1 }, /* Block timer interrupts */
	.syscall_vec = SYSCALL_VECTOR,
};

/*G:037
 * async_hcall() is pretty simple: I'm quite proud of it really.  We have a
 * ring buffer of stored hypercalls which the Host will run through next time we
 * do a normal hypercall.  Each entry in the ring has 5 slots for the hypercall
 * arguments, and a "hcall_status" word which is 0 if the call is ready to go,
 * and 255 once the Host has finished with it.
 *
 * If we come around to a slot which hasn't been finished, then the table is
 * full and we just make the hypercall directly.  This has the nice side
 * effect of causing the Host to run all the stored calls in the ring buffer
 * which empties it for next time!
 */
static void async_hcall(unsigned long call, unsigned long arg1,
			unsigned long arg2, unsigned long arg3,
			unsigned long arg4)
{
	/* Note: This code assumes we're uniprocessor. */
	static unsigned int next_call;
	unsigned long flags;

	/*
	 * Disable interrupts if not already disabled: we don't want an
	 * interrupt handler making a hypercall while we're already doing
	 * one!
	 */
	local_irq_save(flags);
	if (lguest_data.hcall_status[next_call] != 0xFF) {
		/* Table full, so do normal hcall which will flush table. */
		hcall(call, arg1, arg2, arg3, arg4);
	} else {
		lguest_data.hcalls[next_call].arg0 = call;
		lguest_data.hcalls[next_call].arg1 = arg1;
		lguest_data.hcalls[next_call].arg2 = arg2;
		lguest_data.hcalls[next_call].arg3 = arg3;
		lguest_data.hcalls[next_call].arg4 = arg4;
		/* Arguments must all be written before we mark it to go */
		wmb();
		lguest_data.hcall_status[next_call] = 0;
		if (++next_call == LHCALL_RING_SIZE)
			next_call = 0;
	}
	local_irq_restore(flags);
}
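/*
 * (That static "next_call" index is only safe because the Guest runs on a
 * single virtual CPU, as the comment above notes; an SMP Guest would need to
 * make it per-cpu or protect it with a lock.)
 */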

/*G:035
 * Notice the lazy_hcall() above, rather than hcall().  This is our first real
 * optimization trick!
 *
 * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
 * them as a batch when lazy_mode is eventually turned off.  Because hypercalls
 * are reasonably expensive, batching them up makes sense.  For example, a
 * large munmap might update dozens of page table entries: that code calls
 * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
 * lguest_leave_lazy_mode().
 *
 * So, when we're in lazy mode, we call async_hcall() to store the call for
 * future processing:
 */
static void lazy_hcall1(unsigned long call, unsigned long arg1)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, 0, 0, 0);
	else
		async_hcall(call, arg1, 0, 0, 0);
}

/* You can imagine what lazy_hcall2, 3 and 4 look like. :*/
static void lazy_hcall2(unsigned long call,
			unsigned long arg1,
			unsigned long arg2)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, arg2, 0, 0);
	else
		async_hcall(call, arg1, arg2, 0, 0);
}

static void lazy_hcall3(unsigned long call,
			unsigned long arg1,
			unsigned long arg2,
			unsigned long arg3)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, arg2, arg3, 0);
	else
		async_hcall(call, arg1, arg2, arg3, 0);
}

#ifdef CONFIG_X86_PAE
static void lazy_hcall4(unsigned long call,
			unsigned long arg1,
			unsigned long arg2,
			unsigned long arg3,
			unsigned long arg4)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, arg2, arg3, arg4);
	else
		async_hcall(call, arg1, arg2, arg3, arg4);
}
#endif

/*G:036
 * When lazy mode is turned off, we issue the do-nothing hypercall to
 * flush any stored calls, and call the generic helper to reset the
 * per-cpu lazy mode variable.
 */
static void lguest_leave_lazy_mmu_mode(void)
{
	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
	paravirt_leave_lazy_mmu();
}

/*
 * We also catch the end of context switch; we enter lazy mode for much of
 * that too, so again we need to flush here.
 *
 * (Technically, this is lazy CPU mode, and normally we're in lazy MMU
 * mode, but unlike Xen, lguest doesn't care about the difference).
 */
static void lguest_end_context_switch(struct task_struct *next)
{
	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
	paravirt_end_context_switch(next);
}

/*G:032
 * After that diversion we return to our first native-instruction
 * replacements: four functions for interrupt control.
 *
 * The simplest way of implementing these would be to have "turn interrupts
 * off" and "turn interrupts on" hypercalls.  Unfortunately, this is too slow:
 * these are by far the most commonly called functions of those we override.
 *
 * So instead we keep an "irq_enabled" field inside our "struct lguest_data",
 * which the Guest can update with a single instruction.  The Host knows to
 * check there before it tries to deliver an interrupt.
 */

/*
 * save_flags() is expected to return the processor state (ie. "flags").  The
 * flags word contains all kinds of stuff, but in practice Linux only cares
 * about the interrupt flag.  Our "save_flags()" just returns that.
 */
static unsigned long save_fl(void)
{
	return lguest_data.irq_enabled;
}

/* Interrupts go off... */
static void irq_disable(void)
{
	lguest_data.irq_enabled = 0;
}

/*
 * Let's pause a moment.  Remember how I said these are called so often?
 * Jeremy Fitzhardinge optimized them so hard early in 2009 that he had to
 * break some rules.  In particular, these functions are assumed to save their
 * own registers if they need to: normal C functions assume they can trash the
 * eax register.  To use normal C functions, we use
 * PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the
 * C function, then restores it.
 */
PV_CALLEE_SAVE_REGS_THUNK(save_fl);
PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
/*:*/

/* These are in i386_head.S */
extern void lg_irq_enable(void);
extern void lg_restore_fl(unsigned long flags);

/*M:003
 * We could be more efficient in our checking of outstanding interrupts, rather
 * than using a branch.  One way would be to put the "irq_enabled" field in a
 * page by itself, and have the Host write-protect it when an interrupt comes
 * in when irqs are disabled.  There will then be a page fault as soon as
 * interrupts are re-enabled.
 *
 * A better method is to implement soft interrupt disable generally for x86:
 * instead of disabling interrupts, we set a flag.  If an interrupt does come
 * in, we then disable them for real.  This is uncommon, so we could simply use
 * a hypercall for interrupt control and not worry about efficiency.
:*/

/*G:034
 * The Interrupt Descriptor Table (IDT).
 *
 * The IDT tells the processor what to do when an interrupt comes in.  Each
 * entry in the table is a 64-bit descriptor: this holds the privilege level,
 * address of the handler, and... well, who cares?  The Guest just asks the
 * Host to make the change anyway, because the Host controls the real IDT.
 */
static void lguest_write_idt_entry(gate_desc *dt,
				   int entrynum, const gate_desc *g)
{
	/*
	 * The gate_desc structure is 8 bytes long: we hand it to the Host in
	 * two 32-bit chunks.  The whole 32-bit kernel used to hand descriptors
	 * around like this; typesafety wasn't a big concern in Linux's early
	 * years.
	 */
	u32 *desc = (u32 *)g;
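	/*
	 * (For the record, assuming a standard 32-bit interrupt gate: the low
	 * word is handler offset bits 0-15 plus the code segment selector,
	 * and the high word is offset bits 16-31 plus the type, privilege
	 * level and present bits.  Those two raw words are all the Host needs.)
	 */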
	/* Keep the local copy up to date. */
	native_write_idt_entry(dt, entrynum, g);
	/* Tell Host about this new entry. */
	hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1], 0);
}

/*
 * Changing to a different IDT is very rare: we keep the IDT up-to-date every
 * time it is written, so we can simply loop through all entries and tell the
 * Host about them.
 */
static void lguest_load_idt(const struct desc_ptr *desc)
{
	unsigned int i;
	struct desc_struct *idt = (void *)desc->address;

	for (i = 0; i < (desc->size+1)/8; i++)
		hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b, 0);
}

/*
 * The Global Descriptor Table.
 *
 * The Intel architecture defines another table, called the Global Descriptor
 * Table (GDT).  You tell the CPU where it is (and its size) using the "lgdt"
 * instruction, and then several other instructions refer to entries in the
 * table.  There are three entries which the Switcher needs, so the Host simply
 * controls the entire thing and the Guest asks it to make changes using the
 * LOAD_GDT hypercall.
 *
 * This is exactly like the IDT code.
 */
static void lguest_load_gdt(const struct desc_ptr *desc)
{
	unsigned int i;
	struct desc_struct *gdt = (void *)desc->address;

	for (i = 0; i < (desc->size+1)/8; i++)
		hcall(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b, 0);
}

/*
 * For a single GDT entry which changes, we simply change our copy and
 * then tell the host about it.
 */
static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
				   const void *desc, int type)
{
	native_write_gdt_entry(dt, entrynum, desc, type);
	/* Tell Host about this new entry. */
	hcall(LHCALL_LOAD_GDT_ENTRY, entrynum,
	      dt[entrynum].a, dt[entrynum].b, 0);
}

/*
 * There are three "thread local storage" GDT entries which change
 * on every context switch (these three entries are how glibc implements
 * __thread variables).  As an optimization, we have a hypercall
 * specifically for this case.
 *
 * Wouldn't it be nicer to have a general LOAD_GDT_ENTRIES hypercall
 * which took a range of entries?
 */
static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * There's one problem which normal hardware doesn't have: the Host
	 * can't handle us removing entries we're currently using.  So we clear
	 * the GS register here: if it's needed it'll be reloaded anyway.
	 */
	lazy_load_gs(0);
	lazy_hcall2(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu);
}

/*G:038
 * That's enough excitement for now, back to ploughing through each of the
 * different pv_ops structures (we're about 1/3 of the way through).
 *
 * This is the Local Descriptor Table, another weird Intel thingy.  Linux only
 * uses this for some strange applications like Wine.  We don't do anything
 * here, so they'll get an informative and friendly Segmentation Fault.
 */
static void lguest_set_ldt(const void *addr, unsigned entries)
{
}

/*
 * This loads a GDT entry into the "Task Register": that entry points to a
 * structure called the Task State Segment.  Some comments scattered through the
 * kernel code indicate that this was used for task switching in ages past, along
 * with blood sacrifice and astrology.
 *
 * Now there's nothing interesting in here that we don't get told elsewhere.
 * But the native version uses the "ltr" instruction, which makes the Host
 * complain to the Guest about a Segmentation Fault and it'll oops.  So we
 * override the native version with a do-nothing version.
 */
static void lguest_load_tr_desc(void)
{
}

/*
 * The "cpuid" instruction is a way of querying both the CPU identity
 * (manufacturer, model, etc) and its features.  It was introduced before the
 * Pentium in 1993 and keeps getting extended by Intel, AMD and others.
 * As you might imagine, after a decade and a half of this treatment, it is now a
 * giant ball of hair.  Its entry in the current Intel manual runs to 28 pages.
 *
 * This instruction even has its own Wikipedia entry.  The Wikipedia entry
 * has been translated into 6 languages.  I am not making this up!
 *
 * We could get funky here and identify ourselves as "GenuineLguest", but
 * instead we just use the real "cpuid" instruction.  Then I pretty much turned
 * off feature bits until the Guest booted.  (Don't say that: you'll damage
 * lguest sales!)  Shut up, inner voice!  (Hey, just pointing out that this is
 * hardly future proof.)  No one's listening!  They don't like you anyway,
 * parenthetic weirdo!
 *
 * Replacing the cpuid so we can turn features off is great for the kernel, but
 * anyone (including userspace) can just use the raw "cpuid" instruction and
 * the Host won't even notice since it isn't privileged.  So we try not to get
 * too worked up about it.
 */
static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
			 unsigned int *cx, unsigned int *dx)
{
	int function = *ax;

	native_cpuid(ax, bx, cx, dx);
	switch (function) {
	/*
	 * CPUID 0 gives the highest legal CPUID number (and the ID string).
	 * We futureproof our code a little by sticking to known CPUID values.
	 */
	case 0:
		if (*ax > 5)
			*ax = 5;
		break;

	/*
	 * CPUID 1 is a basic feature request.
	 *
	 * CX: we only allow kernel to see SSE3, CMPXCHG16B and SSSE3
	 * DX: SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU and PAE.
	 */
	case 1:
		*cx &= 0x00002201;
		*dx &= 0x07808151;
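		/*
		 * (Decoding the magic numbers: the CX mask keeps bits 0, 9 and
		 * 13 -- SSE3, SSSE3 and CMPXCHG16B -- while the DX mask keeps
		 * FPU, TSC, PAE, CMPXCHG8B, CMOV, MMX, FXSR, SSE and SSE2,
		 * exactly the features promised above.)
		 */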
		/*
		 * The Host can do a nice optimization if it knows that the
		 * kernel mappings (addresses above 0xC0000000 or whatever
		 * PAGE_OFFSET is set to) haven't changed.  But Linux calls
		 * flush_tlb_user() for both user and kernel mappings unless
		 * the Page Global Enable (PGE) feature bit is set.
		 */
		*dx |= 0x00002000;
		/*
		 * We also lie, and say we're family id 5.  6 or greater
		 * leads to a rdmsr in early_init_intel which we can't handle.
		 * Family ID is returned as bits 8-12 in ax.
		 */
		*ax &= 0xFFFFF0FF;
		*ax |= 0x00000500;
		break;

	/*
	 * This is used to detect if we're running under KVM.  We might be,
	 * but that's a Host matter, not us.  So say we're not.
	 */
	case KVM_CPUID_SIGNATURE:
		*bx = *cx = *dx = 0;
		break;

	/*
	 * 0x80000000 returns the highest Extended Function, so we futureproof
	 * like we do above by limiting it to known fields.
	 */
	case 0x80000000:
		if (*ax > 0x80000008)
			*ax = 0x80000008;
		break;

	/*
	 * PAE systems can mark pages as non-executable.  Linux calls this the
	 * NX bit.  Intel calls it XD (eXecute Disable), AMD EVP (Enhanced
	 * Virus Protection).  We just switch it off here, since we don't
	 * support it.
	 */
	case 0x80000001:
		*dx &= ~(1 << 20);
		break;
	}
}

/*
 * Intel has four control registers, imaginatively named cr0, cr2, cr3 and cr4.
 * I assume there's a cr1, but it hasn't bothered us yet, so we'll not bother
 * it.  The Host needs to know when the Guest wants to change them, so we have
 * a whole series of functions like read_cr0() and write_cr0().
 *
 * We start with cr0.  cr0 allows you to turn on and off all kinds of basic
 * features, but Linux only really cares about one: the horrifically-named Task
 * Switched (TS) bit at bit 3 (ie. 8)
 *
 * What does the TS bit do?  Well, it causes the CPU to trap (interrupt 7) if
 * the floating point unit is used.  Which allows us to restore FPU state
 * lazily after a task switch, and Linux uses that gratefully, but wouldn't a
 * name like "FPUTRAP bit" be a little less cryptic?
 *
 * We store cr0 locally because the Host never changes it.  The Guest sometimes
 * wants to read it and we'd prefer not to bother the Host unnecessarily.
 */
static unsigned long current_cr0;
static void lguest_write_cr0(unsigned long val)
{
	lazy_hcall1(LHCALL_TS, val & X86_CR0_TS);
	current_cr0 = val;
}

static unsigned long lguest_read_cr0(void)
{
	return current_cr0;
}

/*
 * Intel provided a special instruction to clear the TS bit for people too cool
 * to use write_cr0() to do it.  This "clts" instruction is faster, because all
 * the vowels have been optimized out.
 */
static void lguest_clts(void)
{
	lazy_hcall1(LHCALL_TS, 0);
	current_cr0 &= ~X86_CR0_TS;
}

/*
 * cr2 is the virtual address of the last page fault, which the Guest only ever
 * reads.  The Host kindly writes this into our "struct lguest_data", so we
 * just read it out of there.
 */
static unsigned long lguest_read_cr2(void)
{
	return lguest_data.cr2;
}

/* See lguest_set_pte() below. */
static bool cr3_changed = false;
static unsigned long current_cr3;

/*
 * cr3 is the current toplevel pagetable page: the principle is the same as
 * cr0.  Keep a local copy, and tell the Host when it changes.
 */
static void lguest_write_cr3(unsigned long cr3)
{
	lazy_hcall1(LHCALL_NEW_PGTABLE, cr3);
	current_cr3 = cr3;

	/* These two page tables are simple, linear, and used during boot */
	if (cr3 != __pa(swapper_pg_dir) && cr3 != __pa(initial_page_table))
		cr3_changed = true;
}

static unsigned long lguest_read_cr3(void)
{
	return current_cr3;
}

/* cr4 is used to enable and disable PGE, but we don't care. */
static unsigned long lguest_read_cr4(void)
{
	return 0;
}

static void lguest_write_cr4(unsigned long val)
{
}

/*
 * Page Table Handling.
 *
 * Now would be a good time to take a rest and grab a coffee or similarly
 * relaxing stimulant.  The easy parts are behind us, and the trek gradually
 * winds uphill from here.
 *
 * Quick refresher: memory is divided into "pages" of 4096 bytes each.  The CPU
 * maps virtual addresses to physical addresses using "page tables".  We could
 * use one huge index of 1 million entries: each address is 4 bytes, so that's
 * 1024 pages just to hold the page tables.   But since most virtual addresses
 * are unused, we use a two level index which saves space.  The cr3 register
 * contains the physical address of the top level "page directory" page, which
 * contains physical addresses of up to 1024 second-level pages.  Each of these
 * second level pages contains up to 1024 physical addresses of actual pages,
 * or Page Table Entries (PTEs).
 *
 * Here's a diagram, where arrows indicate physical addresses:
 *
 * cr3 ---> +---------+
 *	    |  	   --------->+---------+
 *	    |	      |	     | PADDR1  |
 *	  Mid-level   |	     | PADDR2  |
 *	  (PMD) page  |	     | 	       |
 *	    |	      |	   Lower-level |
 *	    |	      |	   (PTE) page  |
 *	    |	      |	     |	       |
 *	      ....    	     	 ....
 *
 * So to convert a virtual address to a physical address, we look up the top
 * level, which points us to the second level, which gives us the physical
 * address of that page.  If the top level entry was not present, or the second
 * level entry was not present, then the virtual address is invalid (we
 * say "the page was not mapped").
 *
 * Put another way, a 32-bit virtual address is divided up like so:
 *
 *  1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 * |<---- 10 bits ---->|<---- 10 bits ---->|<------ 12 bits ------>|
 *    Index into top     Index into second      Offset within page
 *  page directory page    pagetable page
 *
 * Now, unfortunately, this isn't the whole story: Intel added Physical Address
 * Extension (PAE) to allow 32 bit systems to use 64GB of memory (ie. 36 bits).
 * These are held in 64-bit page table entries, so we can now only fit 512
 * entries in a page, and the neat three-level tree breaks down.
 *
 * The result is a four level page table:
 *
 * cr3 --> [ 4 Upper  ]
 *	   [   Level  ]
 *	   [  Entries ]
 *	   [(PUD Page)]---> +---------+
 *	 		    |  	   --------->+---------+
 *	 		    |	      |	     | PADDR1  |
 *	 		  Mid-level   |	     | PADDR2  |
 *	 		  (PMD) page  |	     | 	       |
 *	 		    |	      |	   Lower-level |
 *	 		    |	      |	   (PTE) page  |
 *	 		    |	      |	     |	       |
 *	 		      ....    	     	 ....
 *
 *
 * And the virtual address is decoded as:
 *
 *         1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 *      |<-2->|<--- 9 bits ---->|<---- 9 bits --->|<------ 12 bits ------>|
 * Index into    Index into mid    Index into lower    Offset within page
 * top entries   directory page     pagetable page
 *
 * It's too hard to switch between these two formats at runtime, so Linux only
 * supports one or the other depending on whether CONFIG_X86_PAE is set.  Many
 * distributions turn it on, and not just for people with silly amounts of
 * memory: the larger PTE entries allow room for the NX bit, which lets the
 * kernel disable execution of pages and increase security.
 *
 * This was a problem for lguest, which couldn't run on these distributions;
 * then Matias Zabaljauregui figured it all out and implemented it, and only a
 * handful of puppies were crushed in the process!
 *
 * Back to our point: the kernel spends a lot of time changing both the
 * top-level page directory and lower-level pagetable pages.  The Guest doesn't
 * know physical addresses, so while it maintains these page tables exactly
 * like normal, it also needs to keep the Host informed whenever it makes a
 * change: the Host will create the real page tables based on the Guests'.
 */

/*
 * The Guest calls this after it has set a second-level entry (pte), ie. to map
 * a page into a process' address space.  We tell the Host the toplevel and
 * address this corresponds to.  The Guest uses one pagetable per process, so
 * we need to tell the Host which one we're changing (mm->pgd).
 */
static void lguest_pte_update(struct mm_struct *mm, unsigned long addr,
			       pte_t *ptep)
{
#ifdef CONFIG_X86_PAE
	/* PAE needs to hand a 64 bit page table entry, so it uses two args. */
	lazy_hcall4(LHCALL_SET_PTE, __pa(mm->pgd), addr,
		    ptep->pte_low, ptep->pte_high);
#else
	lazy_hcall3(LHCALL_SET_PTE, __pa(mm->pgd), addr, ptep->pte_low);
#endif
}

/* This is the "set and update" combo-meal-deal version. */
static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	native_set_pte(ptep, pteval);
	lguest_pte_update(mm, addr, ptep);
}

/*
 * The Guest calls lguest_set_pud to set a top-level entry and lguest_set_pmd
 * to set a middle-level entry when PAE is activated.
 *
 * Again, we set the entry then tell the Host which page we changed,
 * and the index of the entry we changed.
 */
#ifdef CONFIG_X86_PAE
static void lguest_set_pud(pud_t *pudp, pud_t pudval)
{
	native_set_pud(pudp, pudval);

	/* 32 bytes aligned pdpt address and the index. */
	lazy_hcall2(LHCALL_SET_PGD, __pa(pudp) & 0xFFFFFFE0,
		   (__pa(pudp) & 0x1F) / sizeof(pud_t));
}

static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	native_set_pmd(pmdp, pmdval);
	lazy_hcall2(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK,
		   (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
}
#else

/* The Guest calls lguest_set_pmd to set a top-level entry when !PAE. */
static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	native_set_pmd(pmdp, pmdval);
	lazy_hcall2(LHCALL_SET_PGD, __pa(pmdp) & PAGE_MASK,
		   (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
}
#endif

/*
 * There are a couple of legacy places where the kernel sets a PTE, but we
 * don't know the top level any more.  This is useless for us, since we don't
 * know which pagetable is changing or what address, so we just tell the Host
 * to forget all of them.  Fortunately, this is very rare.
 *
 * ... except in early boot when the kernel sets up the initial pagetables,
 * which makes booting astonishingly slow: 48 seconds!  So we don't even tell
 * the Host anything changed until we've done the first real page table switch,
 * which brings boot back to 4.3 seconds.
 */
static void lguest_set_pte(pte_t *ptep, pte_t pteval)
{
	native_set_pte(ptep, pteval);
	if (cr3_changed)
		lazy_hcall1(LHCALL_FLUSH_TLB, 1);
}

#ifdef CONFIG_X86_PAE
/*
 * With 64-bit PTE values, we need to be careful setting them: if we set 32
 * bits at a time, the hardware could see a weird half-set entry.  These
 * versions ensure we update all 64 bits at once.
 */
static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte_atomic(ptep, pte);
	if (cr3_changed)
		lazy_hcall1(LHCALL_FLUSH_TLB, 1);
}

static void lguest_pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	native_pte_clear(mm, addr, ptep);
	lguest_pte_update(mm, addr, ptep);
}

static void lguest_pmd_clear(pmd_t *pmdp)
{
	lguest_set_pmd(pmdp, __pmd(0));
}
#endif

/*
 * Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
 * native page table operations.  On native hardware you can set a new page
 * table entry whenever you want, but if you want to remove one you have to do
 * a TLB flush (a TLB is a little cache of page table entries kept by the CPU).
 *
 * So the lguest_set_pte_at() and lguest_set_pmd() functions above are only
 * called when a valid entry is written, not when it's removed (ie. marked not
 * present).  Instead, this is where we come when the Guest wants to remove a
 * page table entry: we tell the Host to set that entry to 0 (ie. the present
 * bit is zero).
 */
static void lguest_flush_tlb_single(unsigned long addr)
{
	/* Simply set it to zero: if it was not, it will fault back in. */
	lazy_hcall3(LHCALL_SET_PTE, current_cr3, addr, 0);
}

/*
 * This is what happens after the Guest has removed a large number of entries.
 * This tells the Host that any of the page table entries for userspace might
 * have changed, ie. virtual addresses below PAGE_OFFSET.
 */
static void lguest_flush_tlb_user(void)
{
	lazy_hcall1(LHCALL_FLUSH_TLB, 0);
}

/*
 * This is called when the kernel page tables have changed.  That's not very
 * common (unless the Guest is using highmem, which makes the Guest extremely
 * slow), so it's worth separating this from the user flushing above.
 */
static void lguest_flush_tlb_kernel(void)
{
	lazy_hcall1(LHCALL_FLUSH_TLB, 1);
}

/*
 * The Unadvanced Programmable Interrupt Controller.
 *
 * This is an attempt to implement the simplest possible interrupt controller.
 * I spent some time looking though routines like set_irq_chip_and_handler,
 * set_irq_chip_and_handler_name, set_irq_chip_data and set_phasers_to_stun and
 * I *think* this is as simple as it gets.
 *
 * We can tell the Host what interrupts we want blocked ready for using the
 * lguest_data.interrupts bitmap, so disabling (aka "masking") them is as
 * simple as setting a bit.  We don't actually "ack" interrupts as such, we
 * just mask and unmask them.  I wonder if we should be cleverer?
 */
static void disable_lguest_irq(struct irq_data *data)
{
	set_bit(data->irq, lguest_data.blocked_interrupts);
}

static void enable_lguest_irq(struct irq_data *data)
{
	clear_bit(data->irq, lguest_data.blocked_interrupts);
}

/* This structure describes the lguest IRQ controller. */
static struct irq_chip lguest_irq_controller = {
	.name		= "lguest",
	.irq_mask	= disable_lguest_irq,
	.irq_mask_ack	= disable_lguest_irq,
	.irq_unmask	= enable_lguest_irq,
};

/*
 * This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
 * interrupt (except 128, which is used for system calls), and then tells the
 * Linux infrastructure that each interrupt is controlled by our level-based
 * lguest interrupt controller.
 */
static void __init lguest_init_IRQ(void)
{
	unsigned int i;

	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
		/* Some systems map "vectors" to interrupts weirdly.  Not us! */
		__this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR);
		if (i != SYSCALL_VECTOR)
			set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
	}

	/*
	 * This call is required to set up for 4k stacks, where we have
	 * separate stacks for hard and soft interrupts.
	 */
	irq_ctx_init(smp_processor_id());
}

/*
 * Interrupt descriptors are allocated as-needed, but low-numbered ones are
 * reserved by the generic x86 code.  So we ignore irq_alloc_desc_at if it
 * tells us the irq is already used: other errors (ie. ENOMEM) we take
 * seriously.
 */
int lguest_setup_irq(unsigned int irq)
{
	int err;

	/* Returns -ve error or vector number. */
	err = irq_alloc_desc_at(irq, 0);
	if (err < 0 && err != -EEXIST)
		return err;

	irq_set_chip_and_handler_name(irq, &lguest_irq_controller,
				      handle_level_irq, "level");
	return 0;
}

/*
 * Time.
 *
 * It would be far better for everyone if the Guest had its own clock, but
 * until then the Host gives us the time on every interrupt.
 */
static unsigned long lguest_get_wallclock(void)
{
	return lguest_data.time.tv_sec;
}

/*
 * The TSC is an Intel thing called the Time Stamp Counter.  The Host tells us
 * what speed it runs at, or 0 if it's unusable as a reliable clock source.
 * This matches what we want here: if we return 0 from this function, the x86
 * TSC clock will give up and not register itself.
 */
static unsigned long lguest_tsc_khz(void)
{
	return lguest_data.tsc_khz;
}

/*
 * If we can't use the TSC, the kernel falls back to our lower-priority
 * "lguest_clock", where we read the time value given to us by the Host.
 */
static cycle_t lguest_clock_read(struct clocksource *cs)
{
	unsigned long sec, nsec;

	/*
	 * Since the time is in two parts (seconds and nanoseconds), we risk
	 * reading it just as it's changing from 99 & 0.999999999 to 100 and 0,
	 * and getting 99 and 0.  As Linux tends to come apart under the stress
	 * of time travel, we must be careful:
	 */
	do {
		/* First we read the seconds part. */
		sec = lguest_data.time.tv_sec;
		/*
		 * This read memory barrier tells the compiler and the CPU that
		 * this can't be reordered: we have to complete the above
		 * before going on.
		 */
		rmb();
		/* Now we read the nanoseconds part. */
		nsec = lguest_data.time.tv_nsec;
		/* Make sure we've done that. */
		rmb();
		/* Now if the seconds part has changed, try again. */
	} while (unlikely(lguest_data.time.tv_sec != sec));

	/* Our lguest clock is in real nanoseconds. */
	return sec*1000000000ULL + nsec;
}
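/*
 * (The retry loop above is the same idea as the kernel's seqlock readers:
 * keep re-reading until the seconds value is stable across the whole read.)
 */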

/* This is the fallback clocksource: lower priority than the TSC clocksource. */
static struct clocksource lguest_clock = {
	.name		= "lguest",
	.rating		= 200,
	.read		= lguest_clock_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * We also need a "struct clock_event_device": Linux asks us to set it to go
 * off some time in the future.  Actually, James Morris figured all this out, I
 * just applied the patch.
 */
static int lguest_clockevent_set_next_event(unsigned long delta,
                                           struct clock_event_device *evt)
{
	/* FIXME: I don't think this can ever happen, but James tells me he had
	 * to put this code in.  Maybe we should remove it now.  Anyone? */
	if (delta < LG_CLOCK_MIN_DELTA) {
		if (printk_ratelimit())
			printk(KERN_DEBUG "%s: small delta %lu ns\n",
			       __func__, delta);
		return -ETIME;
	}

	/* Please wake us this far in the future. */
	hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0, 0);
	return 0;
}

static void lguest_clockevent_set_mode(enum clock_event_mode mode,
                                      struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/* A 0 argument shuts the clock down. */
		hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/* This is what we expect. */
		break;
	case CLOCK_EVT_MODE_PERIODIC:
		BUG();
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

/* This describes our primitive timer chip. */
static struct clock_event_device lguest_clockevent = {
	.name                   = "lguest",
	.features               = CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event         = lguest_clockevent_set_next_event,
	.set_mode               = lguest_clockevent_set_mode,
	.rating                 = INT_MAX,
	.mult                   = 1,
	.shift                  = 0,
	.min_delta_ns           = LG_CLOCK_MIN_DELTA,
	.max_delta_ns           = LG_CLOCK_MAX_DELTA,
};
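/*
 * (With .mult = 1 and .shift = 0, the clockevents core converts nanoseconds
 * to device "ticks" one-to-one, so the delta handed to LHCALL_SET_CLOCKEVENT
 * in set_next_event above really is a plain nanosecond count.)
 */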

/*
 * This is the Guest timer interrupt handler (hardware interrupt 0).  We just
 * call the clockevent infrastructure and it does whatever needs doing.
 */
static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
{
	unsigned long flags;

	/* Don't interrupt us while this is running. */
	local_irq_save(flags);
	lguest_clockevent.event_handler(&lguest_clockevent);
	local_irq_restore(flags);
}

/*
 * At some point in the boot process, we get asked to set up our timing
 * infrastructure.  The kernel doesn't expect timer interrupts before this, but
 * we cleverly initialized the "blocked_interrupts" field of "struct
 * lguest_data" so that timer interrupts were blocked until now.
 */
static void lguest_time_init(void)
{
	/* Set up the timer interrupt (0) to go to our simple timer routine */
	lguest_setup_irq(0);
	irq_set_handler(0, lguest_time_irq);

	clocksource_register_hz(&lguest_clock, NSEC_PER_SEC);

	/* We can't set cpumask in the initializer: damn C limitations!  Set it
	 * here and register our timer device. */
	lguest_clockevent.cpumask = cpumask_of(0);
	clockevents_register_device(&lguest_clockevent);

	/* Finally, we unblock the timer interrupt. */
	clear_bit(0, lguest_data.blocked_interrupts);
}

/*
 * Miscellaneous bits and pieces.
 *
 * Here is an oddball collection of functions which the Guest needs for things
 * to work.  They're pretty simple.
 */

/*
 * The Guest needs to tell the Host what stack it expects traps to use.  For
 * native hardware, this is part of the Task State Segment mentioned above in
 * lguest_load_tr_desc(), but to help hypervisors there's this special call.
 *
 * We tell the Host the segment we want to use (__KERNEL_DS is the kernel data
 * segment), the privilege level (we're privilege level 1, the Host is 0 and
 * will not tolerate us trying to use that), the stack pointer, and the number
 * of pages in the stack.
 */
static void lguest_load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	lazy_hcall3(LHCALL_SET_STACK, __KERNEL_DS | 0x1, thread->sp0,
		   THREAD_SIZE / PAGE_SIZE);
}

/* Let's just say, I wouldn't do debugging under a Guest. */
static void lguest_set_debugreg(int regno, unsigned long value)
{
	/* FIXME: Implement */
}

/*
 * There are times when the kernel wants to make sure that no memory writes are
 * caught in the cache (that they've all reached real hardware devices).  This
 * doesn't matter for the Guest which has virtual hardware.
 *
 * On the Pentium 4 and above, cpuid() indicates that the Cache Line Flush
 * (clflush) instruction is available and the kernel uses that.  Otherwise, it
 * uses the older "Write Back and Invalidate Cache" (wbinvd) instruction.
 * Unlike clflush, wbinvd can only be run at privilege level 0.  So we can
 * ignore clflush, but replace wbinvd.
 */
static void lguest_wbinvd(void)
{
}

/*
 * If the Guest expects to have an Advanced Programmable Interrupt Controller,
 * we play dumb by ignoring writes and returning 0 for reads.  So it's no
 * longer Programmable nor Controlling anything, and I don't think 8 lines of
 * code qualifies for Advanced.  It will also never interrupt anything.  It
 * does, however, allow us to get through the Linux boot code.
 */
#ifdef CONFIG_X86_LOCAL_APIC
static void lguest_apic_write(u32 reg, u32 v)
{
}

static u32 lguest_apic_read(u32 reg)
{
	return 0;
}

static u64 lguest_apic_icr_read(void)
{
	return 0;
}

static void lguest_apic_icr_write(u32 low, u32 id)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}

static void lguest_apic_wait_icr_idle(void)
{
	return;
}

static u32 lguest_apic_safe_wait_icr_idle(void)
{
	return 0;
}

static void set_lguest_basic_apic_ops(void)
{
	apic->read = lguest_apic_read;
	apic->write = lguest_apic_write;
	apic->icr_read = lguest_apic_icr_read;
	apic->icr_write = lguest_apic_icr_write;
	apic->wait_icr_idle = lguest_apic_wait_icr_idle;
	apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle;
};
#endif

/* STOP!  Until an interrupt comes in. */
static void lguest_safe_halt(void)
{
	hcall(LHCALL_HALT, 0, 0, 0, 0);
}

/*
 * The SHUTDOWN hypercall takes a string to describe what's happening, and
 * an argument which says whether this is to restart (reboot) the Guest or not.
 *
 * Note that the Host always prefers that the Guest speak in physical addresses
 * rather than virtual addresses, so we use __pa() here.
 */
static void lguest_power_off(void)
{
	hcall(LHCALL_SHUTDOWN, __pa("Power down"),
	      LGUEST_SHUTDOWN_POWEROFF, 0, 0);
}

/*
 * Panicing.
 *
 * Don't.  But if you did, this is what happens.
 */
static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
{
	hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0, 0);
	/* The hcall won't return, but to keep gcc happy, we're "done". */
	return NOTIFY_DONE;
}

static struct notifier_block paniced = {
	.notifier_call = lguest_panic
};

/* Setting up memory is fairly easy. */
static __init char *lguest_memory_setup(void)
{
	/*
	 * The Linux bootloader header contains an "e820" memory map: the
	 * Launcher populated the first entry with our memory limit.
	 */
	e820_add_region(boot_params.e820_map[0].addr,
			  boot_params.e820_map[0].size,
			  boot_params.e820_map[0].type);

	/* This string is for the boot messages. */
	return "LGUEST";
}

/*
 * We will eventually use the virtio console device to produce console output,
 * but before that is set up we use LHCALL_NOTIFY on normal memory to produce
 * console output.
 */
static __init int early_put_chars(u32 vtermno, const char *buf, int count)
{
	char scratch[17];
	unsigned int len = count;

	/* We use a nul-terminated string, so we make a copy.  Icky, huh? */
	if (len > sizeof(scratch) - 1)
		len = sizeof(scratch) - 1;
	scratch[len] = '\0';
	memcpy(scratch, buf, len);
	hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0, 0);

	/* This routine returns the number of bytes actually written. */
	return len;
}
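/*
 * (So, for example, early_put_chars(0, "OK\n", 3) copies those three bytes
 * into the scratch buffer and hands the Host its physical address via
 * LHCALL_NOTIFY.)
 */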

/*
 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
 * Launcher to reboot us.
 */
static void lguest_restart(char *reason)
{
	hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
}

/*G:050
 * Patching (Powerfully Placating Performance Pedants)
 *
 * We have already seen that pv_ops structures let us replace simple native
 * instructions with calls to the appropriate back end all throughout the
 * kernel.  This allows the same kernel to run as a Guest and as a native
 * kernel, but it's slow because of all the indirect branches.
 *
 * Remember that David Wheeler quote about "Any problem in computer science can
 * be solved with another layer of indirection"?  The rest of that quote is
 * "... But that usually will create another problem."  This is the first of
 * those problems.
 *
 * Our current solution is to allow the paravirt back end to optionally patch
 * over the indirect calls to replace them with something more efficient.  We
 * patch two of the simplest of the most commonly called functions: disable
 * interrupts and save interrupts.  We usually have 6 or 10 bytes to patch
 * into: the Guest versions of these operations are small enough that we can
 * fit comfortably.
 *
 * First we need assembly templates of each of the patchable Guest operations,
 * and these are in i386_head.S.
 */

/*G:060 We construct a table from the assembler templates: */
static const struct lguest_insns
{
	const char *start, *end;
} lguest_insns[] = {
	[PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
	[PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
};

/*
 * Now our patch routine is fairly simple (based on the native one in
 * paravirt.c).  If we have a replacement, we copy it in and return how much of
 * the available space we used.
 */
static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
			     unsigned long addr, unsigned len)
{
	unsigned int insn_len;

	/* Don't do anything special if we don't have a replacement */
	if (type >= ARRAY_SIZE(lguest_insns) || !lguest_insns[type].start)
		return paravirt_patch_default(type, clobber, ibuf, addr, len);

	insn_len = lguest_insns[type].end - lguest_insns[type].start;

	/* Similarly if it can't fit (doesn't happen, but let's be thorough). */
	if (len < insn_len)
		return paravirt_patch_default(type, clobber, ibuf, addr, len);

	/* Copy in our instructions. */
	memcpy(ibuf, lguest_insns[type].start, insn_len);
	return insn_len;
}
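/*
 * (The effect: where the compiled kernel would have made an indirect call
 * through, say, pv_irq_ops.irq_disable, it instead ends up with the handful
 * of bytes between lgstart_cli and lgend_cli pasted straight over the call
 * site.)
 */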

/*G:029
 * Once we get to lguest_init(), we know we're a Guest.  The various
 * pv_ops structures in the kernel provide points for (almost) every routine we
 * have to override to avoid privileged instructions.
 */
__init void lguest_init(void)
{
	/* We're under lguest. */
	pv_info.name = "lguest";
	/* Paravirt is enabled. */
	pv_info.paravirt_enabled = 1;
	/* We're running at privilege level 1, not 0 as normal. */
	pv_info.kernel_rpl = 1;
	/* Everyone except Xen runs with this set. */
	pv_info.shared_kernel_pmd = 1;

	/*
	 * We set up all the lguest overrides for sensitive operations.  These
	 * are detailed with the operations themselves.
	 */

	/* Interrupt-related operations */
	pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
	pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl);
	pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
	pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable);
	pv_irq_ops.safe_halt = lguest_safe_halt;

	/* Setup operations */
	pv_init_ops.patch = lguest_patch;

	/* Intercepts of various CPU instructions */
	pv_cpu_ops.load_gdt = lguest_load_gdt;
	pv_cpu_ops.cpuid = lguest_cpuid;
	pv_cpu_ops.load_idt = lguest_load_idt;
	pv_cpu_ops.iret = lguest_iret;
	pv_cpu_ops.load_sp0 = lguest_load_sp0;
	pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
	pv_cpu_ops.set_ldt = lguest_set_ldt;
	pv_cpu_ops.load_tls = lguest_load_tls;
	pv_cpu_ops.set_debugreg = lguest_set_debugreg;
	pv_cpu_ops.clts = lguest_clts;
	pv_cpu_ops.read_cr0 = lguest_read_cr0;
	pv_cpu_ops.write_cr0 = lguest_write_cr0;
	pv_cpu_ops.read_cr4 = lguest_read_cr4;
	pv_cpu_ops.write_cr4 = lguest_write_cr4;
	pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
	pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
	pv_cpu_ops.wbinvd = lguest_wbinvd;
	pv_cpu_ops.start_context_switch = paravirt_start_context_switch;
	pv_cpu_ops.end_context_switch = lguest_end_context_switch;

	/* Pagetable management */
	pv_mmu_ops.write_cr3 = lguest_write_cr3;
	pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user;
	pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single;
	pv_mmu_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
	pv_mmu_ops.set_pte = lguest_set_pte;
	pv_mmu_ops.set_pte_at = lguest_set_pte_at;
	pv_mmu_ops.set_pmd = lguest_set_pmd;
#ifdef CONFIG_X86_PAE
	pv_mmu_ops.set_pte_atomic = lguest_set_pte_atomic;
	pv_mmu_ops.pte_clear = lguest_pte_clear;
	pv_mmu_ops.pmd_clear = lguest_pmd_clear;
	pv_mmu_ops.set_pud = lguest_set_pud;
#endif
	pv_mmu_ops.read_cr2 = lguest_read_cr2;
	pv_mmu_ops.read_cr3 = lguest_read_cr3;
	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
	pv_mmu_ops.pte_update = lguest_pte_update;
	pv_mmu_ops.pte_update_defer = lguest_pte_update;

#ifdef CONFIG_X86_LOCAL_APIC
	/* APIC read/write intercepts */
	set_lguest_basic_apic_ops();
#endif

	x86_init.resources.memory_setup = lguest_memory_setup;
	x86_init.irqs.intr_init = lguest_init_IRQ;
	x86_init.timers.timer_init = lguest_time_init;
	x86_platform.calibrate_tsc = lguest_tsc_khz;
	x86_platform.get_wallclock =  lguest_get_wallclock;

	/*
	 * Now is a good time to look at the implementations of these functions
	 * before returning to the rest of lguest_init().
	 */

	/*G:070
	 * Now we've seen all the paravirt_ops, we return to
	 * lguest_init() where the rest of the fairly chaotic boot setup
	 * occurs.
	 */

	/*
	 * The stack protector is a weird thing where gcc places a canary
	 * value on the stack and then checks it on return.  This file is
	 * compiled with -fno-stack-protector, so we got this far without
	 * problems.  The value of the canary is kept at offset 20 from the
	 * %gs register, so we need to set that up before calling C functions
	 * in other files.
	 */
	setup_stack_canary_segment(0);

	/*
	 * We could just call load_stack_canary_segment(), but we might as well
	 * call switch_to_new_gdt() which loads the whole table and sets up the
	 * per-cpu segment descriptor register %fs as well.
	 */
	switch_to_new_gdt(0);

	/*
	 * The Host<->Guest Switcher lives at the top of our address space, and
	 * the Host told us how big it is when we made LGUEST_INIT hypercall:
	 * it put the answer in lguest_data.reserve_mem
	 */
	reserve_top_address(lguest_data.reserve_mem);

	/*
	 * If we don't initialize the lock dependency checker now, it crashes
	 * atomic_notifier_chain_register, then paravirt_disable_iospace.
	 */
	lockdep_init();

	/* Hook in our special panic hypercall code. */
	atomic_notifier_chain_register(&panic_notifier_list, &paniced);

	/*
	 * The IDE code spends about 3 seconds probing for disks: if we reserve
	 * all the I/O ports up front it can't get them and so doesn't probe.
	 * Other device drivers are similar (but less severe).  This cuts the
	 * kernel boot time on my machine from 4.1 seconds to 0.45 seconds.
	 */
	paravirt_disable_iospace();

	/*
	 * This is messy CPU setup stuff which the native boot code does before
	 * start_kernel, so we have to do it, too:
	 */
	cpu_detect(&new_cpu_data);
	/* head.S usually sets up the first capability word, so do it here. */
	new_cpu_data.x86_capability[0] = cpuid_edx(1);

	/* Math is always hard! */
	new_cpu_data.hard_math = 1;

	/* We don't have features.  We have puppies!  Puppies! */
#ifdef CONFIG_X86_MCE
	mce_disabled = 1;
#endif
#ifdef CONFIG_ACPI
	acpi_disabled = 1;
#endif

	/*
	 * We set the preferred console to "hvc".  This is the "hypervisor
	 * virtual console" driver written by the PowerPC people, which we also
	 * adapted for lguest's use.
	 */
	add_preferred_console("hvc", 0, NULL);

	/* Register our very early console. */
	virtio_cons_early_init(early_put_chars);

	/*
	 * Last of all, we set the power management poweroff hook to point to
	 * the Guest routine to power off, and the reboot hook to our restart
	 * routine.
	 */
	pm_power_off = lguest_power_off;
	machine_ops.restart = lguest_restart;

	/*
	 * Now we're set up, call i386_start_kernel() in head32.c and we proceed
	 * to boot as normal.  It never returns.
	 */
	i386_start_kernel();
}
/*
 * This marks the end of stage II of our journey, The Guest.
 *
 * It is now time for us to explore the layer of virtual drivers and complete
 * our understanding of the Guest in "make Drivers".
 */