/*P:010
 * A hypervisor allows multiple Operating Systems to run on a single machine.
 * To quote David Wheeler: "Any problem in computer science can be solved with
 * another layer of indirection."
 *
 * We keep things simple in two ways.  First, we start with a normal Linux
 * kernel and insert a module (lg.ko) which allows us to run other Linux
 * kernels the same way we'd run processes.  We call the first kernel the Host,
 * and the others the Guests.  The program which sets up and configures Guests
 * (such as the example in tools/lguest/lguest.c) is called the Launcher.
 *
 * Secondly, we only run specially modified Guests, not normal kernels: setting
 * CONFIG_LGUEST_GUEST to "y" compiles this file into the kernel so it knows
 * how to be a Guest at boot time.  This means that you can use the same kernel
 * you boot normally (ie. as a Host) as a Guest.
 *
 * These Guests know that they cannot do privileged operations, such as disable
 * interrupts, and that they have to ask the Host to do such things explicitly.
 * This file consists of all the replacements for such low-level native
 * hardware operations: these special Guest versions call the Host.
 *
 * So how does the kernel know it's a Guest?  We'll see that later, but let's
 * just say that we end up here where we replace the native functions in the
 * "paravirt" structures with our Guest versions, then boot like normal.
:*/

/*
 * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/start_kernel.h>
#include <linux/string.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/lguest.h>
#include <linux/lguest_launcher.h>
#include <linux/virtio_console.h>
#include <linux/pm.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/virtio_pci.h>
#include <asm/acpi.h>
#include <asm/apic.h>
#include <asm/lguest.h>
#include <asm/paravirt.h>
#include <asm/param.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/e820.h>
#include <asm/mce.h>
#include <asm/io.h>
#include <asm/i387.h>
#include <asm/stackprotector.h>
#include <asm/reboot.h>		/* for struct machine_ops */
#include <asm/kvm_para.h>
#include <asm/pci_x86.h>
#include <asm/pci-direct.h>

/*G:010
 * Welcome to the Guest!
 *
 * The Guest in our tale is a simple creature: identical to the Host but
 * behaving in simplified but equivalent ways.  In particular, the Guest is the
 * same kernel as the Host (or at least, built from the same source code).
:*/

struct lguest_data lguest_data = {
	.hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF },
	.noirq_iret = (u32)lguest_noirq_iret,
	.kernel_address = PAGE_OFFSET,
	.blocked_interrupts = { 1 }, /* Block timer interrupts */
	.syscall_vec = SYSCALL_VECTOR,
};

/*G:037
 * async_hcall() is pretty simple: I'm quite proud of it really.  We have a
 * ring buffer of stored hypercalls which the Host will run through next time we
 * do a normal hypercall.  Each entry in the ring has 5 slots for the hypercall
 * arguments, and a "hcall_status" word which is 0 if the call is ready to go,
 * and 255 once the Host has finished with it.
 *
 * If we come around to a slot which hasn't been finished, then the table is
 * full and we just make the hypercall directly.  This has the nice side
 * effect of causing the Host to run all the stored calls in the ring buffer
 * which empties it for next time!
 */
static void async_hcall(unsigned long call, unsigned long arg1,
			unsigned long arg2, unsigned long arg3,
			unsigned long arg4)
{
	/* Note: This code assumes we're uniprocessor. */
	static unsigned int next_call;
	unsigned long flags;

	/*
	 * Disable interrupts if not already disabled: we don't want an
	 * interrupt handler making a hypercall while we're already doing
	 * one!
	 */
	local_irq_save(flags);
	if (lguest_data.hcall_status[next_call] != 0xFF) {
		/* Table full, so do normal hcall which will flush table. */
		hcall(call, arg1, arg2, arg3, arg4);
	} else {
		lguest_data.hcalls[next_call].arg0 = call;
		lguest_data.hcalls[next_call].arg1 = arg1;
		lguest_data.hcalls[next_call].arg2 = arg2;
		lguest_data.hcalls[next_call].arg3 = arg3;
		lguest_data.hcalls[next_call].arg4 = arg4;
		/* Arguments must all be written before we mark it to go */
		wmb();
		lguest_data.hcall_status[next_call] = 0;
		if (++next_call == LHCALL_RING_SIZE)
			next_call = 0;
	}
	local_irq_restore(flags);
}

/*G:035
 * Notice the lazy_hcall() above, rather than hcall().  This is our first real
 * optimization trick!
 *
 * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
 * them as a batch when lazy_mode is eventually turned off.  Because hypercalls
 * are reasonably expensive, batching them up makes sense.  For example, a
 * large munmap might update dozens of page table entries: that code calls
 * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
 * lguest_leave_lazy_mode().
 *
 * So, when we're in lazy mode, we call async_hcall() to store the call for
 * future processing:
 */
static void lazy_hcall1(unsigned long call, unsigned long arg1)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, 0, 0, 0);
	else
		async_hcall(call, arg1, 0, 0, 0);
}

/* You can imagine what lazy_hcall2, 3 and 4 look like. :*/
static void lazy_hcall2(unsigned long call,
			unsigned long arg1,
			unsigned long arg2)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, arg2, 0, 0);
	else
		async_hcall(call, arg1, arg2, 0, 0);
}

static void lazy_hcall3(unsigned long call,
			unsigned long arg1,
			unsigned long arg2,
			unsigned long arg3)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, arg2, arg3, 0);
	else
		async_hcall(call, arg1, arg2, arg3, 0);
}

#ifdef CONFIG_X86_PAE
static void lazy_hcall4(unsigned long call,
			unsigned long arg1,
			unsigned long arg2,
			unsigned long arg3,
			unsigned long arg4)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, arg2, arg3, arg4);
	else
		async_hcall(call, arg1, arg2, arg3, arg4);
}
#endif

/*G:036
 * When lazy mode is turned off, we issue the do-nothing hypercall to
 * flush any stored calls, and call the generic helper to reset the
 * per-cpu lazy mode variable.
 */
static void lguest_leave_lazy_mmu_mode(void)
{
	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
	paravirt_leave_lazy_mmu();
}

/*
 * We also catch the end of context switch; we enter lazy mode for much of
 * that too, so again we need to flush here.
 *
 * (Technically, this is lazy CPU mode, and normally we're in lazy MMU
 * mode, but unlike Xen, lguest doesn't care about the difference).
 */
static void lguest_end_context_switch(struct task_struct *next)
{
	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
	paravirt_end_context_switch(next);
}

/*G:032
 * After that diversion we return to our first native-instruction
 * replacements: four functions for interrupt control.
 *
 * The simplest way of implementing these would be to have "turn interrupts
 * off" and "turn interrupts on" hypercalls.  Unfortunately, this is too slow:
 * these are by far the most commonly called functions of those we override.
 *
 * So instead we keep an "irq_enabled" field inside our "struct lguest_data",
 * which the Guest can update with a single instruction.  The Host knows to
 * check there before it tries to deliver an interrupt.
 */

/*
 * save_flags() is expected to return the processor state (ie. "flags").  The
 * flags word contains all kind of stuff, but in practice Linux only cares
 * about the interrupt flag.  Our "save_flags()" just returns that.
 */
asmlinkage __visible unsigned long lguest_save_fl(void)
{
	return lguest_data.irq_enabled;
}

/* Interrupts go off... */
asmlinkage __visible void lguest_irq_disable(void)
{
	lguest_data.irq_enabled = 0;
}

/*
 * Let's pause a moment.  Remember how I said these are called so often?
 * Jeremy Fitzhardinge optimized them so hard early in 2009 that he had to
 * break some rules.  In particular, these functions are assumed to save their
 * own registers if they need to: normal C functions assume they can trash the
 * eax register.  To use normal C functions, we use
 * PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the
 * C function, then restores it.
 */
PV_CALLEE_SAVE_REGS_THUNK(lguest_save_fl);
PV_CALLEE_SAVE_REGS_THUNK(lguest_irq_disable);
/*:*/

/* These are in head_32.S */
extern void lg_irq_enable(void);
extern void lg_restore_fl(unsigned long flags);

/*M:003
 * We could be more efficient in our checking of outstanding interrupts, rather
 * than using a branch.  One way would be to put the "irq_enabled" field in a
 * page by itself, and have the Host write-protect it when an interrupt comes
 * in when irqs are disabled.  There will then be a page fault as soon as
 * interrupts are re-enabled.
 *
 * A better method is to implement soft interrupt disable generally for x86:
 * instead of disabling interrupts, we set a flag.  If an interrupt does come
 * in, we then disable them for real.  This is uncommon, so we could simply use
 * a hypercall for interrupt control and not worry about efficiency.
:*/

/*G:034
 * The Interrupt Descriptor Table (IDT).
 *
 * The IDT tells the processor what to do when an interrupt comes in.  Each
 * entry in the table is a 64-bit descriptor: this holds the privilege level,
 * address of the handler, and... well, who cares?  The Guest just asks the
 * Host to make the change anyway, because the Host controls the real IDT.
 */
static void lguest_write_idt_entry(gate_desc *dt,
				   int entrynum, const gate_desc *g)
{
	/*
	 * The gate_desc structure is 8 bytes long: we hand it to the Host in
	 * two 32-bit chunks.  The whole 32-bit kernel used to hand descriptors
	 * around like this; typesafety wasn't a big concern in Linux's early
	 * years.
	 */
	u32 *desc = (u32 *)g;
	/* Keep the local copy up to date. */
	native_write_idt_entry(dt, entrynum, g);
	/* Tell Host about this new entry. */
	hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1], 0);
}

/*
 * Changing to a different IDT is very rare: we keep the IDT up-to-date every
 * time it is written, so we can simply loop through all entries and tell the
 * Host about them.
 */
static void lguest_load_idt(const struct desc_ptr *desc)
{
	unsigned int i;
	struct desc_struct *idt = (void *)desc->address;

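	/*
	 * The size in the descriptor is the limit (length in bytes minus one),
	 * and each IDT entry is 8 bytes, so (size+1)/8 is the entry count.
	 */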
	for (i = 0; i < (desc->size+1)/8; i++)
		hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b, 0);
}

/*
 * The Global Descriptor Table.
 *
 * The Intel architecture defines another table, called the Global Descriptor
 * Table (GDT).  You tell the CPU where it is (and its size) using the "lgdt"
 * instruction, and then several other instructions refer to entries in the
 * table.  There are three entries which the Switcher needs, so the Host simply
 * controls the entire thing and the Guest asks it to make changes using the
 * LOAD_GDT hypercall.
 *
 * This is exactly like the IDT code.
 */
static void lguest_load_gdt(const struct desc_ptr *desc)
{
	unsigned int i;
	struct desc_struct *gdt = (void *)desc->address;

	for (i = 0; i < (desc->size+1)/8; i++)
		hcall(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b, 0);
}

/*
 * For a single GDT entry which changes, we simply change our copy and
 * then tell the host about it.
 */
static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
				   const void *desc, int type)
{
	native_write_gdt_entry(dt, entrynum, desc, type);
	/* Tell Host about this new entry. */
	hcall(LHCALL_LOAD_GDT_ENTRY, entrynum,
	      dt[entrynum].a, dt[entrynum].b, 0);
}

/*
 * There are three "thread local storage" GDT entries which change
 * on every context switch (these three entries are how glibc implements
 * __thread variables).  As an optimization, we have a hypercall
 * specifically for this case.
 *
 * Wouldn't it be nicer to have a general LOAD_GDT_ENTRIES hypercall
 * which took a range of entries?
 */
static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * There's one problem which normal hardware doesn't have: the Host
	 * can't handle us removing entries we're currently using.  So we clear
	 * the GS register here: if it's needed it'll be reloaded anyway.
	 */
	lazy_load_gs(0);
	lazy_hcall2(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu);
}

/*G:038
 * That's enough excitement for now, back to ploughing through each of the
 * different pv_ops structures (we're about 1/3 of the way through).
 *
 * This is the Local Descriptor Table, another weird Intel thingy.  Linux only
 * uses this for some strange applications like Wine.  We don't do anything
 * here, so they'll get an informative and friendly Segmentation Fault.
 */
static void lguest_set_ldt(const void *addr, unsigned entries)
{
}

/*
 * This loads a GDT entry into the "Task Register": that entry points to a
 * structure called the Task State Segment.  Some comments scattered through the
 * kernel code indicate that this used for task switching in ages past, along
 * with blood sacrifice and astrology.
 *
 * Now there's nothing interesting in here that we don't get told elsewhere.
 * But the native version uses the "ltr" instruction, which makes the Host
 * complain to the Guest about a Segmentation Fault and it'll oops.  So we
 * override the native version with a do-nothing version.
 */
static void lguest_load_tr_desc(void)
{
}

/*
 * The "cpuid" instruction is a way of querying both the CPU identity
 * (manufacturer, model, etc) and its features.  It was introduced before the
 * Pentium in 1993 and keeps getting extended by Intel, AMD and others.
 * As you might imagine, after a decade and a half this treatment, it is now a
 * giant ball of hair.  Its entry in the current Intel manual runs to 28 pages.
 *
 * This instruction even has its own Wikipedia entry.  The Wikipedia entry
 * has been translated into 6 languages.  I am not making this up!
 *
 * We could get funky here and identify ourselves as "GenuineLguest", but
 * instead we just use the real "cpuid" instruction.  Then I pretty much turned
 * off feature bits until the Guest booted.  (Don't say that: you'll damage
 * lguest sales!)  Shut up, inner voice!  (Hey, just pointing out that this is
 * hardly future proof.)  No one's listening!  They don't like you anyway,
 * parenthetic weirdo!
 *
 * Replacing the cpuid so we can turn features off is great for the kernel, but
 * anyone (including userspace) can just use the raw "cpuid" instruction and
 * the Host won't even notice since it isn't privileged.  So we try not to get
 * too worked up about it.
 */
static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
			 unsigned int *cx, unsigned int *dx)
{
	int function = *ax;

	native_cpuid(ax, bx, cx, dx);
	switch (function) {
	/*
	 * CPUID 0 gives the highest legal CPUID number (and the ID string).
	 * We futureproof our code a little by sticking to known CPUID values.
	 */
	case 0:
		if (*ax > 5)
			*ax = 5;
		break;

	/*
	 * CPUID 1 is a basic feature request.
	 *
	 * CX: we only allow kernel to see SSE3, CMPXCHG16B and SSSE3
	 * DX: SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU and PAE.
	 */
	case 1:
		*cx &= 0x00002201;
		*dx &= 0x07808151;
		/*
		 * The Host can do a nice optimization if it knows that the
		 * kernel mappings (addresses above 0xC0000000 or whatever
		 * PAGE_OFFSET is set to) haven't changed.  But Linux calls
		 * flush_tlb_user() for both user and kernel mappings unless
		 * the Page Global Enable (PGE) feature bit is set.
		 */
		*dx |= 0x00002000;
		/*
		 * We also lie, and say we're family id 5.  6 or greater
		 * leads to a rdmsr in early_init_intel which we can't handle.
		 * Family ID is returned as bits 8-12 in ax.
		 */
		*ax &= 0xFFFFF0FF;
		*ax |= 0x00000500;
		break;

	/*
	 * This is used to detect if we're running under KVM.  We might be,
	 * but that's a Host matter, not us.  So say we're not.
	 */
	case KVM_CPUID_SIGNATURE:
		*bx = *cx = *dx = 0;
		break;

	/*
	 * 0x80000000 returns the highest Extended Function, so we futureproof
	 * like we do above by limiting it to known fields.
	 */
	case 0x80000000:
		if (*ax > 0x80000008)
			*ax = 0x80000008;
		break;

	/*
	 * PAE systems can mark pages as non-executable.  Linux calls this the
	 * NX bit.  Intel calls it XD (eXecute Disable), AMD EVP (Enhanced
	 * Virus Protection).  We just switch it off here, since we don't
	 * support it.
	 */
	case 0x80000001:
		*dx &= ~(1 << 20);
		break;
	}
}

/*
 * Intel has four control registers, imaginatively named cr0, cr2, cr3 and cr4.
 * I assume there's a cr1, but it hasn't bothered us yet, so we'll not bother
 * it.  The Host needs to know when the Guest wants to change them, so we have
 * a whole series of functions like read_cr0() and write_cr0().
 *
 * We start with cr0.  cr0 allows you to turn on and off all kinds of basic
 * features, but Linux only really cares about one: the horrifically-named Task
 * Switched (TS) bit at bit 3 (ie. 8).
 *
 * What does the TS bit do?  Well, it causes the CPU to trap (interrupt 7) if
 * the floating point unit is used.  Which allows us to restore FPU state
 * lazily after a task switch, and Linux uses that gratefully, but wouldn't a
 * name like "FPUTRAP bit" be a little less cryptic?
 *
 * We store cr0 locally because the Host never changes it.  The Guest sometimes
 * wants to read it and we'd prefer not to bother the Host unnecessarily.
 */
static unsigned long current_cr0;
static void lguest_write_cr0(unsigned long val)
{
	lazy_hcall1(LHCALL_TS, val & X86_CR0_TS);
	current_cr0 = val;
}

static unsigned long lguest_read_cr0(void)
{
	return current_cr0;
}

/*
 * Intel provided a special instruction to clear the TS bit for people too cool
 * to use write_cr0() to do it.  This "clts" instruction is faster, because all
 * the vowels have been optimized out.
 */
static void lguest_clts(void)
{
	lazy_hcall1(LHCALL_TS, 0);
	current_cr0 &= ~X86_CR0_TS;
}

/*
 * cr2 is the virtual address of the last page fault, which the Guest only ever
 * reads.  The Host kindly writes this into our "struct lguest_data", so we
 * just read it out of there.
 */
static unsigned long lguest_read_cr2(void)
{
	return lguest_data.cr2;
}

/* See lguest_set_pte() below. */
static bool cr3_changed = false;
static unsigned long current_cr3;

/*
 * cr3 is the current toplevel pagetable page: the principle is the same as
 * cr0.  Keep a local copy, and tell the Host when it changes.
 */
static void lguest_write_cr3(unsigned long cr3)
{
	lazy_hcall1(LHCALL_NEW_PGTABLE, cr3);
	current_cr3 = cr3;

	/* These two page tables are simple, linear, and used during boot */
	if (cr3 != __pa_symbol(swapper_pg_dir) &&
	    cr3 != __pa_symbol(initial_page_table))
		cr3_changed = true;
}

static unsigned long lguest_read_cr3(void)
{
	return current_cr3;
}

/* cr4 is used to enable and disable PGE, but we don't care. */
static unsigned long lguest_read_cr4(void)
{
	return 0;
}

static void lguest_write_cr4(unsigned long val)
{
}

/*
 * Page Table Handling.
 *
 * Now would be a good time to take a rest and grab a coffee or similarly
 * relaxing stimulant.  The easy parts are behind us, and the trek gradually
 * winds uphill from here.
 *
 * Quick refresher: memory is divided into "pages" of 4096 bytes each.  The CPU
 * maps virtual addresses to physical addresses using "page tables".  We could
 * use one huge index of 1 million entries: each address is 4 bytes, so that's
 * 1024 pages just to hold the page tables.   But since most virtual addresses
 * are unused, we use a two level index which saves space.  The cr3 register
 * contains the physical address of the top level "page directory" page, which
 * contains physical addresses of up to 1024 second-level pages.  Each of these
 * second level pages contains up to 1024 physical addresses of actual pages,
 * or Page Table Entries (PTEs).
 *
 * Here's a diagram, where arrows indicate physical addresses:
 *
 * cr3 ---> +---------+
 *	    |  	   --------->+---------+
 *	    |	      |	     | PADDR1  |
 *	  Mid-level   |	     | PADDR2  |
 *	  (PMD) page  |	     | 	       |
 *	    |	      |	   Lower-level |
 *	    |	      |	   (PTE) page  |
 *	    |	      |	     |	       |
 *	      ....    	     	 ....
 *
 * So to convert a virtual address to a physical address, we look up the top
 * level, which points us to the second level, which gives us the physical
 * address of that page.  If the top level entry was not present, or the second
 * level entry was not present, then the virtual address is invalid (we
 * say "the page was not mapped").
 *
 * Put another way, a 32-bit virtual address is divided up like so:
 *
 *  1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 * |<---- 10 bits ---->|<---- 10 bits ---->|<------ 12 bits ------>|
 *    Index into top     Index into second      Offset within page
 *  page directory page    pagetable page
 *
 * Now, unfortunately, this isn't the whole story: Intel added Physical Address
 * Extension (PAE) to allow 32 bit systems to use 64GB of memory (ie. 36 bits).
 * These are held in 64-bit page table entries, so we can now only fit 512
 * entries in a page, and the neat three-level tree breaks down.
 *
 * The result is a four level page table:
 *
 * cr3 --> [ 4 Upper  ]
 *	   [   Level  ]
 *	   [  Entries ]
 *	   [(PUD Page)]---> +---------+
 *	 		    |  	   --------->+---------+
 *	 		    |	      |	     | PADDR1  |
 *	 		  Mid-level   |	     | PADDR2  |
 *	 		  (PMD) page  |	     | 	       |
 *	 		    |	      |	   Lower-level |
 *	 		    |	      |	   (PTE) page  |
 *	 		    |	      |	     |	       |
 *	 		      ....    	     	 ....
 *
 *
 * And the virtual address is decoded as:
 *
 *         1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 *      |<-2->|<--- 9 bits ---->|<---- 9 bits --->|<------ 12 bits ------>|
 * Index into    Index into mid    Index into lower    Offset within page
 * top entries   directory page     pagetable page
 *
 * It's too hard to switch between these two formats at runtime, so Linux only
 * supports one or the other depending on whether CONFIG_X86_PAE is set.  Many
 * distributions turn it on, and not just for people with silly amounts of
 * memory: the larger PTE entries allow room for the NX bit, which lets the
 * kernel disable execution of pages and increase security.
 *
 * This was a problem for lguest, which couldn't run on these distributions;
 * then Matias Zabaljauregui figured it all out and implemented it, and only a
 * handful of puppies were crushed in the process!
 *
 * Back to our point: the kernel spends a lot of time changing both the
 * top-level page directory and lower-level pagetable pages.  The Guest doesn't
 * know physical addresses, so while it maintains these page tables exactly
 * like normal, it also needs to keep the Host informed whenever it makes a
 * change: the Host will create the real page tables based on the Guests'.
 */
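
/*
 * Purely as an illustrative sketch of the 10/10/12 bit split described above
 * (nothing in this file uses these helpers), a non-PAE virtual address would
 * decode like this:
 */
static inline unsigned long example_pgd_index(unsigned long vaddr)
{
	return vaddr >> 22;		/* top 10 bits: page directory slot */
}

static inline unsigned long example_pte_index(unsigned long vaddr)
{
	return (vaddr >> 12) & 0x3FF;	/* middle 10 bits: PTE page slot */
}

static inline unsigned long example_page_offset(unsigned long vaddr)
{
	return vaddr & 0xFFF;		/* bottom 12 bits: offset in the page */
}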

/*
 * The Guest calls this after it has set a second-level entry (pte), ie. to map
 * a page into a process' address space.  We tell the Host the toplevel and
 * address this corresponds to.  The Guest uses one pagetable per process, so
 * we need to tell the Host which one we're changing (mm->pgd).
 */
static void lguest_pte_update(struct mm_struct *mm, unsigned long addr,
			       pte_t *ptep)
{
#ifdef CONFIG_X86_PAE
	/* PAE needs to hand a 64 bit page table entry, so it uses two args. */
	lazy_hcall4(LHCALL_SET_PTE, __pa(mm->pgd), addr,
		    ptep->pte_low, ptep->pte_high);
#else
	lazy_hcall3(LHCALL_SET_PTE, __pa(mm->pgd), addr, ptep->pte_low);
#endif
}

/* This is the "set and update" combo-meal-deal version. */
static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	native_set_pte(ptep, pteval);
	lguest_pte_update(mm, addr, ptep);
}

/*
 * The Guest calls lguest_set_pud to set a top-level entry and lguest_set_pmd
 * to set a middle-level entry when PAE is activated.
 *
 * Again, we set the entry then tell the Host which page we changed,
 * and the index of the entry we changed.
 */
#ifdef CONFIG_X86_PAE
static void lguest_set_pud(pud_t *pudp, pud_t pudval)
{
	native_set_pud(pudp, pudval);

	/* 32 bytes aligned pdpt address and the index. */
	lazy_hcall2(LHCALL_SET_PGD, __pa(pudp) & 0xFFFFFFE0,
		   (__pa(pudp) & 0x1F) / sizeof(pud_t));
}

static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	native_set_pmd(pmdp, pmdval);
	lazy_hcall2(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK,
		   (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
}
#else

/* The Guest calls lguest_set_pmd to set a top-level entry when !PAE. */
static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	native_set_pmd(pmdp, pmdval);
	lazy_hcall2(LHCALL_SET_PGD, __pa(pmdp) & PAGE_MASK,
		   (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
}
#endif

/*
 * There are a couple of legacy places where the kernel sets a PTE, but we
 * don't know the top level any more.  This is useless for us, since we don't
 * know which pagetable is changing or what address, so we just tell the Host
 * to forget all of them.  Fortunately, this is very rare.
 *
 * ... except in early boot when the kernel sets up the initial pagetables,
 * which makes booting astonishingly slow: 48 seconds!  So we don't even tell
 * the Host anything changed until we've done the first real page table switch,
 * which brings boot back to 4.3 seconds.
 */
static void lguest_set_pte(pte_t *ptep, pte_t pteval)
{
	native_set_pte(ptep, pteval);
	if (cr3_changed)
		lazy_hcall1(LHCALL_FLUSH_TLB, 1);
}

#ifdef CONFIG_X86_PAE
/*
 * With 64-bit PTE values, we need to be careful setting them: if we set 32
 * bits at a time, the hardware could see a weird half-set entry.  These
 * versions ensure we update all 64 bits at once.
 */
static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte_atomic(ptep, pte);
	if (cr3_changed)
		lazy_hcall1(LHCALL_FLUSH_TLB, 1);
}

static void lguest_pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	native_pte_clear(mm, addr, ptep);
	lguest_pte_update(mm, addr, ptep);
}

static void lguest_pmd_clear(pmd_t *pmdp)
{
	lguest_set_pmd(pmdp, __pmd(0));
}
#endif

/*
 * Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
 * native page table operations.  On native hardware you can set a new page
 * table entry whenever you want, but if you want to remove one you have to do
 * a TLB flush (a TLB is a little cache of page table entries kept by the CPU).
 *
 * So the lguest_set_pte_at() and lguest_set_pmd() functions above are only
 * called when a valid entry is written, not when it's removed (ie. marked not
 * present).  Instead, this is where we come when the Guest wants to remove a
 * page table entry: we tell the Host to set that entry to 0 (ie. the present
 * bit is zero).
 */
static void lguest_flush_tlb_single(unsigned long addr)
{
	/* Simply set it to zero: if it was not, it will fault back in. */
	lazy_hcall3(LHCALL_SET_PTE, current_cr3, addr, 0);
}

/*
 * This is what happens after the Guest has removed a large number of entries.
 * This tells the Host that any of the page table entries for userspace might
 * have changed, ie. virtual addresses below PAGE_OFFSET.
 */
static void lguest_flush_tlb_user(void)
{
	lazy_hcall1(LHCALL_FLUSH_TLB, 0);
}

/*
 * This is called when the kernel page tables have changed.  That's not very
 * common (unless the Guest is using highmem, which makes the Guest extremely
 * slow), so it's worth separating this from the user flushing above.
 */
static void lguest_flush_tlb_kernel(void)
{
	lazy_hcall1(LHCALL_FLUSH_TLB, 1);
}

/*
 * The Unadvanced Programmable Interrupt Controller.
 *
 * This is an attempt to implement the simplest possible interrupt controller.
 * I spent some time looking though routines like set_irq_chip_and_handler,
 * set_irq_chip_and_handler_name, set_irq_chip_data and set_phasers_to_stun and
 * I *think* this is as simple as it gets.
 *
 * We can tell the Host what interrupts we want blocked ready for using the
 * lguest_data.interrupts bitmap, so disabling (aka "masking") them is as
 * simple as setting a bit.  We don't actually "ack" interrupts as such, we
 * just mask and unmask them.  I wonder if we should be cleverer?
 */
static void disable_lguest_irq(struct irq_data *data)
{
	set_bit(data->irq, lguest_data.blocked_interrupts);
}

static void enable_lguest_irq(struct irq_data *data)
{
	clear_bit(data->irq, lguest_data.blocked_interrupts);
}

/* This structure describes the lguest IRQ controller. */
static struct irq_chip lguest_irq_controller = {
	.name		= "lguest",
	.irq_mask	= disable_lguest_irq,
	.irq_mask_ack	= disable_lguest_irq,
	.irq_unmask	= enable_lguest_irq,
};

static int lguest_enable_irq(struct pci_dev *dev)
{
	u8 line = 0;

	/* We literally use the PCI interrupt line as the irq number. */
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line);
	irq_set_chip_and_handler_name(line, &lguest_irq_controller,
				      handle_level_irq, "level");
	dev->irq = line;
	return 0;
}

/* We don't do hotplug PCI, so this shouldn't be called. */
static void lguest_disable_irq(struct pci_dev *dev)
{
	WARN_ON(1);
}

/*
 * This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
 * interrupt (except 128, which is used for system calls), and then tells the
 * Linux infrastructure that each interrupt is controlled by our level-based
 * lguest interrupt controller.
 */
static void __init lguest_init_IRQ(void)
{
	unsigned int i;

	for (i = FIRST_EXTERNAL_VECTOR; i < FIRST_SYSTEM_VECTOR; i++) {
		/* Some systems map "vectors" to interrupts weirdly.  Not us! */
		__this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR);
		if (i != SYSCALL_VECTOR)
			set_intr_gate(i, irq_entries_start +
					8 * (i - FIRST_EXTERNAL_VECTOR));
	}

	/*
	 * This call is required to set up for 4k stacks, where we have
	 * separate stacks for hard and soft interrupts.
	 */
	irq_ctx_init(smp_processor_id());
}

/*
 * Interrupt descriptors are allocated as-needed, but low-numbered ones are
 * reserved by the generic x86 code.  So we ignore irq_alloc_desc_at if it
 * tells us the irq is already used: other errors (ie. ENOMEM) we take
 * seriously.
 */
int lguest_setup_irq(unsigned int irq)
{
	int err;

	/* Returns -ve error or vector number. */
	err = irq_alloc_desc_at(irq, 0);
	if (err < 0 && err != -EEXIST)
		return err;

	irq_set_chip_and_handler_name(irq, &lguest_irq_controller,
				      handle_level_irq, "level");
	return 0;
}

/*
 * Time.
 *
 * It would be far better for everyone if the Guest had its own clock, but
 * until then the Host gives us the time on every interrupt.
 */
static void lguest_get_wallclock(struct timespec *now)
{
	*now = lguest_data.time;
}

/*
 * The TSC is an Intel thing called the Time Stamp Counter.  The Host tells us
 * what speed it runs at, or 0 if it's unusable as a reliable clock source.
 * This matches what we want here: if we return 0 from this function, the x86
 * TSC clock will give up and not register itself.
 */
static unsigned long lguest_tsc_khz(void)
{
	return lguest_data.tsc_khz;
}

/*
 * If we can't use the TSC, the kernel falls back to our lower-priority
 * "lguest_clock", where we read the time value given to us by the Host.
 */
static cycle_t lguest_clock_read(struct clocksource *cs)
{
	unsigned long sec, nsec;

	/*
	 * Since the time is in two parts (seconds and nanoseconds), we risk
	 * reading it just as it's changing from 99 & 0.999999999 to 100 and 0,
	 * and getting 99 and 0.  As Linux tends to come apart under the stress
	 * of time travel, we must be careful:
	 */
	do {
		/* First we read the seconds part. */
		sec = lguest_data.time.tv_sec;
		/*
		 * This read memory barrier tells the compiler and the CPU that
		 * this can't be reordered: we have to complete the above
		 * before going on.
		 */
		rmb();
		/* Now we read the nanoseconds part. */
		nsec = lguest_data.time.tv_nsec;
		/* Make sure we've done that. */
		rmb();
		/* Now if the seconds part has changed, try again. */
	} while (unlikely(lguest_data.time.tv_sec != sec));

	/* Our lguest clock is in real nanoseconds. */
	return sec*1000000000ULL + nsec;
}

/* This is the fallback clocksource: lower priority than the TSC clocksource. */
static struct clocksource lguest_clock = {
	.name		= "lguest",
	.rating		= 200,
	.read		= lguest_clock_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * We also need a "struct clock_event_device": Linux asks us to set it to go
 * off some time in the future.  Actually, James Morris figured all this out, I
 * just applied the patch.
 */
static int lguest_clockevent_set_next_event(unsigned long delta,
                                           struct clock_event_device *evt)
{
	/* FIXME: I don't think this can ever happen, but James tells me he had
	 * to put this code in.  Maybe we should remove it now.  Anyone? */
	if (delta < LG_CLOCK_MIN_DELTA) {
		if (printk_ratelimit())
			printk(KERN_DEBUG "%s: small delta %lu ns\n",
			       __func__, delta);
		return -ETIME;
	}

	/* Please wake us this far in the future. */
	hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0, 0);
	return 0;
}

static void lguest_clockevent_set_mode(enum clock_event_mode mode,
                                      struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/* A 0 argument shuts the clock down. */
		hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/* This is what we expect. */
		break;
	case CLOCK_EVT_MODE_PERIODIC:
		BUG();
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

/* This describes our primitive timer chip. */
static struct clock_event_device lguest_clockevent = {
	.name                   = "lguest",
	.features               = CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event         = lguest_clockevent_set_next_event,
	.set_mode               = lguest_clockevent_set_mode,
	.rating                 = INT_MAX,
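	/* With mult 1 and shift 0, the delta we're handed is already in ns. */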
	.mult                   = 1,
	.shift                  = 0,
	.min_delta_ns           = LG_CLOCK_MIN_DELTA,
	.max_delta_ns           = LG_CLOCK_MAX_DELTA,
};

/*
 * This is the Guest timer interrupt handler (hardware interrupt 0).  We just
 * call the clockevent infrastructure and it does whatever needs doing.
 */
static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
{
	unsigned long flags;

	/* Don't interrupt us while this is running. */
	local_irq_save(flags);
	lguest_clockevent.event_handler(&lguest_clockevent);
	local_irq_restore(flags);
}

/*
 * At some point in the boot process, we get asked to set up our timing
 * infrastructure.  The kernel doesn't expect timer interrupts before this, but
 * we cleverly initialized the "blocked_interrupts" field of "struct
 * lguest_data" so that timer interrupts were blocked until now.
 */
static void lguest_time_init(void)
{
	/* Set up the timer interrupt (0) to go to our simple timer routine */
	lguest_setup_irq(0);
	irq_set_handler(0, lguest_time_irq);

	clocksource_register_hz(&lguest_clock, NSEC_PER_SEC);

	/* We can't set cpumask in the initializer: damn C limitations!  Set it
	 * here and register our timer device. */
	lguest_clockevent.cpumask = cpumask_of(0);
	clockevents_register_device(&lguest_clockevent);

	/* Finally, we unblock the timer interrupt. */
	clear_bit(0, lguest_data.blocked_interrupts);
}

/*
 * Miscellaneous bits and pieces.
 *
 * Here is an oddball collection of functions which the Guest needs for things
 * to work.  They're pretty simple.
 */

/*
 * The Guest needs to tell the Host what stack it expects traps to use.  For
 * native hardware, this is part of the Task State Segment mentioned above in
 * lguest_load_tr_desc(), but to help hypervisors there's this special call.
 *
 * We tell the Host the segment we want to use (__KERNEL_DS is the kernel data
 * segment), the privilege level (we're privilege level 1, the Host is 0 and
 * will not tolerate us trying to use that), the stack pointer, and the number
 * of pages in the stack.
 */
static void lguest_load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	lazy_hcall3(LHCALL_SET_STACK, __KERNEL_DS | 0x1, thread->sp0,
		   THREAD_SIZE / PAGE_SIZE);
	tss->x86_tss.sp0 = thread->sp0;
}

/* Let's just say, I wouldn't do debugging under a Guest. */
static unsigned long lguest_get_debugreg(int regno)
{
	/* FIXME: Implement */
	return 0;
}

static void lguest_set_debugreg(int regno, unsigned long value)
{
	/* FIXME: Implement */
}

/*
 * There are times when the kernel wants to make sure that no memory writes are
 * caught in the cache (that they've all reached real hardware devices).  This
 * doesn't matter for the Guest which has virtual hardware.
 *
 * On the Pentium 4 and above, cpuid() indicates that the Cache Line Flush
 * (clflush) instruction is available and the kernel uses that.  Otherwise, it
 * uses the older "Write Back and Invalidate Cache" (wbinvd) instruction.
 * Unlike clflush, wbinvd can only be run at privilege level 0.  So we can
 * ignore clflush, but replace wbinvd.
 */
static void lguest_wbinvd(void)
{
}

/*
 * If the Guest expects to have an Advanced Programmable Interrupt Controller,
 * we play dumb by ignoring writes and returning 0 for reads.  So it's no
 * longer Programmable nor Controlling anything, and I don't think 8 lines of
 * code qualifies for Advanced.  It will also never interrupt anything.  It
 * does, however, allow us to get through the Linux boot code.
 */
#ifdef CONFIG_X86_LOCAL_APIC
static void lguest_apic_write(u32 reg, u32 v)
{
}

static u32 lguest_apic_read(u32 reg)
{
	return 0;
}

static u64 lguest_apic_icr_read(void)
{
	return 0;
}

static void lguest_apic_icr_write(u32 low, u32 id)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}

static void lguest_apic_wait_icr_idle(void)
{
	return;
}

static u32 lguest_apic_safe_wait_icr_idle(void)
{
	return 0;
}

static void set_lguest_basic_apic_ops(void)
{
	apic->read = lguest_apic_read;
	apic->write = lguest_apic_write;
	apic->icr_read = lguest_apic_icr_read;
	apic->icr_write = lguest_apic_icr_write;
	apic->wait_icr_idle = lguest_apic_wait_icr_idle;
	apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle;
};
#endif

/* STOP!  Until an interrupt comes in. */
static void lguest_safe_halt(void)
{
	hcall(LHCALL_HALT, 0, 0, 0, 0);
}

/*
 * The SHUTDOWN hypercall takes a string to describe what's happening, and
 * an argument which says whether this is to restart (reboot) the Guest or not.
 *
 * Note that the Host always prefers that the Guest speak in physical addresses
 * rather than virtual addresses, so we use __pa() here.
 */
static void lguest_power_off(void)
{
	hcall(LHCALL_SHUTDOWN, __pa("Power down"),
	      LGUEST_SHUTDOWN_POWEROFF, 0, 0);
}

/*
 * Panicking.
 *
 * Don't.  But if you did, this is what happens.
 */
static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
{
	hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0, 0);
	/* The hcall won't return, but to keep gcc happy, we're "done". */
	return NOTIFY_DONE;
}

static struct notifier_block paniced = {
	.notifier_call = lguest_panic
};

/* Setting up memory is fairly easy. */
static __init char *lguest_memory_setup(void)
{
	/*
	 * The Linux bootloader header contains an "e820" memory map: the
	 * Launcher populated the first entry with our memory limit.
	 */
	e820_add_region(boot_params.e820_map[0].addr,
			  boot_params.e820_map[0].size,
			  boot_params.e820_map[0].type);

	/* This string is for the boot messages. */
	return "LGUEST";
}

/* Offset within PCI config space of BAR access capability. */
static int console_cfg_offset = 0;
static int console_access_cap;

/* Set up so that we access off in bar0 (on bus 0, device 1, function 0) */
static void set_cfg_window(u32 cfg_offset, u32 off)
{
	write_pci_config_byte(0, 1, 0,
			      cfg_offset + offsetof(struct virtio_pci_cap, bar),
			      0);
	write_pci_config(0, 1, 0,
			 cfg_offset + offsetof(struct virtio_pci_cap, length),
			 4);
	write_pci_config(0, 1, 0,
			 cfg_offset + offsetof(struct virtio_pci_cap, offset),
			 off);
}

static void write_bar_via_cfg(u32 cfg_offset, u32 off, u32 val)
{
	/*
	 * We could set this up once, then leave it; nothing else in the
	 * kernel should touch these registers.  But if it went wrong, that
	 * would be a horrible bug to find.
	 */
	set_cfg_window(cfg_offset, off);
	write_pci_config(0, 1, 0,
			 cfg_offset + sizeof(struct virtio_pci_cap), val);
}

static void probe_pci_console(void)
{
	u8 cap, common_cap = 0, device_cap = 0;
	/* Offset within BAR0 */
	u32 device_offset;
	u32 device_len;

	/* Avoid recursive printk into here. */
	console_cfg_offset = -1;

	if (!early_pci_allowed()) {
		printk(KERN_ERR "lguest: early PCI access not allowed!\n");
		return;
	}

	/* We expect a console PCI device at BUS0, slot 1. */
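	/* 0x1AF4 is the virtio vendor ID; device 0x1043 is 0x1040 + 3 (console). */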
	if (read_pci_config(0, 1, 0, 0) != 0x10431AF4) {
		printk(KERN_ERR "lguest: PCI device is %#x!\n",
		       read_pci_config(0, 1, 0, 0));
		return;
	}

	/* Find the capabilities we need (must be in bar0) */
	cap = read_pci_config_byte(0, 1, 0, PCI_CAPABILITY_LIST);
	while (cap) {
		u8 vndr = read_pci_config_byte(0, 1, 0, cap);
		if (vndr == PCI_CAP_ID_VNDR) {
			u8 type, bar;
			u32 offset, length;

			type = read_pci_config_byte(0, 1, 0,
			    cap + offsetof(struct virtio_pci_cap, cfg_type));
			bar = read_pci_config_byte(0, 1, 0,
			    cap + offsetof(struct virtio_pci_cap, bar));
			offset = read_pci_config(0, 1, 0,
			    cap + offsetof(struct virtio_pci_cap, offset));
			length = read_pci_config(0, 1, 0,
			    cap + offsetof(struct virtio_pci_cap, length));

			switch (type) {
			case VIRTIO_PCI_CAP_DEVICE_CFG:
				if (bar == 0) {
					device_cap = cap;
					device_offset = offset;
					device_len = length;
				}
				break;
			case VIRTIO_PCI_CAP_PCI_CFG:
				console_access_cap = cap;
				break;
			}
		}
		cap = read_pci_config_byte(0, 1, 0, cap + PCI_CAP_LIST_NEXT);
	}
	if (!device_cap || !console_access_cap) {
		printk(KERN_ERR "lguest: No caps (%u/%u/%u) in console!\n",
		       common_cap, device_cap, console_access_cap);
		return;
	}

	/*
	 * Note that we can't check features, until we've set the DRIVER
	 * status bit.  We don't want to do that until we have a real driver,
	 * so we just check that the device-specific config has room for
	 * emerg_wr.  If it doesn't support VIRTIO_CONSOLE_F_EMERG_WRITE
	 * it should ignore the access.
	 */
	if (device_len < (offsetof(struct virtio_console_config, emerg_wr)
			  + sizeof(u32))) {
		printk(KERN_ERR "lguest: console missing emerg_wr field\n");
		return;
	}

	console_cfg_offset = device_offset;
	printk(KERN_INFO "lguest: Console via virtio-pci emerg_wr\n");
}

/*
 * We will eventually use the virtio console device to produce console output,
 * but before that is set up we use the virtio PCI console's backdoor mmio
 * access and the "emergency" write facility (which is legal even before the
 * device is configured).
 */
static __init int early_put_chars(u32 vtermno, const char *buf, int count)
{
	/* If we couldn't find PCI console, forget it. */
	if (console_cfg_offset < 0)
		return count;

	if (unlikely(!console_cfg_offset)) {
		probe_pci_console();
		if (console_cfg_offset < 0)
			return count;
	}

	write_bar_via_cfg(console_access_cap,
			  console_cfg_offset
			  + offsetof(struct virtio_console_config, emerg_wr),
			  buf[0]);
	return 1;
}

/*
 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
 * Launcher to reboot us.
 */
static void lguest_restart(char *reason)
{
	hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
}

/*G:050
 * Patching (Powerfully Placating Performance Pedants)
 *
 * We have already seen that pv_ops structures let us replace simple native
 * instructions with calls to the appropriate back end all throughout the
 * kernel.  This allows the same kernel to run as a Guest and as a native
 * kernel, but it's slow because of all the indirect branches.
 *
 * Remember that David Wheeler quote about "Any problem in computer science can
 * be solved with another layer of indirection"?  The rest of that quote is
 * "... But that usually will create another problem."  This is the first of
 * those problems.
 *
 * Our current solution is to allow the paravirt back end to optionally patch
 * over the indirect calls to replace them with something more efficient.  We
 * patch two of the simplest of the most commonly called functions: disable
 * interrupts and save flags.  We usually have 6 or 10 bytes to patch
 * into: the Guest versions of these operations are small enough that we can
 * fit comfortably.
 *
 * First we need assembly templates of each of the patchable Guest operations,
 * and these are in head_32.S.
 */
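
/*
 * (A rough sketch of those templates; see head_32.S for the real thing.
 * They are spat out by an LGUEST_PATCH macro and simply poke the
 * irq_enabled flag in the shared lguest_data page, something like:
 *
 *	lgstart_cli:	movl $0, lguest_data+LGUEST_DATA_irq_enabled
 *	lgend_cli:
 *	lgstart_pushf:	movl lguest_data+LGUEST_DATA_irq_enabled, %eax
 *	lgend_pushf:
 * )
 */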

/*G:060 We construct a table from the assembler templates: */
static const struct lguest_insns
{
	const char *start, *end;
} lguest_insns[] = {
	[PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
	[PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
};

/*
 * Now our patch routine is fairly simple (based on the native one in
 * paravirt.c).  If we have a replacement, we copy it in and return how much of
 * the available space we used.
 */
static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
			     unsigned long addr, unsigned len)
{
	unsigned int insn_len;

	/* Don't do anything special if we don't have a replacement */
	if (type >= ARRAY_SIZE(lguest_insns) || !lguest_insns[type].start)
		return paravirt_patch_default(type, clobber, ibuf, addr, len);

	insn_len = lguest_insns[type].end - lguest_insns[type].start;

	/* Similarly if it can't fit (doesn't happen, but let's be thorough). */
	if (len < insn_len)
		return paravirt_patch_default(type, clobber, ibuf, addr, len);

	/* Copy in our instructions. */
	memcpy(ibuf, lguest_insns[type].start, insn_len);
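	/*
	 * Tell the caller how many bytes we used: the generic patching code
	 * pads whatever is left of the site with no-ops.
	 */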
	return insn_len;
}

/*G:029
 * Once we get to lguest_init(), we know we're a Guest.  The various
 * pv_ops structures in the kernel provide points for (almost) every routine we
 * have to override to avoid privileged instructions.
 */
__init void lguest_init(void)
{
	/* We're under lguest. */
	pv_info.name = "lguest";
	/* Paravirt is enabled. */
	pv_info.paravirt_enabled = 1;
	/* We're running at privilege level 1, not 0 as normal. */
	pv_info.kernel_rpl = 1;
	/* Everyone except Xen runs with this set. */
	pv_info.shared_kernel_pmd = 1;

	/*
	 * We set up all the lguest overrides for sensitive operations.  These
	 * are detailed with the operations themselves.
	 */

	/* Interrupt-related operations */
	pv_irq_ops.save_fl = PV_CALLEE_SAVE(lguest_save_fl);
	pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl);
	pv_irq_ops.irq_disable = PV_CALLEE_SAVE(lguest_irq_disable);
	pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable);
	pv_irq_ops.safe_halt = lguest_safe_halt;

	/* Setup operations */
	pv_init_ops.patch = lguest_patch;

	/* Intercepts of various CPU instructions */
	pv_cpu_ops.load_gdt = lguest_load_gdt;
	pv_cpu_ops.cpuid = lguest_cpuid;
	pv_cpu_ops.load_idt = lguest_load_idt;
	pv_cpu_ops.iret = lguest_iret;
	pv_cpu_ops.load_sp0 = lguest_load_sp0;
	pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
	pv_cpu_ops.set_ldt = lguest_set_ldt;
	pv_cpu_ops.load_tls = lguest_load_tls;
	pv_cpu_ops.get_debugreg = lguest_get_debugreg;
	pv_cpu_ops.set_debugreg = lguest_set_debugreg;
	pv_cpu_ops.clts = lguest_clts;
	pv_cpu_ops.read_cr0 = lguest_read_cr0;
	pv_cpu_ops.write_cr0 = lguest_write_cr0;
	pv_cpu_ops.read_cr4 = lguest_read_cr4;
	pv_cpu_ops.write_cr4 = lguest_write_cr4;
	pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
	pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
	pv_cpu_ops.wbinvd = lguest_wbinvd;
	pv_cpu_ops.start_context_switch = paravirt_start_context_switch;
	pv_cpu_ops.end_context_switch = lguest_end_context_switch;

	/* Pagetable management */
	pv_mmu_ops.write_cr3 = lguest_write_cr3;
	pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user;
	pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single;
	pv_mmu_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
	pv_mmu_ops.set_pte = lguest_set_pte;
	pv_mmu_ops.set_pte_at = lguest_set_pte_at;
	pv_mmu_ops.set_pmd = lguest_set_pmd;
#ifdef CONFIG_X86_PAE
	pv_mmu_ops.set_pte_atomic = lguest_set_pte_atomic;
	pv_mmu_ops.pte_clear = lguest_pte_clear;
	pv_mmu_ops.pmd_clear = lguest_pmd_clear;
	pv_mmu_ops.set_pud = lguest_set_pud;
#endif
	pv_mmu_ops.read_cr2 = lguest_read_cr2;
	pv_mmu_ops.read_cr3 = lguest_read_cr3;
	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
	pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
	pv_mmu_ops.pte_update = lguest_pte_update;
	pv_mmu_ops.pte_update_defer = lguest_pte_update;

#ifdef CONFIG_X86_LOCAL_APIC
	/* APIC read/write intercepts */
	set_lguest_basic_apic_ops();
#endif

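	/*
	 * Boot-time hooks: how to find our memory map, how to set up
	 * interrupts and the clock, how fast the TSC ticks and what time it
	 * is.  All of these are answered by asking the Host rather than
	 * probing hardware we don't have.
	 */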
	x86_init.resources.memory_setup = lguest_memory_setup;
	x86_init.irqs.intr_init = lguest_init_IRQ;
	x86_init.timers.timer_init = lguest_time_init;
	x86_platform.calibrate_tsc = lguest_tsc_khz;
	x86_platform.get_wallclock = lguest_get_wallclock;

	/*
	 * Now is a good time to look at the implementations of these functions
	 * before returning to the rest of lguest_init().
	 */

	/*G:070
	 * Now we've seen all the paravirt_ops, we return to
	 * lguest_init() where the rest of the fairly chaotic boot setup
	 * occurs.
	 */

	/*
	 * The stack protector is a weird thing where gcc places a canary
	 * value on the stack and then checks it on return.  This file is
	 * compiled with -fno-stack-protector, so we got this far without
	 * problems.  The value of the canary is kept at offset 20 from the
	 * %gs register, so we need to set that up before calling C functions
	 * in other files.
	 */
	setup_stack_canary_segment(0);

	/*
	 * We could just call load_stack_canary_segment(), but we might as well
	 * call switch_to_new_gdt() which loads the whole table and sets up the
	 * per-cpu segment descriptor register %fs as well.
	 */
	switch_to_new_gdt(0);

	/*
	 * The Host<->Guest Switcher lives at the top of our address space, and
	 * the Host told us how big it is when we made the LGUEST_INIT hypercall:
	 * it put the answer in lguest_data.reserve_mem
	 */
	reserve_top_address(lguest_data.reserve_mem);

	/*
	 * If we don't initialize the lock dependency checker now, it crashes
	 * atomic_notifier_chain_register, then paravirt_disable_iospace.
	 */
	lockdep_init();

	/* Hook in our special panic hypercall code. */
	atomic_notifier_chain_register(&panic_notifier_list, &paniced);

	/*
	 * This is messy CPU setup stuff which the native boot code does before
	 * start_kernel, so we have to do it too:
	 */
	cpu_detect(&new_cpu_data);
	/* head.S usually sets up the first capability word, so do it here. */
	new_cpu_data.x86_capability[0] = cpuid_edx(1);

	/* Math is always hard! */
	set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);

	/* We don't have features.  We have puppies!  Puppies! */
#ifdef CONFIG_X86_MCE
	mca_cfg.disabled = true;
#endif
#ifdef CONFIG_ACPI
	acpi_disabled = 1;
#endif

	/*
	 * We set the preferred console to "hvc".  This is the "hypervisor
	 * virtual console" driver written by the PowerPC people, which we also
	 * adapted for lguest's use.
	 */
	add_preferred_console("hvc", 0, NULL);

	/* Register our very early console. */
	virtio_cons_early_init(early_put_chars);

	/* Don't let ACPI try to control our PCI interrupts. */
	disable_acpi();

	/* We control them ourselves, by overriding these two hooks. */
	pcibios_enable_irq = lguest_enable_irq;
	pcibios_disable_irq = lguest_disable_irq;

	/*
	 * Last of all, we set the power management poweroff hook to point to
	 * the Guest routine to power off, and the reboot hook to our restart
	 * routine.
	 */
	pm_power_off = lguest_power_off;
	machine_ops.restart = lguest_restart;

	/*
	 * Now we're set up, call i386_start_kernel() in head32.c and we proceed
	 * to boot as normal.  It never returns.
	 */
	i386_start_kernel();
}
/*
 * This marks the end of stage II of our journey, The Guest.
 *
 * It is now time for us to explore the layer of virtual drivers and complete
 * our understanding of the Guest in "make Drivers".
 */