Commit 7f3591cf authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-lguest

* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-lguest: (31 commits)
  lguest: add support for indirect ring entries
  lguest: suppress notifications in example Launcher
  lguest: try to batch interrupts on network receive
  lguest: avoid sending interrupts to Guest when no activity occurs.
  lguest: implement deferred interrupts in example Launcher
  lguest: remove obsolete LHREQ_BREAK call
  lguest: have example Launcher service all devices in separate threads
  lguest: use eventfds for device notification
  eventfd: export eventfd_signal and eventfd_fget for lguest
  lguest: allow any process to send interrupts
  lguest: PAE fixes
  lguest: PAE support
  lguest: Add support for kvm_hypercall4()
  lguest: replace hypercall name LHCALL_SET_PMD with LHCALL_SET_PGD
  lguest: use native_set_* macros, which properly handle 64-bit entries when PAE is activated
  lguest: map switcher with executable page table entries
  lguest: fix writev returning short on console output
  lguest: clean up length-used value in example launcher
  lguest: Segment selectors are 16-bit long. Fix lg_cpu.ss1 definition.
  lguest: beyond ARRAY_SIZE of cpu->arch.gdt
  ...
 # This creates the demonstration utility "lguest" which runs a Linux guest.
-CFLAGS:=-Wall -Wmissing-declarations -Wmissing-prototypes -O3 -I../../include -I../../arch/x86/include -U_FORTIFY_SOURCE
+CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -I../../include -I../../arch/x86/include -U_FORTIFY_SOURCE
 LDLIBS:=-lz
 all: lguest
......
This diff is collapsed.
...@@ -37,7 +37,6 @@ Running Lguest: ...@@ -37,7 +37,6 @@ Running Lguest:
"Paravirtualized guest support" = Y "Paravirtualized guest support" = Y
"Lguest guest support" = Y "Lguest guest support" = Y
"High Memory Support" = off/4GB "High Memory Support" = off/4GB
"PAE (Physical Address Extension) Support" = N
"Alignment value to which kernel should be aligned" = 0x100000 "Alignment value to which kernel should be aligned" = 0x100000
(CONFIG_PARAVIRT=y, CONFIG_LGUEST_GUEST=y, CONFIG_HIGHMEM64G=n and (CONFIG_PARAVIRT=y, CONFIG_LGUEST_GUEST=y, CONFIG_HIGHMEM64G=n and
CONFIG_PHYSICAL_ALIGN=0x100000) CONFIG_PHYSICAL_ALIGN=0x100000)
......
@@ -17,8 +17,13 @@
 /* Pages for switcher itself, then two pages per cpu */
 #define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)

-/* We map at -4M for ease of mapping into the guest (one PTE page). */
+/* We map at -4M (-2M when PAE is activated) for ease of mapping
+ * into the guest (one PTE page). */
+#ifdef CONFIG_X86_PAE
+#define SWITCHER_ADDR 0xFFE00000
+#else
 #define SWITCHER_ADDR 0xFFC00000
+#endif

 /* Found in switcher.S */
 extern unsigned long default_idt_entries[];
......
@@ -12,11 +12,13 @@
 #define LHCALL_TS		8
 #define LHCALL_SET_CLOCKEVENT	9
 #define LHCALL_HALT		10
+#define LHCALL_SET_PMD		13
 #define LHCALL_SET_PTE		14
-#define LHCALL_SET_PMD		15
+#define LHCALL_SET_PGD		15
 #define LHCALL_LOAD_TLS		16
 #define LHCALL_NOTIFY		17
 #define LHCALL_LOAD_GDT_ENTRY	18
+#define LHCALL_SEND_INTERRUPTS	19

 #define LGUEST_TRAP_ENTRY 0x1F
@@ -32,10 +34,10 @@
  * operations? There are two ways: the direct way is to make a "hypercall",
  * to make requests of the Host Itself.
  *
- * We use the KVM hypercall mechanism. Eighteen hypercalls are
+ * We use the KVM hypercall mechanism. Seventeen hypercalls are
  * available: the hypercall number is put in the %eax register, and the
- * arguments (when required) are placed in %ebx, %ecx and %edx. If a return
- * value makes sense, it's returned in %eax.
+ * arguments (when required) are placed in %ebx, %ecx, %edx and %esi.
+ * If a return value makes sense, it's returned in %eax.
  *
  * Grossly invalid calls result in Sudden Death at the hands of the vengeful
  * Host, rather than returning failure. This reflects Winston Churchill's
@@ -47,8 +49,9 @@
 #define LHCALL_RING_SIZE 64
 struct hcall_args {
-	/* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */
-	unsigned long arg0, arg1, arg2, arg3;
+	/* These map directly onto eax, ebx, ecx, edx and esi
+	 * in struct lguest_regs */
+	unsigned long arg0, arg1, arg2, arg3, arg4;
 };
 #endif /* !__ASSEMBLY__ */
......
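The calling convention described above is easy to see in miniature. Here is a hedged sketch of a four-argument hypercall wrapper in the spirit of kvm_hypercall4() (the function name is invented for illustration): the call number goes in %eax, the arguments in %ebx, %ecx, %edx and %esi, and the result comes back in %eax. The .byte sequence is the same hand-assembled vmcall used in i386_head.S further down.

/* Sketch only: an lguest-style 4-argument hypercall. */
static inline unsigned long
hcall4_sketch(unsigned long call, unsigned long arg1, unsigned long arg2,
              unsigned long arg3, unsigned long arg4)
{
        unsigned long ret;

        /* 0x0f 0x01 0xc1 is "vmcall", spelled out for older assemblers. */
        asm volatile(".byte 0x0f,0x01,0xc1"
                     : "=a"(ret)
                     : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3), "S"(arg4)
                     : "memory");
        return ret;
}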
@@ -126,6 +126,7 @@ void foo(void)
 #if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE)
 	BLANK();
 	OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
+	OFFSET(LGUEST_DATA_irq_pending, lguest_data, irq_pending);
 	OFFSET(LGUEST_DATA_pgdir, lguest_data, pgdir);
 	BLANK();
......
@@ -2,7 +2,6 @@ config LGUEST_GUEST
 	bool "Lguest guest support"
 	select PARAVIRT
 	depends on X86_32
-	depends on !X86_PAE
 	select VIRTIO
 	select VIRTIO_RING
 	select VIRTIO_CONSOLE
......
@@ -87,7 +87,7 @@ struct lguest_data lguest_data = {
 /*G:037 async_hcall() is pretty simple: I'm quite proud of it really. We have a
  * ring buffer of stored hypercalls which the Host will run though next time we
- * do a normal hypercall. Each entry in the ring has 4 slots for the hypercall
+ * do a normal hypercall. Each entry in the ring has 5 slots for the hypercall
  * arguments, and a "hcall_status" word which is 0 if the call is ready to go,
  * and 255 once the Host has finished with it.
  *
@@ -96,7 +96,8 @@ struct lguest_data lguest_data = {
  * effect of causing the Host to run all the stored calls in the ring buffer
  * which empties it for next time! */
 static void async_hcall(unsigned long call, unsigned long arg1,
-			unsigned long arg2, unsigned long arg3)
+			unsigned long arg2, unsigned long arg3,
+			unsigned long arg4)
 {
 	/* Note: This code assumes we're uniprocessor. */
 	static unsigned int next_call;
@@ -108,12 +109,13 @@ static void async_hcall(unsigned long call, unsigned long arg1,
 	local_irq_save(flags);
 	if (lguest_data.hcall_status[next_call] != 0xFF) {
 		/* Table full, so do normal hcall which will flush table. */
-		kvm_hypercall3(call, arg1, arg2, arg3);
+		kvm_hypercall4(call, arg1, arg2, arg3, arg4);
 	} else {
 		lguest_data.hcalls[next_call].arg0 = call;
 		lguest_data.hcalls[next_call].arg1 = arg1;
 		lguest_data.hcalls[next_call].arg2 = arg2;
 		lguest_data.hcalls[next_call].arg3 = arg3;
+		lguest_data.hcalls[next_call].arg4 = arg4;
 		/* Arguments must all be written before we mark it to go */
 		wmb();
 		lguest_data.hcall_status[next_call] = 0;
@@ -141,7 +143,7 @@ static void lazy_hcall1(unsigned long call,
 	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
 		kvm_hypercall1(call, arg1);
 	else
-		async_hcall(call, arg1, 0, 0);
+		async_hcall(call, arg1, 0, 0, 0);
 }

 static void lazy_hcall2(unsigned long call,
@@ -151,7 +153,7 @@ static void lazy_hcall2(unsigned long call,
 	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
 		kvm_hypercall2(call, arg1, arg2);
 	else
-		async_hcall(call, arg1, arg2, 0);
+		async_hcall(call, arg1, arg2, 0, 0);
 }

 static void lazy_hcall3(unsigned long call,
@@ -162,9 +164,23 @@ static void lazy_hcall3(unsigned long call,
 	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
 		kvm_hypercall3(call, arg1, arg2, arg3);
 	else
-		async_hcall(call, arg1, arg2, arg3);
+		async_hcall(call, arg1, arg2, arg3, 0);
 }

+#ifdef CONFIG_X86_PAE
+static void lazy_hcall4(unsigned long call,
+		       unsigned long arg1,
+		       unsigned long arg2,
+		       unsigned long arg3,
+		       unsigned long arg4)
+{
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
+		kvm_hypercall4(call, arg1, arg2, arg3, arg4);
+	else
+		async_hcall(call, arg1, arg2, arg3, arg4);
+}
+#endif
+
 /* When lazy mode is turned off reset the per-cpu lazy mode variable and then
  * issue the do-nothing hypercall to flush any stored calls. */
 static void lguest_leave_lazy_mmu_mode(void)
@@ -179,7 +195,7 @@ static void lguest_end_context_switch(struct task_struct *next)
 	paravirt_end_context_switch(next);
 }

-/*G:033
+/*G:032
  * After that diversion we return to our first native-instruction
  * replacements: four functions for interrupt control.
  *
@@ -199,30 +215,28 @@ static unsigned long save_fl(void)
 {
 	return lguest_data.irq_enabled;
 }
-PV_CALLEE_SAVE_REGS_THUNK(save_fl);
-
-/* restore_flags() just sets the flags back to the value given. */
-static void restore_fl(unsigned long flags)
-{
-	lguest_data.irq_enabled = flags;
-}
-PV_CALLEE_SAVE_REGS_THUNK(restore_fl);

 /* Interrupts go off... */
 static void irq_disable(void)
 {
 	lguest_data.irq_enabled = 0;
 }
+
+/* Let's pause a moment. Remember how I said these are called so often?
+ * Jeremy Fitzhardinge optimized them so hard early in 2009 that he had to
+ * break some rules. In particular, these functions are assumed to save their
+ * own registers if they need to: normal C functions assume they can trash the
+ * eax register. To use normal C functions, we use
+ * PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the
+ * C function, then restores it. */
+PV_CALLEE_SAVE_REGS_THUNK(save_fl);
 PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
+/*:*/

-/* Interrupts go on... */
-static void irq_enable(void)
-{
-	lguest_data.irq_enabled = X86_EFLAGS_IF;
-}
-PV_CALLEE_SAVE_REGS_THUNK(irq_enable);
-/*:*/
+/* These are in i386_head.S */
+extern void lg_irq_enable(void);
+extern void lg_restore_fl(unsigned long flags);

 /*M:003 Note that we don't check for outstanding interrupts when we re-enable
  * them (or when we unmask an interrupt). This seems to work for the moment,
  * since interrupts are rare and we'll just get the interrupt on the next timer
@@ -368,8 +382,8 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
 	case 1: /* Basic feature request. */
 		/* We only allow kernel to see SSE3, CMPXCHG16B and SSSE3 */
 		*cx &= 0x00002201;
-		/* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU. */
-		*dx &= 0x07808111;
+		/* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU, PAE. */
+		*dx &= 0x07808151;
 		/* The Host can do a nice optimization if it knows that the
 		 * kernel mappings (addresses above 0xC0000000 or whatever
 		 * PAGE_OFFSET is set to) haven't changed. But Linux calls
@@ -388,6 +402,11 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
 		if (*ax > 0x80000008)
 			*ax = 0x80000008;
 		break;
+	case 0x80000001:
+		/* Here we should fix nx cap depending on host. */
+		/* For this version of PAE, we just clear NX bit. */
+		*dx &= ~(1 << 20);
+		break;
 	}
 }
@@ -521,25 +540,52 @@ static void lguest_write_cr4(unsigned long val)
 static void lguest_pte_update(struct mm_struct *mm, unsigned long addr,
 			       pte_t *ptep)
 {
+#ifdef CONFIG_X86_PAE
+	lazy_hcall4(LHCALL_SET_PTE, __pa(mm->pgd), addr,
+		    ptep->pte_low, ptep->pte_high);
+#else
 	lazy_hcall3(LHCALL_SET_PTE, __pa(mm->pgd), addr, ptep->pte_low);
+#endif
 }

 static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pteval)
 {
-	*ptep = pteval;
+	native_set_pte(ptep, pteval);
 	lguest_pte_update(mm, addr, ptep);
 }

-/* The Guest calls this to set a top-level entry. Again, we set the entry then
- * tell the Host which top-level page we changed, and the index of the entry we
- * changed. */
+/* The Guest calls lguest_set_pud to set a top-level entry and lguest_set_pmd
+ * to set a middle-level entry when PAE is activated.
+ * Again, we set the entry then tell the Host which page we changed,
+ * and the index of the entry we changed. */
+#ifdef CONFIG_X86_PAE
+static void lguest_set_pud(pud_t *pudp, pud_t pudval)
+{
+	native_set_pud(pudp, pudval);
+
+	/* 32 bytes aligned pdpt address and the index. */
+	lazy_hcall2(LHCALL_SET_PGD, __pa(pudp) & 0xFFFFFFE0,
+		   (__pa(pudp) & 0x1F) / sizeof(pud_t));
+}
+
 static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
 {
-	*pmdp = pmdval;
+	native_set_pmd(pmdp, pmdval);
 	lazy_hcall2(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK,
-		   (__pa(pmdp) & (PAGE_SIZE - 1)) / 4);
+		   (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
 }
+#else
+
+/* The Guest calls lguest_set_pmd to set a top-level entry when PAE is not
+ * activated. */
+static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+	native_set_pmd(pmdp, pmdval);
+	lazy_hcall2(LHCALL_SET_PGD, __pa(pmdp) & PAGE_MASK,
+		   (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
+}
+#endif

 /* There are a couple of legacy places where the kernel sets a PTE, but we
  * don't know the top level any more. This is useless for us, since we don't
@@ -552,11 +598,31 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
  * which brings boot back to 0.25 seconds. */
 static void lguest_set_pte(pte_t *ptep, pte_t pteval)
 {
-	*ptep = pteval;
+	native_set_pte(ptep, pteval);
+	if (cr3_changed)
+		lazy_hcall1(LHCALL_FLUSH_TLB, 1);
+}
+
+#ifdef CONFIG_X86_PAE
+static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+	native_set_pte_atomic(ptep, pte);
 	if (cr3_changed)
 		lazy_hcall1(LHCALL_FLUSH_TLB, 1);
 }
+
+void lguest_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	native_pte_clear(mm, addr, ptep);
+	lguest_pte_update(mm, addr, ptep);
+}
+
+void lguest_pmd_clear(pmd_t *pmdp)
+{
+	lguest_set_pmd(pmdp, __pmd(0));
+}
+#endif

 /* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
  * native page table operations. On native hardware you can set a new page
  * table entry whenever you want, but if you want to remove one you have to do
@@ -628,13 +694,12 @@ static void __init lguest_init_IRQ(void)
 {
 	unsigned int i;

-	for (i = 0; i < LGUEST_IRQS; i++) {
-		int vector = FIRST_EXTERNAL_VECTOR + i;
+	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
 		/* Some systems map "vectors" to interrupts weirdly. Lguest has
 		 * a straightforward 1 to 1 mapping, so force that here. */
-		__get_cpu_var(vector_irq)[vector] = i;
-		if (vector != SYSCALL_VECTOR)
-			set_intr_gate(vector, interrupt[i]);
+		__get_cpu_var(vector_irq)[i] = i - FIRST_EXTERNAL_VECTOR;
+		if (i != SYSCALL_VECTOR)
+			set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
 	}
 	/* This call is required to set up for 4k stacks, where we have
 	 * separate stacks for hard and soft interrupts. */
@@ -973,10 +1038,10 @@ static void lguest_restart(char *reason)
  *
  * Our current solution is to allow the paravirt back end to optionally patch
  * over the indirect calls to replace them with something more efficient. We
- * patch the four most commonly called functions: disable interrupts, enable
- * interrupts, restore interrupts and save interrupts. We usually have 6 or 10
- * bytes to patch into: the Guest versions of these operations are small enough
- * that we can fit comfortably.
+ * patch two of the simplest of the most commonly called functions: disable
+ * interrupts and save interrupts. We usually have 6 or 10 bytes to patch
+ * into: the Guest versions of these operations are small enough that we can
+ * fit comfortably.
  *
  * First we need assembly templates of each of the patchable Guest operations,
  * and these are in i386_head.S. */
@@ -987,8 +1052,6 @@ static const struct lguest_insns
 	const char *start, *end;
 } lguest_insns[] = {
 	[PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
-	[PARAVIRT_PATCH(pv_irq_ops.irq_enable)] = { lgstart_sti, lgend_sti },
-	[PARAVIRT_PATCH(pv_irq_ops.restore_fl)] = { lgstart_popf, lgend_popf },
 	[PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
 };
@@ -1026,6 +1089,7 @@ __init void lguest_init(void)
 	pv_info.name = "lguest";
 	pv_info.paravirt_enabled = 1;
 	pv_info.kernel_rpl = 1;
+	pv_info.shared_kernel_pmd = 1;

 	/* We set up all the lguest overrides for sensitive operations. These
 	 * are detailed with the operations themselves. */
@@ -1033,9 +1097,9 @@ __init void lguest_init(void)
 	/* interrupt-related operations */
 	pv_irq_ops.init_IRQ = lguest_init_IRQ;
 	pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
-	pv_irq_ops.restore_fl = PV_CALLEE_SAVE(restore_fl);
+	pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl);
 	pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
-	pv_irq_ops.irq_enable = PV_CALLEE_SAVE(irq_enable);
+	pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable);
 	pv_irq_ops.safe_halt = lguest_safe_halt;

 	/* init-time operations */
@@ -1071,6 +1135,12 @@ __init void lguest_init(void)
 	pv_mmu_ops.set_pte = lguest_set_pte;
 	pv_mmu_ops.set_pte_at = lguest_set_pte_at;
 	pv_mmu_ops.set_pmd = lguest_set_pmd;
+#ifdef CONFIG_X86_PAE
+	pv_mmu_ops.set_pte_atomic = lguest_set_pte_atomic;
+	pv_mmu_ops.pte_clear = lguest_pte_clear;
+	pv_mmu_ops.pmd_clear = lguest_pmd_clear;
+	pv_mmu_ops.set_pud = lguest_set_pud;
+#endif
 	pv_mmu_ops.read_cr2 = lguest_read_cr2;
 	pv_mmu_ops.read_cr3 = lguest_read_cr3;
 	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
......
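One detail of the boot.c changes above deserves a worked example: lguest_set_pud() packs both the PDPT base and the entry index into its two hypercall arguments. A PAE page-directory-pointer table holds four 8-byte entries and is 32-byte aligned, so masking the entry's physical address with 0xFFFFFFE0 recovers the table base while the low five bits, divided by sizeof(pud_t), recover the index. A small user-space check of that arithmetic (the address is hypothetical):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t pdpt = 0x1234560;      /* hypothetical 32-byte-aligned table */
        uint32_t i;

        for (i = 0; i < 4; i++) {
                uint32_t entry_pa = pdpt + i * 8;        /* __pa(pudp) */
                assert((entry_pa & 0xFFFFFFE0) == pdpt); /* arg1: table base */
                assert((entry_pa & 0x1F) / 8 == i);      /* arg2: entry index */
        }
        return 0;
}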
@@ -46,10 +46,64 @@ ENTRY(lguest_entry)
 	.globl lgstart_##name; .globl lgend_##name

 LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled)
-LGUEST_PATCH(sti, movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled)
-LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled)
 LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
 /*:*/
+
+/*G:033 But using those wrappers is inefficient (we'll see why that doesn't
+ * matter for save_fl and irq_disable later). If we write our routines
+ * carefully in assembler, we can avoid clobbering any registers and avoid
+ * jumping through the wrapper functions.
+ *
+ * I skipped over our first piece of assembler, but this one is worth studying
+ * in a bit more detail so I'll describe in easy stages. First, the routine
+ * to enable interrupts: */
+ENTRY(lg_irq_enable)
+	/* The reverse of irq_disable, this sets lguest_data.irq_enabled to
+	 * X86_EFLAGS_IF (ie. "Interrupts enabled"). */
+	movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled
+	/* But now we need to check if the Host wants to know: there might have
+	 * been interrupts waiting to be delivered, in which case it will have
+	 * set lguest_data.irq_pending to X86_EFLAGS_IF. If it's not zero, we
+	 * jump to send_interrupts, otherwise we're done. */
+	testl $0, lguest_data+LGUEST_DATA_irq_pending
+	jnz send_interrupts
+	/* One cool thing about x86 is that you can do many things without using
+	 * a register. In this case, the normal path hasn't needed to save or
+	 * restore any registers at all! */
+	ret
+send_interrupts:
+	/* OK, now we need a register: eax is used for the hypercall number,
+	 * which is LHCALL_SEND_INTERRUPTS.
+	 *
+	 * We used not to bother with this pending detection at all, which was
+	 * much simpler. Sooner or later the Host would realize it had to
+	 * send us an interrupt. But that turns out to make performance 7
+	 * times worse on a simple tcp benchmark. So now we do this the hard
+	 * way. */
+	pushl %eax
+	movl $LHCALL_SEND_INTERRUPTS, %eax
+	/* This is a vmcall instruction (same thing that KVM uses). Older
+	 * assembler versions might not know the "vmcall" instruction, so we
+	 * create one manually here. */
+	.byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */
+	popl %eax
+	ret
+
+/* Finally, the "popf" or "restore flags" routine. The %eax register holds the
+ * flags (in practice, either X86_EFLAGS_IF or 0): if it's X86_EFLAGS_IF we're
+ * enabling interrupts again, if it's 0 we're leaving them off. */
+ENTRY(lg_restore_fl)
+	/* This is just "lguest_data.irq_enabled = flags;" */
+	movl %eax, lguest_data+LGUEST_DATA_irq_enabled
+	/* Now, if the %eax value has enabled interrupts and
+	 * lguest_data.irq_pending is set, we want to tell the Host so it can
+	 * deliver any outstanding interrupts. Fortunately, both values will
+	 * be X86_EFLAGS_IF (ie. 512) in that case, and the "testl"
+	 * instruction will AND them together for us. If both are set, we
+	 * jump to send_interrupts. */
+	testl lguest_data+LGUEST_DATA_irq_pending, %eax
+	jnz send_interrupts
+	/* Again, the normal path has used no extra registers. Clever, huh? */
+	ret

 /* These demark the EIP range where host should never deliver interrupts. */
 .global lguest_noirq_start
......
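The lg_restore_fl trick above relies on both values being either 0 or X86_EFLAGS_IF (0x200), so a single AND answers "are interrupts now enabled, and is something pending?". In C the test reads as follows (a hedged sketch; the function name is invented for illustration):

#include <assert.h>

#define X86_EFLAGS_IF 0x200

/* The C equivalent of "testl lguest_data+LGUEST_DATA_irq_pending, %eax". */
static int must_tell_host(unsigned long flags, unsigned long irq_pending)
{
        return (flags & irq_pending) != 0;
}

int main(void)
{
        assert(!must_tell_host(0, X86_EFLAGS_IF));            /* irqs stay off */
        assert(!must_tell_host(X86_EFLAGS_IF, 0));            /* nothing pending */
        assert(must_tell_host(X86_EFLAGS_IF, X86_EFLAGS_IF)); /* notify the Host */
        return 0;
}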
 config LGUEST
 	tristate "Linux hypervisor example code"
-	depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX
+	depends on X86_32 && EXPERIMENTAL && EVENTFD
 	select HVC_DRIVER
 	---help---
 	 This is a very simple module which allows you to run
......
@@ -95,7 +95,7 @@ static __init int map_switcher(void)
 	 * array of struct pages. It increments that pointer, but we don't
 	 * care. */
 	pagep = switcher_page;
-	err = map_vm_area(switcher_vma, PAGE_KERNEL, &pagep);
+	err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
 	if (err) {
 		printk("lguest: map_vm_area failed: %i\n", err);
 		goto free_vma;
@@ -188,6 +188,9 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
 {
 	/* We stop running once the Guest is dead. */
 	while (!cpu->lg->dead) {
+		unsigned int irq;
+		bool more;
+
 		/* First we run any hypercalls the Guest wants done. */
 		if (cpu->hcall)
 			do_hypercalls(cpu);
@@ -195,23 +198,23 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
 		/* It's possible the Guest did a NOTIFY hypercall to the
 		 * Launcher, in which case we return from the read() now. */
 		if (cpu->pending_notify) {
-			if (put_user(cpu->pending_notify, user))
-				return -EFAULT;
-			return sizeof(cpu->pending_notify);
+			if (!send_notify_to_eventfd(cpu)) {
+				if (put_user(cpu->pending_notify, user))
+					return -EFAULT;
+				return sizeof(cpu->pending_notify);
+			}
 		}

 		/* Check for signals */
 		if (signal_pending(current))
 			return -ERESTARTSYS;

-		/* If Waker set break_out, return to Launcher. */
-		if (cpu->break_out)
-			return -EAGAIN;
-
 		/* Check if there are any interrupts which can be delivered now:
 		 * if so, this sets up the hander to be executed when we next
 		 * run the Guest. */
-		maybe_do_interrupt(cpu);
+		irq = interrupt_pending(cpu, &more);
+		if (irq < LGUEST_IRQS)
+			try_deliver_interrupt(cpu, irq, more);

 		/* All long-lived kernel loops need to check with this horrible
 		 * thing called the freezer. If the Host is trying to suspend,
@@ -224,10 +227,15 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
 			break;

 		/* If the Guest asked to be stopped, we sleep. The Guest's
-		 * clock timer or LHREQ_BREAK from the Waker will wake us. */
+		 * clock timer will wake us. */
 		if (cpu->halted) {
 			set_current_state(TASK_INTERRUPTIBLE);
-			schedule();
+			/* Just before we sleep, make sure no interrupt snuck in
+			 * which we should be doing. */
+			if (interrupt_pending(cpu, &more) < LGUEST_IRQS)
+				set_current_state(TASK_RUNNING);
+			else
+				schedule();
 			continue;
 		}
......
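The halt path above follows the classic lost-wakeup discipline: mark the task TASK_INTERRUPTIBLE first, re-check interrupt_pending(), and only then schedule(), so a set_interrupt() racing in between either finds a sleeping task to wake or is caught by the re-check. A user-space analogue with pthreads (hedged: the kernel uses task states and schedule(), not condition variables) looks like this:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static bool irq_is_pending;

/* The Guest-halted path: re-check the condition before blocking. */
static void halt_until_interrupt(void)
{
        pthread_mutex_lock(&lock);
        while (!irq_is_pending)         /* the interrupt_pending() re-check */
                pthread_cond_wait(&wake, &lock);
        irq_is_pending = false;
        pthread_mutex_unlock(&lock);
}

/* Another thread's set_interrupt() analogue. */
static void post_interrupt(void)
{
        pthread_mutex_lock(&lock);
        irq_is_pending = true;
        pthread_cond_signal(&wake);     /* the wake_up_process() analogue */
        pthread_mutex_unlock(&lock);
}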
@@ -37,6 +37,10 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
 		/* This call does nothing, except by breaking out of the Guest
 		 * it makes us process all the asynchronous hypercalls. */
 		break;
+	case LHCALL_SEND_INTERRUPTS:
+		/* This call does nothing too, but by breaking out of the Guest
+		 * it makes us process any pending interrupts. */
+		break;
 	case LHCALL_LGUEST_INIT:
 		/* You can't get here unless you're already initialized. Don't
 		 * do that. */
@@ -73,11 +77,21 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
 		guest_set_stack(cpu, args->arg1, args->arg2, args->arg3);
 		break;
 	case LHCALL_SET_PTE:
+#ifdef CONFIG_X86_PAE
+		guest_set_pte(cpu, args->arg1, args->arg2,
+				__pte(args->arg3 | (u64)args->arg4 << 32));
+#else
 		guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3));
+#endif
+		break;
+	case LHCALL_SET_PGD:
+		guest_set_pgd(cpu->lg, args->arg1, args->arg2);
 		break;
+#ifdef CONFIG_X86_PAE
 	case LHCALL_SET_PMD:
 		guest_set_pmd(cpu->lg, args->arg1, args->arg2);
 		break;
+#endif
 	case LHCALL_SET_CLOCKEVENT:
 		guest_set_clockevent(cpu, args->arg1);
 		break;
......
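The LHCALL_SET_PTE case above is the Host half of a round trip: under PAE the Guest (in lguest_pte_update()) splits the 64-bit pte into pte_low and pte_high for hypercall arguments 3 and 4, and do_hcall() glues them back together. A quick user-space check of the arithmetic (the pte value is hypothetical):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t pte = 0x8000000012345067ULL;   /* hypothetical: NX + frame + flags */
        uint32_t arg3 = (uint32_t)pte;          /* ptep->pte_low */
        uint32_t arg4 = (uint32_t)(pte >> 32);  /* ptep->pte_high */

        /* Host side: __pte(args->arg3 | (u64)args->arg4 << 32) */
        assert((arg3 | (uint64_t)arg4 << 32) == pte);
        return 0;
}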
@@ -128,30 +128,39 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
 /*H:205
  * Virtual Interrupts.
  *
- * maybe_do_interrupt() gets called before every entry to the Guest, to see if
- * we should divert the Guest to running an interrupt handler. */
-void maybe_do_interrupt(struct lg_cpu *cpu)
+ * interrupt_pending() returns the first pending interrupt which isn't blocked
+ * by the Guest. It is called before every entry to the Guest, and just before
+ * we go to sleep when the Guest has halted itself. */
+unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more)
 {
 	unsigned int irq;
 	DECLARE_BITMAP(blk, LGUEST_IRQS);
-	struct desc_struct *idt;

 	/* If the Guest hasn't even initialized yet, we can do nothing. */
 	if (!cpu->lg->lguest_data)
-		return;
+		return LGUEST_IRQS;

 	/* Take our "irqs_pending" array and remove any interrupts the Guest
 	 * wants blocked: the result ends up in "blk". */
 	if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts,
 			   sizeof(blk)))
-		return;
+		return LGUEST_IRQS;
 	bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS);

 	/* Find the first interrupt. */
 	irq = find_first_bit(blk, LGUEST_IRQS);
-	/* None? Nothing to do */
-	if (irq >= LGUEST_IRQS)
-		return;
+	*more = find_next_bit(blk, LGUEST_IRQS, irq+1);
+
+	return irq;
+}
+
+/* This actually diverts the Guest to running an interrupt handler, once an
+ * interrupt has been identified by interrupt_pending(). */
+void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
+{
+	struct desc_struct *idt;
+
+	BUG_ON(irq >= LGUEST_IRQS);

 	/* They may be in the middle of an iret, where they asked us never to
 	 * deliver interrupts. */
@@ -170,8 +179,12 @@ void maybe_do_interrupt(struct lg_cpu *cpu)
 		u32 irq_enabled;
 		if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled))
 			irq_enabled = 0;
-		if (!irq_enabled)
+		if (!irq_enabled) {
+			/* Make sure they know an IRQ is pending. */
+			put_user(X86_EFLAGS_IF,
+				 &cpu->lg->lguest_data->irq_pending);
 			return;
+		}
 	}

 	/* Look at the IDT entry the Guest gave us for this interrupt. The
@@ -194,6 +207,25 @@ void maybe_do_interrupt(struct lg_cpu *cpu)
 	 * here is a compromise which means at least it gets updated every
 	 * timer interrupt. */
 	write_timestamp(cpu);
+
+	/* If there are no other interrupts we want to deliver, clear
+	 * the pending flag. */
+	if (!more)
+		put_user(0, &cpu->lg->lguest_data->irq_pending);
+}
+
+/* And this is the routine when we want to set an interrupt for the Guest. */
+void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
+{
+	/* Next time the Guest runs, the core code will see if it can deliver
+	 * this interrupt. */
+	set_bit(irq, cpu->irqs_pending);
+
+	/* Make sure it sees it; it might be asleep (eg. halted), or
+	 * running the Guest right now, in which case kick_process()
+	 * will knock it out. */
+	if (!wake_up_process(cpu->tsk))
+		kick_process(cpu->tsk);
 }
 /*:*/
@@ -510,10 +542,7 @@ static enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
 	struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt);

 	/* Remember the first interrupt is the timer interrupt. */
-	set_bit(0, cpu->irqs_pending);
-	/* If the Guest is actually stopped, we need to wake it up. */
-	if (cpu->halted)
-		wake_up_process(cpu->tsk);
+	set_interrupt(cpu, 0);
 	return HRTIMER_NORESTART;
 }
......
@@ -49,7 +49,7 @@ struct lg_cpu {
 	u32 cr2;
 	int ts;
 	u32 esp1;
-	u8 ss1;
+	u16 ss1;

 	/* Bitmap of what has changed: see CHANGED_* above. */
 	int changed;
@@ -71,9 +71,7 @@ struct lg_cpu {
 	/* Virtual clock device */
 	struct hrtimer hrt;

-	/* Do we need to stop what we're doing and return to userspace? */
-	int break_out;
-	wait_queue_head_t break_wq;
+	/* Did the Guest tell us to halt? */
 	int halted;

 	/* Pending virtual interrupts */
@@ -82,6 +80,16 @@ struct lg_cpu {
 	struct lg_cpu_arch arch;
 };

+struct lg_eventfd {
+	unsigned long addr;
+	struct file *event;
+};
+
+struct lg_eventfd_map {
+	unsigned int num;
+	struct lg_eventfd map[];
+};
+
 /* The private info the thread maintains about the guest. */
 struct lguest
 {
@@ -102,6 +110,8 @@ struct lguest
 	unsigned int stack_pages;
 	u32 tsc_khz;

+	struct lg_eventfd_map *eventfds;
+
 	/* Dead? */
 	const char *dead;
 };
@@ -137,9 +147,13 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
  * in the kernel. */
 #define pgd_flags(x)	(pgd_val(x) & ~PAGE_MASK)
 #define pgd_pfn(x)	(pgd_val(x) >> PAGE_SHIFT)
+#define pmd_flags(x)	(pmd_val(x) & ~PAGE_MASK)
+#define pmd_pfn(x)	(pmd_val(x) >> PAGE_SHIFT)

 /* interrupts_and_traps.c: */
-void maybe_do_interrupt(struct lg_cpu *cpu);
+unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more);
+void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more);
+void set_interrupt(struct lg_cpu *cpu, unsigned int irq);
 bool deliver_trap(struct lg_cpu *cpu, unsigned int num);
 void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i,
 			  u32 low, u32 hi);
@@ -150,6 +164,7 @@ void setup_default_idt_entries(struct lguest_ro_state *state,
 void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
 		const unsigned long *def);
 void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
+bool send_notify_to_eventfd(struct lg_cpu *cpu);
 void init_clockdev(struct lg_cpu *cpu);
 bool check_syscall_vector(struct lguest *lg);
 int init_interrupts(void);
@@ -168,7 +183,10 @@ void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt);
 int init_guest_pagetable(struct lguest *lg);
 void free_guest_pagetable(struct lguest *lg);
 void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable);
+void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 i);
+#ifdef CONFIG_X86_PAE
 void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i);
+#endif
 void guest_pagetable_clear_all(struct lg_cpu *cpu);
 void guest_pagetable_flush_user(struct lg_cpu *cpu);
 void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
......
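struct lg_eventfd_map above ends in a C99 flexible array member, so one allocation holds both the count and all the entries; add_eventfd() in the next file sizes it as sizeof(*new) + sizeof(new->map[0]) * (old->num + 1). A user-space sketch of the same grow-by-copy step (malloc standing in for kmalloc, an int fd standing in for struct file *):

#include <stdlib.h>
#include <string.h>

struct lg_eventfd_sketch {
        unsigned long addr;
        int fd;                         /* stands in for struct file *event */
};

struct lg_eventfd_map_sketch {
        unsigned int num;
        struct lg_eventfd_sketch map[]; /* flexible array member */
};

static struct lg_eventfd_map_sketch *
grow_map(const struct lg_eventfd_map_sketch *old, unsigned long addr, int fd)
{
        struct lg_eventfd_map_sketch *new;

        new = malloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1));
        if (!new)
                return NULL;
        memcpy(new->map, old->map, sizeof(old->map[0]) * old->num);
        new->num = old->num;
        new->map[new->num].addr = addr;
        new->map[new->num].fd = fd;
        new->num++;
        return new;     /* caller publishes it, then frees the old copy */
}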
@@ -7,32 +7,83 @@
 #include <linux/miscdevice.h>
 #include <linux/fs.h>
 #include <linux/sched.h>
+#include <linux/eventfd.h>
+#include <linux/file.h>
 #include "lg.h"

-/*L:055 When something happens, the Waker process needs a way to stop the
- * kernel running the Guest and return to the Launcher. So the Waker writes
- * LHREQ_BREAK and the value "1" to /dev/lguest to do this. Once the Launcher
- * has done whatever needs attention, it writes LHREQ_BREAK and "0" to release
- * the Waker. */
-static int break_guest_out(struct lg_cpu *cpu, const unsigned long __user*input)
+bool send_notify_to_eventfd(struct lg_cpu *cpu)
 {
-	unsigned long on;
+	unsigned int i;
+	struct lg_eventfd_map *map;
+
+	/* lg->eventfds is RCU-protected */
+	rcu_read_lock();
+	map = rcu_dereference(cpu->lg->eventfds);
+	for (i = 0; i < map->num; i++) {
+		if (map->map[i].addr == cpu->pending_notify) {
+			eventfd_signal(map->map[i].event, 1);
+			cpu->pending_notify = 0;
+			break;
+		}
+	}
+	rcu_read_unlock();
+	return cpu->pending_notify == 0;
+}

-	/* Fetch whether they're turning break on or off. */
-	if (get_user(on, input) != 0)
-		return -EFAULT;
+static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
+{
+	struct lg_eventfd_map *new, *old = lg->eventfds;

-	if (on) {
-		cpu->break_out = 1;
-		/* Pop it out of the Guest (may be running on different CPU) */
-		wake_up_process(cpu->tsk);
-		/* Wait for them to reset it */
-		return wait_event_interruptible(cpu->break_wq, !cpu->break_out);
-	} else {
-		cpu->break_out = 0;
-		wake_up(&cpu->break_wq);
-		return 0;
+	if (!addr)
+		return -EINVAL;
+
+	/* Replace the old array with the new one, carefully: others can
+	 * be accessing it at the same time */
+	new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1),
+		      GFP_KERNEL);
+	if (!new)
+		return -ENOMEM;
+
+	/* First make identical copy. */
+	memcpy(new->map, old->map, sizeof(old->map[0]) * old->num);
+	new->num = old->num;
+
+	/* Now append new entry. */
+	new->map[new->num].addr = addr;
+	new->map[new->num].event = eventfd_fget(fd);
+	if (IS_ERR(new->map[new->num].event)) {
+		kfree(new);
+		return PTR_ERR(new->map[new->num].event);
 	}
+	new->num++;
+
+	/* Now put new one in place. */
+	rcu_assign_pointer(lg->eventfds, new);
+
+	/* We're not in a big hurry. Wait until noone's looking at old
+	 * version, then delete it. */
+	synchronize_rcu();
+	kfree(old);
+
+	return 0;
+}
+
+static int attach_eventfd(struct lguest *lg, const unsigned long __user *input)
+{
+	unsigned long addr, fd;
+	int err;
+
+	if (get_user(addr, input) != 0)
+		return -EFAULT;
+	input++;
+	if (get_user(fd, input) != 0)
+		return -EFAULT;
+
+	mutex_lock(&lguest_lock);
+	err = add_eventfd(lg, addr, fd);
+	mutex_unlock(&lguest_lock);
+
+	return 0;
 }

 /*L:050 Sending an interrupt is done by writing LHREQ_IRQ and an interrupt
@@ -45,9 +96,8 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
 		return -EFAULT;
 	if (irq >= LGUEST_IRQS)
 		return -EINVAL;
-	/* Next time the Guest runs, the core code will see if it can deliver
-	 * this interrupt. */
-	set_bit(irq, cpu->irqs_pending);
+
+	set_interrupt(cpu, irq);
 	return 0;
 }
@@ -126,9 +176,6 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
 	 * address. */
 	lguest_arch_setup_regs(cpu, start_ip);

-	/* Initialize the queue for the Waker to wait on */
-	init_waitqueue_head(&cpu->break_wq);
-
 	/* We keep a pointer to the Launcher task (ie. current task) for when
 	 * other Guests want to wake this one (eg. console input). */
 	cpu->tsk = current;
@@ -185,6 +232,13 @@ static int initialize(struct file *file, const unsigned long __user *input)
 		goto unlock;
 	}

+	lg->eventfds = kmalloc(sizeof(*lg->eventfds), GFP_KERNEL);
+	if (!lg->eventfds) {
+		err = -ENOMEM;
+		goto free_lg;
+	}
+	lg->eventfds->num = 0;
+
 	/* Populate the easy fields of our "struct lguest" */
 	lg->mem_base = (void __user *)args[0];
 	lg->pfn_limit = args[1];
@@ -192,7 +246,7 @@ static int initialize(struct file *file, const unsigned long __user *input)
 	/* This is the first cpu (cpu 0) and it will start booting at args[2] */
 	err = lg_cpu_start(&lg->cpus[0], 0, args[2]);
 	if (err)
-		goto release_guest;
+		goto free_eventfds;

 	/* Initialize the Guest's shadow page tables, using the toplevel
 	 * address the Launcher gave us. This allocates memory, so can fail. */
@@ -211,7 +265,9 @@ static int initialize(struct file *file, const unsigned long __user *input)
 free_regs:
 	/* FIXME: This should be in free_vcpu */
 	free_page(lg->cpus[0].regs_page);
-release_guest:
+free_eventfds:
+	kfree(lg->eventfds);
+free_lg:
 	kfree(lg);
 unlock:
 	mutex_unlock(&lguest_lock);
@@ -252,11 +308,6 @@ static ssize_t write(struct file *file, const char __user *in,
 		/* Once the Guest is dead, you can only read() why it died. */
 		if (lg->dead)
 			return -ENOENT;
-
-		/* If you're not the task which owns the Guest, all you can do
-		 * is break the Launcher out of running the Guest. */
-		if (current != cpu->tsk && req != LHREQ_BREAK)
-			return -EPERM;
 	}

 	switch (req) {
@@ -264,8 +315,8 @@ static ssize_t write(struct file *file, const char __user *in,
 		return initialize(file, input);
 	case LHREQ_IRQ:
 		return user_send_irq(cpu, input);
-	case LHREQ_BREAK:
-		return break_guest_out(cpu, input);
+	case LHREQ_EVENTFD:
+		return attach_eventfd(lg, input);
 	default:
 		return -EINVAL;
 	}
@@ -303,6 +354,12 @@ static int close(struct inode *inode, struct file *file)
 		 * the Launcher's memory management structure. */
 		mmput(lg->cpus[i].mm);
 	}
+
+	/* Release any eventfds they registered. */
+	for (i = 0; i < lg->eventfds->num; i++)
+		fput(lg->eventfds->map[i].event);
+	kfree(lg->eventfds);
+
 	/* If lg->dead doesn't contain an error code it will be NULL or a
 	 * kmalloc()ed string, either of which is ok to hand to kfree(). */
 	if (!IS_ERR(lg->dead))
......
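From the Launcher's side, the new LHREQ_EVENTFD request pairs naturally with the "service all devices in separate threads" change in the shortlog: register one eventfd per device notification address, then block in read() on it. A hedged sketch (the notify address and the error handling are illustrative only):

#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include "linux/lguest_launcher.h"      /* for LHREQ_EVENTFD */

static int register_notify(int lguest_fd, unsigned long addr)
{
        unsigned long req[3];
        int efd = eventfd(0, 0);

        if (efd < 0)
                return -1;
        req[0] = LHREQ_EVENTFD;
        req[1] = addr;
        req[2] = efd;
        if (write(lguest_fd, req, sizeof(req)) < 0)
                return -1;
        return efd;
}

/* One device-service thread: each LHCALL_NOTIFY on our address makes the
 * Host call eventfd_signal(), which unblocks this read(). */
static void service_device(int efd)
{
        uint64_t hits;

        while (read(efd, &hits, sizeof(hits)) == sizeof(hits)) {
                /* ... process the device's virtqueue here ... */
        }
}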
This diff is collapsed.
@@ -150,7 +150,7 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
 {
 	/* We assume the Guest has the same number of GDT entries as the
 	 * Host, otherwise we'd have to dynamically allocate the Guest GDT. */
-	if (num > ARRAY_SIZE(cpu->arch.gdt))
+	if (num >= ARRAY_SIZE(cpu->arch.gdt))
 		kill_guest(cpu, "too many gdt entries %i", num);

 	/* Set it up, then fix it. */
......
@@ -16,6 +16,7 @@
 #include <linux/anon_inodes.h>
 #include <linux/eventfd.h>
 #include <linux/syscalls.h>
+#include <linux/module.h>

 struct eventfd_ctx {
 	wait_queue_head_t wqh;
@@ -56,6 +57,7 @@ int eventfd_signal(struct file *file, int n)

 	return n;
 }
+EXPORT_SYMBOL_GPL(eventfd_signal);

 static int eventfd_release(struct inode *inode, struct file *file)
 {
@@ -197,6 +199,7 @@ struct file *eventfd_fget(int fd)

 	return file;
 }
+EXPORT_SYMBOL_GPL(eventfd_fget);

 SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
 {
......
@@ -30,6 +30,10 @@ struct lguest_data
 	/* Wallclock time set by the Host. */
 	struct timespec time;

+	/* Interrupt pending set by the Host. The Guest should do a hypercall
+	 * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). */
+	int irq_pending;
+
 	/* Async hypercall ring. Instead of directly making hypercalls, we can
 	 * place them in here for processing the next time the Host wants.
 	 * This batching can be quite efficient. */
......
@@ -57,7 +57,8 @@ enum lguest_req
 	LHREQ_INITIALIZE, /* + base, pfnlimit, start */
 	LHREQ_GETDMA, /* No longer used */
 	LHREQ_IRQ, /* + irq */
-	LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */
+	LHREQ_BREAK, /* No longer used */
+	LHREQ_EVENTFD, /* + address, fd. */
 };

 /* The alignment to use between consumer and producer parts of vring.
@@ -2192,6 +2192,7 @@ void kick_process(struct task_struct *p)
 		smp_send_reschedule(cpu);
 	preempt_enable();
 }
+EXPORT_SYMBOL_GPL(kick_process);

 /*
  * Return a low guess at the load of a migration-source cpu weighted