Commit d657a98e authored by Zhang Xiantao, committed by Avi Kivity

KVM: Portability: Move kvm_vcpu definition back to kvm.h

This patch moves the kvm_vcpu definition to kvm.h, and finally
kvm.h includes x86.h.
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent 1d737c8a
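For orientation, the split this patch ends up with can be sketched as follows: kvm.h keeps the generic vcpu object (the common fields hidden behind the KVM_VCPU_COMM macro), while the x86-specific per-vcpu state lives in struct kvm_vcpu_arch in x86.h, which is why kvm.h now has to include x86.h. This is only an illustrative sketch; apart from host_tsc (visible in the diff) the arch fields shown are placeholders, and the authoritative definitions are in the diff itself.

/* x86.h -- architecture-specific per-vcpu state (illustrative sketch) */
struct kvm_vcpu_arch {
	u64 host_tsc;                 /* real field, see the diff below */
	/* ... further x86-only state (registers, MMU context, ...) ... */
};

/* kvm.h -- generic vcpu object; needs x86.h so that 'arch' is a complete type */
struct kvm_vcpu {
	KVM_VCPU_COMM;                /* fields common to every architecture */
	struct kvm_vcpu_arch arch;    /* per-architecture part defined in x86.h */
};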
@@ -26,7 +26,7 @@
 #include <linux/hrtimer.h>
 #include <asm/kvm.h>
 #include "iodev.h"
-#include "x86.h"
+#include "kvm.h"
 struct kvm;
 struct kvm_vcpu;
@@ -22,17 +22,13 @@
 #include "types.h"
+#include "x86.h"
 #define KVM_MAX_VCPUS 4
 #define KVM_ALIAS_SLOTS 4
 #define KVM_MEMORY_SLOTS 8
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
-#define KVM_PERMILLE_MMU_PAGES 20
-#define KVM_MIN_ALLOC_MMU_PAGES 64
-#define KVM_NUM_MMU_PAGES 1024
-#define KVM_MIN_FREE_MMU_PAGES 5
-#define KVM_REFILL_PAGES 25
-#define KVM_MAX_CPUID_ENTRIES 40
 #define KVM_PIO_PAGE_OFFSET 1
@@ -41,111 +37,16 @@
 */
 #define KVM_REQ_TLB_FLUSH 0
-#define NR_PTE_CHAIN_ENTRIES 5
-struct kvm_pte_chain {
-	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
-	struct hlist_node link;
-};
-/*
- * kvm_mmu_page_role, below, is defined as:
- *
- * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
- * bits 4:7 - page table level for this shadow (1-4)
- * bits 8:9 - page table quadrant for 2-level guests
- * bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode)
- * bits 17:19 - common access permissions for all ptes in this shadow page
- */
-union kvm_mmu_page_role {
-	unsigned word;
-	struct {
-		unsigned glevels : 4;
-		unsigned level : 4;
-		unsigned quadrant : 2;
-		unsigned pad_for_nice_hex_output : 6;
-		unsigned metaphysical : 1;
-		unsigned access : 3;
-	};
-};
-struct kvm_mmu_page {
-	struct list_head link;
-	struct hlist_node hash_link;
-	/*
-	 * The following two entries are used to key the shadow page in the
-	 * hash table.
-	 */
-	gfn_t gfn;
-	union kvm_mmu_page_role role;
-	u64 *spt;
-	/* hold the gfn of each spte inside spt */
-	gfn_t *gfns;
-	unsigned long slot_bitmap; /* One bit set per slot which has memory
-				    * in this shadow page.
-				    */
-	int multimapped;	/* More than one parent_pte? */
-	int root_count;		/* Currently serving as active root */
-	union {
-		u64 *parent_pte;	       /* !multimapped */
-		struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
-	};
-};
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
-/*
- * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
- * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
- * mode.
- */
-struct kvm_mmu {
-	void (*new_cr3)(struct kvm_vcpu *vcpu);
-	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
-	void (*free)(struct kvm_vcpu *vcpu);
-	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
-	void (*prefetch_page)(struct kvm_vcpu *vcpu,
-			      struct kvm_mmu_page *page);
-	hpa_t root_hpa;
-	int root_level;
-	int shadow_root_level;
-	u64 *pae_root;
-};
-#define KVM_NR_MEM_OBJS 40
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
-	int nobjs;
-	void *objects[KVM_NR_MEM_OBJS];
-};
 struct kvm_guest_debug {
 	int enabled;
 	unsigned long bp[4];
 	int singlestep;
 };
-struct kvm_pio_request {
-	unsigned long count;
-	int cur_count;
-	struct page *guest_pages[2];
-	unsigned guest_page_offset;
-	int in;
-	int port;
-	int size;
-	int string;
-	int down;
-	int rep;
-};
 struct kvm_vcpu_stat {
 	u32 pf_fixed;
 	u32 pf_guest;
@@ -218,6 +119,12 @@ void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
 	struct kvm_vcpu_stat stat;	\
 	KVM_VCPU_MMIO
+struct kvm_vcpu {
+	KVM_VCPU_COMM;
+	struct kvm_vcpu_arch arch;
+};
 struct kvm_mem_alias {
 	gfn_t base_gfn;
 	unsigned long npages;
@@ -441,8 +348,4 @@ struct kvm_stats_debugfs_item {
 };
 extern struct kvm_stats_debugfs_item debugfs_entries[];
-#if defined(CONFIG_X86)
-#include "x86.h"
-#endif
 #endif
@@ -38,4 +38,17 @@ typedef unsigned long hva_t;
 typedef u64 hpa_t;
 typedef unsigned long hfn_t;
+struct kvm_pio_request {
+	unsigned long count;
+	int cur_count;
+	struct page *guest_pages[2];
+	unsigned guest_page_offset;
+	int in;
+	int port;
+	int size;
+	int string;
+	int down;
+	int rep;
+};
 #endif /* __KVM_TYPES_H__ */
@@ -11,8 +11,6 @@
 #ifndef KVM_X86_H
 #define KVM_X86_H
-#include "kvm.h"
 #include <linux/types.h>
 #include <linux/mm.h>
@@ -21,6 +19,8 @@
 #include <asm/desc.h>
+#include "types.h"
 #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
 #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
 #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)
@@ -54,9 +54,19 @@
 #define IOPL_SHIFT 12
+#define KVM_PERMILLE_MMU_PAGES 20
+#define KVM_MIN_ALLOC_MMU_PAGES 64
+#define KVM_NUM_MMU_PAGES 1024
+#define KVM_MIN_FREE_MMU_PAGES 5
+#define KVM_REFILL_PAGES 25
+#define KVM_MAX_CPUID_ENTRIES 40
 extern spinlock_t kvm_lock;
 extern struct list_head vm_list;
+struct kvm_vcpu;
+struct kvm;
 enum {
 	VCPU_REGS_RAX = 0,
 	VCPU_REGS_RCX = 1,
@@ -92,6 +102,89 @@ enum {
 #include "x86_emulate.h"
+#define KVM_NR_MEM_OBJS 40
+/*
+ * We don't want allocation failures within the mmu code, so we preallocate
+ * enough memory for a single page fault in a cache.
+ */
+struct kvm_mmu_memory_cache {
+	int nobjs;
+	void *objects[KVM_NR_MEM_OBJS];
+};
+#define NR_PTE_CHAIN_ENTRIES 5
+struct kvm_pte_chain {
+	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
+	struct hlist_node link;
+};
+/*
+ * kvm_mmu_page_role, below, is defined as:
+ *
+ * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
+ * bits 4:7 - page table level for this shadow (1-4)
+ * bits 8:9 - page table quadrant for 2-level guests
+ * bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode)
+ * bits 17:19 - common access permissions for all ptes in this shadow page
+ */
+union kvm_mmu_page_role {
+	unsigned word;
+	struct {
+		unsigned glevels : 4;
+		unsigned level : 4;
+		unsigned quadrant : 2;
+		unsigned pad_for_nice_hex_output : 6;
+		unsigned metaphysical : 1;
+		unsigned access : 3;
+	};
+};
+struct kvm_mmu_page {
+	struct list_head link;
+	struct hlist_node hash_link;
+	/*
+	 * The following two entries are used to key the shadow page in the
+	 * hash table.
+	 */
+	gfn_t gfn;
+	union kvm_mmu_page_role role;
+	u64 *spt;
+	/* hold the gfn of each spte inside spt */
+	gfn_t *gfns;
+	unsigned long slot_bitmap; /* One bit set per slot which has memory
+				    * in this shadow page.
+				    */
+	int multimapped;	/* More than one parent_pte? */
+	int root_count;		/* Currently serving as active root */
+	union {
+		u64 *parent_pte;	       /* !multimapped */
+		struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
+	};
+};
+/*
+ * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
+ * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
+ * mode.
+ */
+struct kvm_mmu {
+	void (*new_cr3)(struct kvm_vcpu *vcpu);
+	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
+	void (*free)(struct kvm_vcpu *vcpu);
+	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
+	void (*prefetch_page)(struct kvm_vcpu *vcpu,
+			      struct kvm_mmu_page *page);
+	hpa_t root_hpa;
+	int root_level;
+	int shadow_root_level;
+	u64 *pae_root;
+};
 struct kvm_vcpu_arch {
 	u64 host_tsc;
 	int interrupt_window_open;
@@ -162,11 +255,6 @@ struct kvm_vcpu_arch {
 	struct x86_emulate_ctxt emulate_ctxt;
 };
-struct kvm_vcpu {
-	KVM_VCPU_COMM;
-	struct kvm_vcpu_arch arch;
-};
 struct descriptor_table {
 	u16 limit;