Commit 3dcf6c1b authored by Linus Torvalds

Merge branch 'kvm-updates/3.3' of git://git.kernel.org/pub/scm/virt/kvm/kvm

* 'kvm-updates/3.3' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (74 commits)
  KVM: PPC: Whitespace fix for kvm.h
  KVM: Fix whitespace in kvm_para.h
  KVM: PPC: annotate kvm_rma_init as __init
  KVM: x86 emulator: implement RDPMC (0F 33)
  KVM: x86 emulator: fix RDPMC privilege check
  KVM: Expose the architectural performance monitoring CPUID leaf
  KVM: VMX: Intercept RDPMC
  KVM: SVM: Intercept RDPMC
  KVM: Add generic RDPMC support
  KVM: Expose a version 2 architectural PMU to a guests
  KVM: Expose kvm_lapic_local_deliver()
  KVM: x86 emulator: Use opcode::execute for Group 9 instruction
  KVM: x86 emulator: Use opcode::execute for Group 4/5 instructions
  KVM: x86 emulator: Use opcode::execute for Group 1A instruction
  KVM: ensure that debugfs entries have been created
  KVM: drop bsp_vcpu pointer from kvm struct
  KVM: x86: Consolidate PIT legacy test
  KVM: x86: Do not rely on implicit inclusions
  KVM: Make KVM_INTEL depend on CPU_SUP_INTEL
  KVM: Use memdup_user instead of kmalloc/copy_from_user
  ...
@@ -350,15 +350,6 @@ Who: anybody or Florian Mickler <florian@mickler.org>
----------------------------
-What: KVM paravirt mmu host support
-When: January 2011
-Why: The paravirt mmu host support is slower than non-paravirt mmu, both
-     on newer and older hardware.  It is already not exposed to the guest,
-     and kept only for live migration purposes.
-Who: Avi Kivity <avi@redhat.com>
-----------------------------
What: iwlwifi 50XX module parameters
When: 3.0
Why: The "..50" modules parameters were used to configure 5000 series and
...
@@ -1178,9 +1178,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
kvm.ignore_msrs= [KVM] Ignore guest accesses to unhandled MSRs.
                 Default is 0 (don't ignore, but inject #GP)
-kvm.oos_shadow= [KVM] Disable out-of-sync shadow paging.
-                Default is 1 (enabled)
kvm.mmu_audit=   [KVM] This is a R/W parameter which allows audit
                 KVM MMU at runtime.
                 Default is 0 (off)
...
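As a usage illustration (not part of the patch itself): these are parameters of the kvm module, so they can be given on the host kernel command line, e.g.

    kvm.ignore_msrs=1 kvm.mmu_audit=1

or at module load time with something like "modprobe kvm ignore_msrs=1 mmu_audit=1". Being R/W, mmu_audit can also be toggled later through /sys/module/kvm/parameters/mmu_audit, assuming the kernel was built with KVM MMU auditing support.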
@@ -1466,6 +1466,31 @@ is supported; 2 if the processor requires all virtual machines to have
an RMA, or 1 if the processor can use an RMA but doesn't require it,
because it supports the Virtual RMA (VRMA) facility.
4.64 KVM_NMI
Capability: KVM_CAP_USER_NMI
Architectures: x86
Type: vcpu ioctl
Parameters: none
Returns: 0 on success, -1 on error
Queues an NMI on the thread's vcpu. Note this is well defined only
when KVM_CREATE_IRQCHIP has not been called, since this is an interface
between the virtual cpu core and virtual local APIC. After KVM_CREATE_IRQCHIP
has been called, this interface is completely emulated within the kernel.
To use this to emulate the LINT1 input with KVM_CREATE_IRQCHIP, use the
following algorithm:
- pause the vcpu
- read the local APIC's state (KVM_GET_LAPIC)
- check whether changing LINT1 will queue an NMI (see the LVT entry for LINT1)
- if so, issue KVM_NMI
- resume the vcpu
Some guests configure the LINT1 NMI input to cause a panic, aiding in
debugging.
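A minimal userspace sketch of that algorithm follows. It is illustrative only: the vcpu file descriptor and the pausing/resuming of the vcpu thread are left to the VMM, and the LVT register offsets come from the local APIC register layout rather than from this document.

    /* Hedged illustration: error handling and the surrounding VMM glue
     * (vcpu_fd, pausing the vcpu thread) are assumed, not defined here. */
    #include <linux/kvm.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>

    #define APIC_LVT1       0x360           /* LVT LINT1 register offset  */
    #define APIC_MODE_MASK  0x700           /* delivery mode field        */
    #define APIC_DM_NMI     0x400           /* delivery mode == NMI       */
    #define APIC_LVT_MASKED (1 << 16)

    static int emulate_lint1(int vcpu_fd)
    {
            struct kvm_lapic_state lapic;
            uint32_t lvt1;

            /* "pause the vcpu": caller guarantees the vcpu is not in KVM_RUN */
            if (ioctl(vcpu_fd, KVM_GET_LAPIC, &lapic) < 0)
                    return -1;

            memcpy(&lvt1, &lapic.regs[APIC_LVT1], sizeof(lvt1));
            if ((lvt1 & APIC_MODE_MASK) == APIC_DM_NMI && !(lvt1 & APIC_LVT_MASKED))
                    return ioctl(vcpu_fd, KVM_NMI, 0);

            return 0;       /* LINT1 would not queue an NMI, nothing to do */
    }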
5. The kvm_run structure

Application code obtains a pointer to the kvm_run structure by
...
@@ -774,13 +774,13 @@ struct kvm *kvm_arch_alloc_vm(void)
return kvm;
}

-struct kvm_io_range {
+struct kvm_ia64_io_range {
unsigned long start;
unsigned long size;
unsigned long type;
};

-static const struct kvm_io_range io_ranges[] = {
+static const struct kvm_ia64_io_range io_ranges[] = {
{VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
{MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
{LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
@@ -1366,14 +1366,12 @@ static void kvm_release_vm_pages(struct kvm *kvm)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
-int i, j;
+int j;
unsigned long base_gfn;

slots = kvm_memslots(kvm);
-for (i = 0; i < slots->nmemslots; i++) {
-memslot = &slots->memslots[i];
+kvm_for_each_memslot(memslot, slots) {
base_gfn = memslot->base_gfn;
for (j = 0; j < memslot->npages; j++) {
if (memslot->rmap[j])
put_page((struct page *)memslot->rmap[j]);
@@ -1820,7 +1818,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
if (log->slot >= KVM_MEMORY_SLOTS)
goto out;
-memslot = &kvm->memslots->memslots[log->slot];
+memslot = id_to_memslot(kvm->memslots, log->slot);
r = -ENOENT;
if (!memslot->dirty_bitmap)
goto out;
...
@@ -170,8 +170,8 @@ struct kvm_sregs {
} ppc64;
struct {
__u32 sr[16];
__u64 ibat[8];
__u64 dbat[8];
} ppc32;
} s;
struct {
...
@@ -498,7 +498,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
/* If nothing is dirty, don't bother messing with page tables. */
if (is_dirty) {
-memslot = &kvm->memslots->memslots[log->slot];
+memslot = id_to_memslot(kvm->memslots, log->slot);
ga = memslot->base_gfn << PAGE_SHIFT;
ga_end = ga + (memslot->npages << PAGE_SHIFT);
...
@@ -86,7 +86,7 @@ static inline int lpcr_rmls(unsigned long rma_size)
 * to allocate contiguous physical memory for the real memory
 * areas for guests.
 */
-void kvm_rma_init(void)
+void __init kvm_rma_init(void)
{
unsigned long i;
unsigned long j, npages;
...
@@ -197,7 +197,10 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */

#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
...
@@ -181,6 +181,7 @@ struct x86_emulate_ops {
int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
+int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
void (*halt)(struct x86_emulate_ctxt *ctxt);
void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
@@ -364,6 +365,7 @@ enum x86_intercept {
#endif

int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
+bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
#define EMULATION_FAILED -1
#define EMULATION_OK 0
#define EMULATION_RESTART 1
...
@@ -16,10 +16,12 @@
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
+#include <linux/irq_work.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
+#include <linux/perf_event.h>

#include <asm/pvclock-abi.h>
#include <asm/desc.h>
@@ -31,6 +33,8 @@
#define KVM_MEMORY_SLOTS 32
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+
#define KVM_MMIO_SIZE 16

#define KVM_PIO_PAGE_OFFSET 1
@@ -228,7 +232,7 @@ struct kvm_mmu_page {
 * One bit set per slot which has memory
 * in this shadow page.
 */
-DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
+DECLARE_BITMAP(slot_bitmap, KVM_MEM_SLOTS_NUM);
bool unsync;
int root_count; /* Currently serving as active root */
unsigned int unsync_children;
@@ -239,14 +243,9 @@ struct kvm_mmu_page {
int clear_spte_count;
#endif

+int write_flooding_count;
+
struct rcu_head rcu;
};

-struct kvm_pv_mmu_op_buffer {
-void *ptr;
-unsigned len;
-unsigned processed;
-char buf[512] __aligned(sizeof(long));
-};
-
struct kvm_pio_request {
@@ -294,6 +293,37 @@ struct kvm_mmu {
u64 pdptrs[4]; /* pae */
};

enum pmc_type {
KVM_PMC_GP = 0,
KVM_PMC_FIXED,
};
struct kvm_pmc {
enum pmc_type type;
u8 idx;
u64 counter;
u64 eventsel;
struct perf_event *perf_event;
struct kvm_vcpu *vcpu;
};
struct kvm_pmu {
unsigned nr_arch_gp_counters;
unsigned nr_arch_fixed_counters;
unsigned available_event_types;
u64 fixed_ctr_ctrl;
u64 global_ctrl;
u64 global_status;
u64 global_ovf_ctrl;
u64 counter_bitmask[2];
u64 global_ctrl_mask;
u8 version;
struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
struct irq_work irq_work;
u64 reprogram_pmi;
};
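/*
* The fields above mirror the Intel architectural PMU state seen by the
* guest: gp_counters[] backs IA32_PMCx/IA32_PERFEVTSELx, fixed_counters[]
* backs IA32_FIXED_CTRx, and fixed_ctr_ctrl, global_ctrl, global_status and
* global_ovf_ctrl back IA32_FIXED_CTR_CTRL and the
* IA32_PERF_GLOBAL_CTRL/STATUS/OVF_CTRL MSRs.
*/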
struct kvm_vcpu_arch {
/*
 * rip and regs accesses must go through
@@ -345,19 +375,10 @@ struct kvm_vcpu_arch {
 */
struct kvm_mmu *walk_mmu;

-/* only needed in kvm_pv_mmu_op() path, but it's hot so
- * put it here to avoid allocation */
-struct kvm_pv_mmu_op_buffer mmu_op_buffer;
-
struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
struct kvm_mmu_memory_cache mmu_page_cache;
struct kvm_mmu_memory_cache mmu_page_header_cache;

-gfn_t last_pt_write_gfn;
-int last_pt_write_count;
-u64 *last_pte_updated;
-gfn_t last_pte_gfn;
-
struct fpu guest_fpu;
u64 xcr0;
@@ -436,6 +457,8 @@ struct kvm_vcpu_arch {
unsigned access;
gfn_t mmio_gfn;

+struct kvm_pmu pmu;
+
/* used for guest single stepping over the given code position */
unsigned long singlestep_rip;
@@ -444,6 +467,9 @@ struct kvm_vcpu_arch {
cpumask_var_t wbinvd_dirty_mask;

+unsigned long last_retry_eip;
+unsigned long last_retry_addr;
+
struct {
bool halted;
gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
@@ -459,7 +485,6 @@ struct kvm_arch {
unsigned int n_requested_mmu_pages;
unsigned int n_max_mmu_pages;
unsigned int indirect_shadow_pages;
-atomic_t invlpg_counter;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
/*
 * Hash table of struct kvm_mmu_page.
@@ -660,6 +685,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
+int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
+struct kvm_memory_slot *slot);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
@@ -668,8 +695,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes);
-int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
-gpa_t addr, unsigned long *ret);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);

extern bool tdp_enabled;
@@ -692,6 +717,7 @@ enum emulation_result {
#define EMULTYPE_NO_DECODE (1 << 0)
#define EMULTYPE_TRAP_UD (1 << 1)
#define EMULTYPE_SKIP (1 << 2)
+#define EMULTYPE_RETRY (1 << 3)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
int emulation_type, void *insn, int insn_len);
@@ -734,6 +760,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+bool kvm_rdpmc(struct kvm_vcpu *vcpu);
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
@@ -754,13 +781,14 @@ int fx_init(struct kvm_vcpu *vcpu);
void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-const u8 *new, int bytes,
-bool guest_initiated);
+const u8 *new, int bytes);
+int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
@@ -782,6 +810,11 @@ void kvm_disable_tdp(void);
int complete_pio(struct kvm_vcpu *vcpu);

bool kvm_check_iopl(struct kvm_vcpu *vcpu);

+static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+{
+return gpa;
+}
+
static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
@@ -894,4 +927,17 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
int kvm_is_in_guest(void);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
#endif /* _ASM_X86_KVM_HOST_H */
@@ -39,8 +39,6 @@
#include <asm/desc.h>
#include <asm/tlbflush.h>

-#define MMU_QUEUE_SIZE 1024
-
static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
struct kvm_para_state {
u8 mmu_queue[MMU_QUEUE_SIZE];
int mmu_queue_len;
};
static DEFINE_PER_CPU(struct kvm_para_state, para_state);
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;
static struct kvm_para_state *kvm_para_state(void)
{
return &per_cpu(para_state, raw_smp_processor_id());
}
/*
 * No need for any "IO delay" on KVM
 */
@@ -271,151 +258,6 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
}
}
static void kvm_mmu_op(void *buffer, unsigned len)
{
int r;
unsigned long a1, a2;
do {
a1 = __pa(buffer);
a2 = 0; /* on i386 __pa() always returns <4G */
r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
buffer += r;
len -= r;
} while (len);
}
static void mmu_queue_flush(struct kvm_para_state *state)
{
if (state->mmu_queue_len) {
kvm_mmu_op(state->mmu_queue, state->mmu_queue_len);
state->mmu_queue_len = 0;
}
}
static void kvm_deferred_mmu_op(void *buffer, int len)
{
struct kvm_para_state *state = kvm_para_state();
if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) {
kvm_mmu_op(buffer, len);
return;
}
if (state->mmu_queue_len + len > sizeof state->mmu_queue)
mmu_queue_flush(state);
memcpy(state->mmu_queue + state->mmu_queue_len, buffer, len);
state->mmu_queue_len += len;
}
static void kvm_mmu_write(void *dest, u64 val)
{
__u64 pte_phys;
struct kvm_mmu_op_write_pte wpte;
#ifdef CONFIG_HIGHPTE
struct page *page;
unsigned long dst = (unsigned long) dest;
page = kmap_atomic_to_page(dest);
pte_phys = page_to_pfn(page);
pte_phys <<= PAGE_SHIFT;
pte_phys += (dst & ~(PAGE_MASK));
#else
pte_phys = (unsigned long)__pa(dest);
#endif
wpte.header.op = KVM_MMU_OP_WRITE_PTE;
wpte.pte_val = val;
wpte.pte_phys = pte_phys;
kvm_deferred_mmu_op(&wpte, sizeof wpte);
}
/*
* We only need to hook operations that are MMU writes. We hook these so that
* we can use lazy MMU mode to batch these operations. We could probably
* improve the performance of the host code if we used some of the information
* here to simplify processing of batched writes.
*/
static void kvm_set_pte(pte_t *ptep, pte_t pte)
{
kvm_mmu_write(ptep, pte_val(pte));
}
static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
kvm_mmu_write(ptep, pte_val(pte));
}
static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
kvm_mmu_write(pmdp, pmd_val(pmd));
}
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
{
kvm_mmu_write(ptep, pte_val(pte));
}
static void kvm_pte_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
kvm_mmu_write(ptep, 0);
}
static void kvm_pmd_clear(pmd_t *pmdp)
{
kvm_mmu_write(pmdp, 0);
}
#endif
static void kvm_set_pud(pud_t *pudp, pud_t pud)
{
kvm_mmu_write(pudp, pud_val(pud));
}
#if PAGETABLE_LEVELS == 4
static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
kvm_mmu_write(pgdp, pgd_val(pgd));
}
#endif
#endif /* PAGETABLE_LEVELS >= 3 */
static void kvm_flush_tlb(void)
{
struct kvm_mmu_op_flush_tlb ftlb = {
.header.op = KVM_MMU_OP_FLUSH_TLB,
};
kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
}
static void kvm_release_pt(unsigned long pfn)
{
struct kvm_mmu_op_release_pt rpt = {
.header.op = KVM_MMU_OP_RELEASE_PT,
.pt_phys = (u64)pfn << PAGE_SHIFT,
};
kvm_mmu_op(&rpt, sizeof rpt);
}
static void kvm_enter_lazy_mmu(void)
{
paravirt_enter_lazy_mmu();
}
static void kvm_leave_lazy_mmu(void)
{
struct kvm_para_state *state = kvm_para_state();
mmu_queue_flush(state);
paravirt_leave_lazy_mmu();
}
static void __init paravirt_ops_setup(void)
{
pv_info.name = "KVM";
@@ -424,29 +266,6 @@ static void __init paravirt_ops_setup(void)
if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
pv_cpu_ops.io_delay = kvm_io_delay;
if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
pv_mmu_ops.set_pte = kvm_set_pte;
pv_mmu_ops.set_pte_at = kvm_set_pte_at;
pv_mmu_ops.set_pmd = kvm_set_pmd;
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
pv_mmu_ops.pte_clear = kvm_pte_clear;
pv_mmu_ops.pmd_clear = kvm_pmd_clear;
#endif
pv_mmu_ops.set_pud = kvm_set_pud;
#if PAGETABLE_LEVELS == 4
pv_mmu_ops.set_pgd = kvm_set_pgd;
#endif
#endif
pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
pv_mmu_ops.release_pte = kvm_release_pt;
pv_mmu_ops.release_pmd = kvm_release_pt;
pv_mmu_ops.release_pud = kvm_release_pt;
pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
}
#ifdef CONFIG_X86_IO_APIC
no_timer_check = 1;
#endif
...
@@ -35,6 +35,7 @@ config KVM
select KVM_MMIO
select TASKSTATS
select TASK_DELAY_ACCT
+select PERF_EVENTS
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
@@ -52,6 +53,8 @@ config KVM
config KVM_INTEL
tristate "KVM for Intel processors support"
depends on KVM
+# for perf_guest_get_msrs():
+depends on CPU_SUP_INTEL
---help---
Provides support for KVM on Intel processors equipped with the VT
extensions.
...
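For illustration (config symbol names as used in the x86 Kconfig): a .config like the one below still builds the Intel module after this change, but only because CPU_SUP_INTEL is enabled; PERF_EVENTS no longer needs to be set by hand since it is now selected by KVM.

    CONFIG_CPU_SUP_INTEL=y
    CONFIG_KVM=m
    CONFIG_KVM_INTEL=m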
@@ -12,7 +12,7 @@ kvm-$(CONFIG_IOMMU_API) += $(addprefix ../../../virt/kvm/, iommu.o)
kvm-$(CONFIG_KVM_ASYNC_PF) += $(addprefix ../../../virt/kvm/, async_pf.o)

kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
-  i8254.o timer.o
+  i8254.o timer.o cpuid.o pmu.o
kvm-intel-y += vmx.o
kvm-amd-y += svm.o
...
/*
* Kernel-based Virtual Machine driver for Linux
* cpuid support routines
*
* derived from arch/x86/kvm/x86.c
*
* Copyright 2011 Red Hat, Inc. and/or its affiliates.
* Copyright IBM Corporation, 2008
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <asm/user.h>
#include <asm/xsave.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
void kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
struct kvm_lapic *apic = vcpu->arch.apic;
best = kvm_find_cpuid_entry(vcpu, 1, 0);
if (!best)
return;
/* Update OSXSAVE bit */
if (cpu_has_xsave && best->function == 0x1) {
best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
best->ecx |= bit(X86_FEATURE_OSXSAVE);
}
if (apic) {
if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
apic->lapic_timer.timer_mode_mask = 3 << 17;
else
apic->lapic_timer.timer_mode_mask = 1 << 17;
}
kvm_pmu_cpuid_update(vcpu);
}
static int is_efer_nx(void)
{
unsigned long long efer = 0;
rdmsrl_safe(MSR_EFER, &efer);
return efer & EFER_NX;
}
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
int i;
struct kvm_cpuid_entry2 *e, *entry;
entry = NULL;
for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
e = &vcpu->arch.cpuid_entries[i];
if (e->function == 0x80000001) {
entry = e;
break;
}
}
if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
entry->edx &= ~(1 << 20);
printk(KERN_INFO "kvm: guest NX capability removed\n");
}
}
/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid *cpuid,
struct kvm_cpuid_entry __user *entries)
{
int r, i;
struct kvm_cpuid_entry *cpuid_entries;
r = -E2BIG;
if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
goto out;
r = -ENOMEM;
cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
if (!cpuid_entries)
goto out;
r = -EFAULT;
if (copy_from_user(cpuid_entries, entries,
cpuid->nent * sizeof(struct kvm_cpuid_entry)))
goto out_free;
for (i = 0; i < cpuid->nent; i++) {
vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
vcpu->arch.cpuid_entries[i].index = 0;
vcpu->arch.cpuid_entries[i].flags = 0;
vcpu->arch.cpuid_entries[i].padding[0] = 0;
vcpu->arch.cpuid_entries[i].padding[1] = 0;
vcpu->arch.cpuid_entries[i].padding[2] = 0;
}
vcpu->arch.cpuid_nent = cpuid->nent;
cpuid_fix_nx_cap(vcpu);
r = 0;
kvm_apic_set_version(vcpu);
kvm_x86_ops->cpuid_update(vcpu);
kvm_update_cpuid(vcpu);
out_free:
vfree(cpuid_entries);
out:
return r;
}
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries)
{
int r;
r = -E2BIG;
if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
goto out;
r = -EFAULT;
if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
goto out;
vcpu->arch.cpuid_nent = cpuid->nent;
kvm_apic_set_version(vcpu);
kvm_x86_ops->cpuid_update(vcpu);
kvm_update_cpuid(vcpu);
return 0;
out:
return r;
}
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries)
{
int r;
r = -E2BIG;
if (cpuid->nent < vcpu->arch.cpuid_nent)
goto out;
r = -EFAULT;
if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
goto out;
return 0;
out:
cpuid->nent = vcpu->arch.cpuid_nent;
return r;
}
static void cpuid_mask(u32 *word, int wordnum)
{
*word &= boot_cpu_data.x86_capability[wordnum];
}
static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
u32 index)
{
entry->function = function;
entry->index = index;
cpuid_count(entry->function, entry->index,
&entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
entry->flags = 0;
}
static bool supported_xcr0_bit(unsigned bit)
{
u64 mask = ((u64)1 << bit);
return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
}
#define F(x) bit(X86_FEATURE_##x)
static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
u32 index, int *nent, int maxnent)
{
int r;
unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
? F(GBPAGES) : 0;
unsigned f_lm = F(LM);
#else
unsigned f_gbpages = 0;
unsigned f_lm = 0;
#endif
unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
/* cpuid 1.edx */
const u32 kvm_supported_word0_x86_features =
F(FPU) | F(VME) | F(DE) | F(PSE) |
F(TSC) | F(MSR) | F(PAE) | F(MCE) |
F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
0 /* Reserved, DS, ACPI */ | F(MMX) |
F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
0 /* HTT, TM, Reserved, PBE */;
/* cpuid 0x80000001.edx */
const u32 kvm_supported_word1_x86_features =
F(FPU) | F(VME) | F(DE) | F(PSE) |
F(TSC) | F(MSR) | F(PAE) | F(MCE) |
F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
F(PAT) | F(PSE36) | 0 /* Reserved */ |
f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
/* cpuid 1.ecx */
const u32 kvm_supported_word4_x86_features =
F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
0 /* DS-CPL, VMX, SMX, EST */ |
0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
0 /* Reserved, DCA */ | F(XMM4_1) |
F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
F(F16C) | F(RDRAND);
/* cpuid 0x80000001.ecx */
const u32 kvm_supported_word6_x86_features =
F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
/* cpuid 0xC0000001.edx */
const u32 kvm_supported_word5_x86_features =
F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
F(PMM) | F(PMM_EN);
/* cpuid 7.0.ebx */
const u32 kvm_supported_word9_x86_features =
F(FSGSBASE) | F(BMI1) | F(AVX2) | F(SMEP) | F(BMI2) | F(ERMS);
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
r = -E2BIG;
if (*nent >= maxnent)
goto out;
do_cpuid_1_ent(entry, function, index);
++*nent;
switch (function) {
case 0:
entry->eax = min(entry->eax, (u32)0xd);
break;
case 1:
entry->edx &= kvm_supported_word0_x86_features;
cpuid_mask(&entry->edx, 0);
entry->ecx &= kvm_supported_word4_x86_features;
cpuid_mask(&entry->ecx, 4);
/* we support x2apic emulation even if host does not support
* it since we emulate x2apic in software */
entry->ecx |= F(X2APIC);
break;
/* function 2 entries are STATEFUL. That is, repeated cpuid commands
* may return different values. This forces us to get_cpu() before
* issuing the first command, and also to emulate this annoying behavior
* in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
case 2: {
int t, times = entry->eax & 0xff;
entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
for (t = 1; t < times; ++t) {
if (*nent >= maxnent)
goto out;
do_cpuid_1_ent(&entry[t], function, 0);
entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
++*nent;
}
break;
}
/* function 4 has additional index. */
case 4: {
int i, cache_type;
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
/* read more entries until cache_type is zero */
for (i = 1; ; ++i) {
if (*nent >= maxnent)
goto out;
cache_type = entry[i - 1].eax & 0x1f;
if (!cache_type)
break;
do_cpuid_1_ent(&entry[i], function, i);
entry[i].flags |=
KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
++*nent;
}
break;
}
case 7: {
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
/* Mask ebx against host capability word 9 */
if (index == 0) {
entry->ebx &= kvm_supported_word9_x86_features;
cpuid_mask(&entry->ebx, 9);
} else
entry->ebx = 0;
entry->eax = 0;
entry->ecx = 0;
entry->edx = 0;
break;
}
case 9:
break;
case 0xa: { /* Architectural Performance Monitoring */
struct x86_pmu_capability cap;
union cpuid10_eax eax;
union cpuid10_edx edx;
perf_get_x86_pmu_capability(&cap);
/*
* Only support guest architectural pmu on a host
* with architectural pmu.
*/
if (!cap.version)
memset(&cap, 0, sizeof(cap));
eax.split.version_id = min(cap.version, 2);
eax.split.num_counters = cap.num_counters_gp;
eax.split.bit_width = cap.bit_width_gp;
eax.split.mask_length = cap.events_mask_len;
edx.split.num_counters_fixed = cap.num_counters_fixed;
edx.split.bit_width_fixed = cap.bit_width_fixed;
edx.split.reserved = 0;
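/*
* CPUID.0AH layout being assembled here, per the Intel SDM:
*   EAX[7:0]   PMU version (capped at 2 above)
*   EAX[15:8]  number of general-purpose counters per logical CPU
*   EAX[23:16] bit width of the general-purpose counters
*   EAX[31:24] length of the EBX event-availability bit vector
*   EBX        a set bit means the architectural event is NOT available
*   EDX[4:0]   number of fixed-function counters
*   EDX[12:5]  bit width of the fixed-function counters
*/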
entry->eax = eax.full;
entry->ebx = cap.events_mask;
entry->ecx = 0;
entry->edx = edx.full;
break;
}
/* function 0xb has additional index. */
case 0xb: {
int i, level_type;
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
/* read more entries until level_type is zero */
for (i = 1; ; ++i) {
if (*nent >= maxnent)
goto out;
level_type = entry[i - 1].ecx & 0xff00;
if (!level_type)
break;
do_cpuid_1_ent(&entry[i], function, i);
entry[i].flags |=
KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
++*nent;
}
break;
}
case 0xd: {
int idx, i;
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
for (idx = 1, i = 1; idx < 64; ++idx) {
if (*nent >= maxnent)
goto out;
do_cpuid_1_ent(&entry[i], function, idx);
if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
continue;
entry[i].flags |=
KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
++*nent;
++i;
}
break;
}
case KVM_CPUID_SIGNATURE: {
char signature[12] = "KVMKVMKVM\0\0";
u32 *sigptr = (u32 *)signature;
entry->eax = 0;
entry->ebx = sigptr[0];
entry->ecx = sigptr[1];
entry->edx = sigptr[2];
break;
}
case KVM_CPUID_FEATURES:
entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
(1 << KVM_FEATURE_NOP_IO_DELAY) |
(1 << KVM_FEATURE_CLOCKSOURCE2) |
(1 << KVM_FEATURE_ASYNC_PF) |
(1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
if (sched_info_on())
entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
entry->ebx = 0;
entry->ecx = 0;
entry->edx = 0;
break;
case 0x80000000:
entry->eax = min(entry->eax, 0x8000001a);
break;
case 0x80000001:
entry->edx &= kvm_supported_word1_x86_features;
cpuid_mask(&entry->edx, 1);
entry->ecx &= kvm_supported_word6_x86_features;
cpuid_mask(&entry->ecx, 6);
break;
case 0x80000008: {
unsigned g_phys_as = (entry->eax >> 16) & 0xff;
unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
unsigned phys_as = entry->eax & 0xff;
if (!g_phys_as)
g_phys_as = phys_as;
entry->eax = g_phys_as | (virt_as << 8);
entry->ebx = entry->edx = 0;
break;
}
case 0x80000019:
entry->ecx = entry->edx = 0;
break;
case 0x8000001a:
break;
case 0x8000001d:
break;
/*Add support for Centaur's CPUID instruction*/
case 0xC0000000:
/*Just support up to 0xC0000004 now*/
entry->eax = min(entry->eax, 0xC0000004);
break;
case 0xC0000001:
entry->edx &= kvm_supported_word5_x86_features;
cpuid_mask(&entry->edx, 5);
break;
case 3: /* Processor serial number */
case 5: /* MONITOR/MWAIT */
case 6: /* Thermal management */
case 0x80000007: /* Advanced power management */
case 0xC0000002:
case 0xC0000003:
case 0xC0000004:
default:
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
break;
}
kvm_x86_ops->set_supported_cpuid(function, entry);
r = 0;
out:
put_cpu();
return r;
}
#undef F
struct kvm_cpuid_param {
u32 func;
u32 idx;
bool has_leaf_count;
bool (*qualifier)(struct kvm_cpuid_param *param);
};
static bool is_centaur_cpu(struct kvm_cpuid_param *param)
{
return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}
int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries)
{
struct kvm_cpuid_entry2 *cpuid_entries;
int limit, nent = 0, r = -E2BIG, i;
u32 func;
static struct kvm_cpuid_param param[] = {
{ .func = 0, .has_leaf_count = true },
{ .func = 0x80000000, .has_leaf_count = true },
{ .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
{ .func = KVM_CPUID_SIGNATURE },
{ .func = KVM_CPUID_FEATURES },
};
if (cpuid->nent < 1)
goto out;
if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
cpuid->nent = KVM_MAX_CPUID_ENTRIES;
r = -ENOMEM;
cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
if (!cpuid_entries)
goto out;
r = 0;
for (i = 0; i < ARRAY_SIZE(param); i++) {
struct kvm_cpuid_param *ent = &param[i];
if (ent->qualifier && !ent->qualifier(ent))
continue;
r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
&nent, cpuid->nent);
if (r)
goto out_free;
if (!ent->has_leaf_count)
continue;
limit = cpuid_entries[nent - 1].eax;
for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
&nent, cpuid->nent);
if (r)
goto out_free;
}
r = -EFAULT;
if (copy_to_user(entries, cpuid_entries,
nent * sizeof(struct kvm_cpuid_entry2)))
goto out_free;
cpuid->nent = nent;
r = 0;
out_free:
vfree(cpuid_entries);
out:
return r;
}
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
int j, nent = vcpu->arch.cpuid_nent;
e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
/* when no next entry is found, the current entry[i] is reselected */
for (j = i + 1; ; j = (j + 1) % nent) {
struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
if (ej->function == e->function) {
ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
return j;
}
}
return 0; /* silence gcc, even though control never reaches here */
}
/* find an entry with matching function, matching index (if needed), and that
* should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
u32 function, u32 index)
{
if (e->function != function)
return 0;
if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
return 0;
if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
!(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
return 0;
return 1;
}
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
u32 function, u32 index)
{
int i;
struct kvm_cpuid_entry2 *best = NULL;
for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
struct kvm_cpuid_entry2 *e;
e = &vcpu->arch.cpuid_entries[i];
if (is_matching_cpuid_entry(e, function, index)) {
if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
move_to_next_stateful_cpuid_entry(vcpu, i);
best = e;
break;
}
}
return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
if (!best || best->eax < 0x80000008)
goto not_found;
best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
if (best)
return best->eax & 0xff;
not_found:
return 36;
}
/*
* If no match is found, check whether we exceed the vCPU's limit
* and return the content of the highest valid _standard_ leaf instead.
* This is to satisfy the CPUID specification: for example, a guest whose
* highest standard leaf is 0xd and which executes CPUID with EAX=0x12
* gets back the contents of leaf 0xd.
*/
static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
u32 function, u32 index)
{
struct kvm_cpuid_entry2 *maxlevel;
maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
if (!maxlevel || maxlevel->eax >= function)
return NULL;
if (function & 0x80000000) {
maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
if (!maxlevel)
return NULL;
}
return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
u32 function, index;
struct kvm_cpuid_entry2 *best;
function = kvm_register_read(vcpu, VCPU_REGS_RAX);
index = kvm_register_read(vcpu, VCPU_REGS_RCX);
kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
best = kvm_find_cpuid_entry(vcpu, function, index);
if (!best)
best = check_cpuid_limit(vcpu, function, index);
if (best) {
kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
}
kvm_x86_ops->skip_emulated_instruction(vcpu);
trace_kvm_cpuid(function,
kvm_register_read(vcpu, VCPU_REGS_RAX),
kvm_register_read(vcpu, VCPU_REGS_RBX),
kvm_register_read(vcpu, VCPU_REGS_RCX),
kvm_register_read(vcpu, VCPU_REGS_RDX));
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
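As an illustrative aside (a sketch, not code from this patch): a guest consumes the KVM_CPUID_SIGNATURE and KVM_CPUID_FEATURES leaves built by do_cpuid_ent() roughly as below. The helper names are made up for the example; the 0x40000000/0x40000001 leaf numbers follow the KVM paravirt interface in kvm_para.h.

    #include <stdint.h>
    #include <string.h>

    #define KVM_CPUID_SIGNATURE 0x40000000
    #define KVM_CPUID_FEATURES  0x40000001

    static inline void cpuid(uint32_t leaf,
                             uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
    {
            asm volatile("cpuid"
                         : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
                         : "0"(leaf), "2"(0));
    }

    static int running_on_kvm(void)
    {
            uint32_t eax, sig[3];

            cpuid(KVM_CPUID_SIGNATURE, &eax, &sig[0], &sig[1], &sig[2]);
            /* ebx:ecx:edx spell out "KVMKVMKVM\0\0\0", eax is 0 (see above) */
            return memcmp(sig, "KVMKVMKVM\0\0\0", 12) == 0;
    }

    static uint32_t kvm_feature_bits(void)
    {
            uint32_t eax, ebx, ecx, edx;

            cpuid(KVM_CPUID_FEATURES, &eax, &ebx, &ecx, &edx);
            return eax;     /* e.g. test (1 << KVM_FEATURE_CLOCKSOURCE2) */
    }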
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H
#include "x86.h"
void kvm_update_cpuid(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
u32 function, u32 index);
int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid *cpuid,
struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 1, 0);
return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}
static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 7, 0);
return best && (best->ebx & bit(X86_FEATURE_SMEP));
}
static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 7, 0);
return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
}
#endif
(This diff is collapsed.)
@@ -344,7 +344,7 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
struct kvm_timer *pt = &ps->pit_timer;
s64 interval;

-if (!irqchip_in_kernel(kvm))
+if (!irqchip_in_kernel(kvm) || ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
return;

interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
@@ -397,15 +397,11 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
case 1:
/* FIXME: enhance mode 4 precision */
case 4:
-if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
-create_pit_timer(kvm, val, 0);
-}
+create_pit_timer(kvm, val, 0);
break;
case 2:
case 3:
-if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
-create_pit_timer(kvm, val, 1);
-}
+create_pit_timer(kvm, val, 1);
break;
default:
destroy_pit_timer(kvm->arch.vpit);
...
@@ -262,9 +262,10 @@ int kvm_pic_read_irq(struct kvm *kvm)

void kvm_pic_reset(struct kvm_kpic_state *s)
{
-int irq;
-struct kvm_vcpu *vcpu0 = s->pics_state->kvm->bsp_vcpu;
+int irq, i;
+struct kvm_vcpu *vcpu;
u8 irr = s->irr, isr = s->imr;
+bool found = false;

s->last_irr = 0;
s->irr = 0;
@@ -281,12 +282,19 @@ void kvm_pic_reset(struct kvm_kpic_state *s)
s->special_fully_nested_mode = 0;
s->init4 = 0;

-for (irq = 0; irq < PIC_NUM_PINS/2; irq++) {
-if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0))
-if (irr & (1 << irq) || isr & (1 << irq)) {
-pic_clear_isr(s, irq);
-}
-}
+kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
+if (kvm_apic_accept_pic_intr(vcpu)) {
+found = true;
+break;
+}
+
+if (!found)
+return;
+
+for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
+if (irr & (1 << irq) || isr & (1 << irq))
+pic_clear_isr(s, irq);
}

static void pic_ioport_write(void *opaque, u32 addr, u32 val)
...
@@ -38,6 +38,7 @@
#include "irq.h"
#include "trace.h"
#include "x86.h"
+#include "cpuid.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
@@ -1120,7 +1121,7 @@ int apic_has_pending_timer(struct kvm_vcpu *vcpu)
return 0;
}

-static int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
+int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
u32 reg = apic_get_reg(apic, lvt_type);
int vector, mode, trig_mode;
...
@@ -34,6 +34,7 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu);
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
+int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type);
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
...
(This diff is collapsed.)
@@ -19,6 +19,15 @@
#include <linux/ratelimit.h>

+char const *audit_point_name[] = {
+"pre page fault",
+"post page fault",
+"pre pte write",
+"post pte write",
+"pre sync",
+"post sync"
+};
+
#define audit_printk(kvm, fmt, args...) \
printk(KERN_ERR "audit: (%s) error: " \
fmt, audit_point_name[kvm->arch.audit_point], ##args)
@@ -224,7 +233,10 @@ static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
mmu_spte_walk(vcpu, audit_spte);
}

-static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
+static bool mmu_audit;
+static struct jump_label_key mmu_audit_key;
+
+static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
@@ -236,18 +248,18 @@ static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
audit_vcpu_spte(vcpu);
}

-static bool mmu_audit;
+static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
+{
+if (static_branch((&mmu_audit_key)))
+__kvm_mmu_audit(vcpu, point);
+}

static void mmu_audit_enable(void)
{
-int ret;
-
if (mmu_audit)
return;

-ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
-WARN_ON(ret);
+jump_label_inc(&mmu_audit_key);
mmu_audit = true;
}
@@ -256,8 +268,7 @@ static void mmu_audit_disable(void)
if (!mmu_audit)
return;

-unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
-tracepoint_synchronize_unregister();
+jump_label_dec(&mmu_audit_key);
mmu_audit = false;
}
...
@@ -243,25 +243,6 @@ TRACE_EVENT(
TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
__entry->access)
);
TRACE_EVENT(
kvm_mmu_audit,
TP_PROTO(struct kvm_vcpu *vcpu, int audit_point),
TP_ARGS(vcpu, audit_point),
TP_STRUCT__entry(
__field(struct kvm_vcpu *, vcpu)
__field(int, audit_point)
),
TP_fast_assign(
__entry->vcpu = vcpu;
__entry->audit_point = audit_point;
),
TP_printk("vcpu:%d %s", __entry->vcpu->cpu,
audit_point_name[__entry->audit_point])
);
#endif /* _TRACE_KVMMMU_H */

#undef TRACE_INCLUDE_PATH
...
@@ -497,6 +497,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
shadow_walk_next(&it)) {
gfn_t table_gfn;

+clear_sp_write_flooding_count(it.sptep);
drop_large_spte(vcpu, it.sptep);

sp = NULL;
@@ -522,6 +523,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
shadow_walk_next(&it)) {
gfn_t direct_gfn;

+clear_sp_write_flooding_count(it.sptep);
validate_direct_spte(vcpu, it.sptep, direct_access);

drop_large_spte(vcpu, it.sptep);
@@ -536,6 +538,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
link_shadow_page(it.sptep, sp);
}

+clear_sp_write_flooding_count(it.sptep);
mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
user_fault, write_fault, emulate, it.level,
gw->gfn, pfn, prefault, map_writable);
@@ -599,11 +602,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 */
if (!r) {
pgprintk("%s: guest page fault\n", __func__);
-if (!prefault) {
+if (!prefault)
inject_page_fault(vcpu, &walker.fault);
-/* reset fork detector */
-vcpu->arch.last_pt_write_count = 0;
-}
return 0;
}
@@ -631,7 +632,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
if (mmu_notifier_retry(vcpu, mmu_seq))
goto out_unlock;
-trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
+kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
kvm_mmu_free_some_pages(vcpu);
if (!force_pt_level)
transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
@@ -641,11 +642,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__,
sptep, *sptep, emulate);
-if (!emulate)
-vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
-
++vcpu->stat.pf_fixed;
-trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
+kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
spin_unlock(&vcpu->kvm->mmu_lock);

return emulate;
@@ -656,65 +654,66 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
return 0;
}
static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
int offset = 0;
WARN_ON(sp->role.level != 1);
if (PTTYPE == 32)
offset = sp->role.quadrant << PT64_LEVEL_BITS;
return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
struct kvm_shadow_walk_iterator iterator;
struct kvm_mmu_page *sp;
-gpa_t pte_gpa = -1;
int level;
u64 *sptep;
-int need_flush = 0;

vcpu_clear_mmio_info(vcpu, gva);

-spin_lock(&vcpu->kvm->mmu_lock);
+/*
+ * No need to check return value here, rmap_can_add() can
+ * help us to skip pte prefetch later.
+ */
+mmu_topup_memory_caches(vcpu);
+
+spin_lock(&vcpu->kvm->mmu_lock);

for_each_shadow_entry(vcpu, gva, iterator) {
level = iterator.level;
sptep = iterator.sptep;

sp = page_header(__pa(sptep));
if (is_last_spte(*sptep, level)) {
-int offset, shift;
+pt_element_t gpte;
+gpa_t pte_gpa;

if (!sp->unsync)
break;

-shift = PAGE_SHIFT -
-(PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
-offset = sp->role.quadrant << shift;
-
-pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
+pte_gpa = FNAME(get_level1_sp_gpa)(sp);
pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

-if (is_shadow_present_pte(*sptep)) {
-if (is_large_pte(*sptep))
---vcpu->kvm->stat.lpages;
-drop_spte(vcpu->kvm, sptep);
-need_flush = 1;
-} else if (is_mmio_spte(*sptep))
-mmu_spte_clear_no_track(sptep);
-
-break;
+if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
+kvm_flush_remote_tlbs(vcpu->kvm);
+
+if (!rmap_can_add(vcpu))
+break;
+
+if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
+sizeof(pt_element_t)))
+break;
+
+FNAME(update_pte)(vcpu, sp, sptep, &gpte);
}

if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
break;
}

-if (need_flush)
-kvm_flush_remote_tlbs(vcpu->kvm);
-
-atomic_inc(&vcpu->kvm->arch.invlpg_counter);
-
spin_unlock(&vcpu->kvm->mmu_lock);
-
-if (pte_gpa == -1)
-return;
-
-if (mmu_topup_memory_caches(vcpu))
-return;
-
-kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access, static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
...@@ -769,19 +768,14 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr, ...@@ -769,19 +768,14 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
*/ */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{ {
int i, offset, nr_present; int i, nr_present = 0;
bool host_writable; bool host_writable;
gpa_t first_pte_gpa; gpa_t first_pte_gpa;
offset = nr_present = 0;
/* direct kvm_mmu_page can not be unsync. */ /* direct kvm_mmu_page can not be unsync. */
BUG_ON(sp->role.direct); BUG_ON(sp->role.direct);
if (PTTYPE == 32) first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
offset = sp->role.quadrant << PT64_LEVEL_BITS;
first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
for (i = 0; i < PT64_ENT_PER_PAGE; i++) { for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
unsigned pte_access; unsigned pte_access;
......
/*
* Kernel-based Virtual Machine -- Performance Monitoring Unit support
*
* Copyright 2011 Red Hat, Inc. and/or its affiliates.
*
* Authors:
* Avi Kivity <avi@redhat.com>
* Gleb Natapov <gleb@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
static struct kvm_arch_event_perf_mapping {
u8 eventsel;
u8 unit_mask;
unsigned event_type;
bool inexact;
} arch_events[] = {
/* Index must match CPUID 0x0A.EBX bit vector */
[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
};
/* mapping between fixed pmc index and arch_events array */
int fixed_pmc_events[] = {1, 0, 2};
static bool pmc_is_gp(struct kvm_pmc *pmc)
{
return pmc->type == KVM_PMC_GP;
}
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
return pmu->counter_bitmask[pmc->type];
}
static inline bool pmc_enabled(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
u32 base)
{
if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
return &pmu->gp_counters[msr - base];
return NULL;
}
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
int base = MSR_CORE_PERF_FIXED_CTR0;
if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
return &pmu->fixed_counters[msr - base];
return NULL;
}
static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
{
return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
}
static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
if (idx < X86_PMC_IDX_FIXED)
return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
else
return get_fixed_pmc_idx(pmu, idx - X86_PMC_IDX_FIXED);
}
void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.apic)
kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}
static void trigger_pmi(struct irq_work *irq_work)
{
struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
irq_work);
struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
arch.pmu);
kvm_deliver_pmi(vcpu);
}
static void kvm_perf_overflow(struct perf_event *perf_event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct kvm_pmc *pmc = perf_event->overflow_handler_context;
struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
}
static void kvm_perf_overflow_intr(struct perf_event *perf_event,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct kvm_pmc *pmc = perf_event->overflow_handler_context;
struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
kvm_perf_overflow(perf_event, data, regs);
kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
/*
* Inject a PMI. If the vcpu was in guest mode during the NMI, the
* PMI can be delivered on the next guest-mode entry. Otherwise we
* can't be sure that the vcpu wasn't executing a hlt instruction at
* the time of the vmexit and won't re-enter guest mode until it is
* woken up. So we should wake it, but that is impossible from NMI
* context. Do it from irq work instead.
*/
if (!kvm_is_in_guest())
irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
else
kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}
}
static u64 read_pmc(struct kvm_pmc *pmc)
{
u64 counter, enabled, running;
counter = pmc->counter;
if (pmc->perf_event)
counter += perf_event_read_value(pmc->perf_event,
&enabled, &running);
/* FIXME: Scaling needed? */
return counter & pmc_bitmask(pmc);
}
static void stop_counter(struct kvm_pmc *pmc)
{
if (pmc->perf_event) {
pmc->counter = read_pmc(pmc);
perf_event_release_kernel(pmc->perf_event);
pmc->perf_event = NULL;
}
}
static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
unsigned config, bool exclude_user, bool exclude_kernel,
bool intr)
{
struct perf_event *event;
struct perf_event_attr attr = {
.type = type,
.size = sizeof(attr),
.pinned = true,
.exclude_idle = true,
.exclude_host = 1,
.exclude_user = exclude_user,
.exclude_kernel = exclude_kernel,
.config = config,
};
attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
event = perf_event_create_kernel_counter(&attr, -1, current,
intr ? kvm_perf_overflow_intr :
kvm_perf_overflow, pmc);
if (IS_ERR(event)) {
printk_once("kvm: pmu event creation failed %ld\n",
PTR_ERR(event));
return;
}
pmc->perf_event = event;
clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi);
}
static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
u8 unit_mask)
{
int i;
for (i = 0; i < ARRAY_SIZE(arch_events); i++)
if (arch_events[i].eventsel == event_select
&& arch_events[i].unit_mask == unit_mask
&& (pmu->available_event_types & (1 << i)))
break;
if (i == ARRAY_SIZE(arch_events))
return PERF_COUNT_HW_MAX;
return arch_events[i].event_type;
}
static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
unsigned config, type = PERF_TYPE_RAW;
u8 event_select, unit_mask;
pmc->eventsel = eventsel;
stop_counter(pmc);
if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
return;
event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
if (!(event_select & (ARCH_PERFMON_EVENTSEL_EDGE |
ARCH_PERFMON_EVENTSEL_INV |
ARCH_PERFMON_EVENTSEL_CMASK))) {
config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
unit_mask);
if (config != PERF_COUNT_HW_MAX)
type = PERF_TYPE_HARDWARE;
}
if (type == PERF_TYPE_RAW)
config = eventsel & X86_RAW_EVENT_MASK;
reprogram_counter(pmc, type, config,
!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
eventsel & ARCH_PERFMON_EVENTSEL_INT);
}
static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
{
unsigned en = en_pmi & 0x3;
bool pmi = en_pmi & 0x8;
stop_counter(pmc);
if (!en || !pmc_enabled(pmc))
return;
reprogram_counter(pmc, PERF_TYPE_HARDWARE,
arch_events[fixed_pmc_events[idx]].event_type,
!(en & 0x2), /* exclude user */
!(en & 0x1), /* exclude kernel */
pmi);
}
static inline u8 fixed_en_pmi(u64 ctrl, int idx)
{
return (ctrl >> (idx * 4)) & 0xf;
}
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
int i;
for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
u8 en_pmi = fixed_en_pmi(data, i);
struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi)
continue;
reprogram_fixed_counter(pmc, en_pmi, i);
}
pmu->fixed_ctr_ctrl = data;
}
static void reprogram_idx(struct kvm_pmu *pmu, int idx)
{
struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);
if (!pmc)
return;
if (pmc_is_gp(pmc))
reprogram_gp_counter(pmc, pmc->eventsel);
else {
int fidx = idx - X86_PMC_IDX_FIXED;
reprogram_fixed_counter(pmc,
fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
}
}
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
int bit;
u64 diff = pmu->global_ctrl ^ data;
pmu->global_ctrl = data;
for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
reprogram_idx(pmu, bit);
}
bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
{
struct kvm_pmu *pmu = &vcpu->arch.pmu;
int ret;
switch (msr) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
case MSR_CORE_PERF_GLOBAL_STATUS:
case MSR_CORE_PERF_GLOBAL_CTRL:
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
ret = pmu->version > 1;
break;
default:
ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
|| get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
|| get_fixed_pmc(pmu, msr);
break;
}
return ret;
}
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
struct kvm_pmu *pmu = &vcpu->arch.pmu;
struct kvm_pmc *pmc;
switch (index) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
*data = pmu->fixed_ctr_ctrl;
return 0;
case MSR_CORE_PERF_GLOBAL_STATUS:
*data = pmu->global_status;
return 0;
case MSR_CORE_PERF_GLOBAL_CTRL:
*data = pmu->global_ctrl;
return 0;
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
*data = pmu->global_ovf_ctrl;
return 0;
default:
if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
(pmc = get_fixed_pmc(pmu, index))) {
*data = read_pmc(pmc);
return 0;
} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
*data = pmc->eventsel;
return 0;
}
}
return 1;
}
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
{
struct kvm_pmu *pmu = &vcpu->arch.pmu;
struct kvm_pmc *pmc;
switch (index) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
if (pmu->fixed_ctr_ctrl == data)
return 0;
if (!(data & 0xfffffffffffff444)) {
reprogram_fixed_counters(pmu, data);
return 0;
}
break;
case MSR_CORE_PERF_GLOBAL_STATUS:
break; /* RO MSR */
case MSR_CORE_PERF_GLOBAL_CTRL:
if (pmu->global_ctrl == data)
return 0;
if (!(data & pmu->global_ctrl_mask)) {
global_ctrl_changed(pmu, data);
return 0;
}
break;
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
pmu->global_status &= ~data;
pmu->global_ovf_ctrl = data;
return 0;
}
break;
default:
if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
(pmc = get_fixed_pmc(pmu, index))) {
data = (s64)(s32)data;
pmc->counter += data - read_pmc(pmc);
return 0;
} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
if (data == pmc->eventsel)
return 0;
if (!(data & 0xffffffff00200000ull)) {
reprogram_gp_counter(pmc, data);
return 0;
}
}
}
return 1;
}
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
{
struct kvm_pmu *pmu = &vcpu->arch.pmu;
bool fast_mode = pmc & (1u << 31);
bool fixed = pmc & (1u << 30);
struct kvm_pmc *counters;
u64 ctr;
pmc &= (3u << 30) - 1;
if (!fixed && pmc >= pmu->nr_arch_gp_counters)
return 1;
if (fixed && pmc >= pmu->nr_arch_fixed_counters)
return 1;
counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
ctr = read_pmc(&counters[pmc]);
if (fast_mode)
ctr = (u32)ctr;
*data = ctr;
return 0;
}
void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = &vcpu->arch.pmu;
struct kvm_cpuid_entry2 *entry;
unsigned bitmap_len;
pmu->nr_arch_gp_counters = 0;
pmu->nr_arch_fixed_counters = 0;
pmu->counter_bitmask[KVM_PMC_GP] = 0;
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
pmu->version = 0;
entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
if (!entry)
return;
pmu->version = entry->eax & 0xff;
if (!pmu->version)
return;
pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff,
X86_PMC_MAX_GENERIC);
pmu->counter_bitmask[KVM_PMC_GP] =
((u64)1 << ((entry->eax >> 16) & 0xff)) - 1;
bitmap_len = (entry->eax >> 24) & 0xff;
pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1);
if (pmu->version == 1) {
pmu->global_ctrl = (1 << pmu->nr_arch_gp_counters) - 1;
return;
}
pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
X86_PMC_MAX_FIXED);
pmu->counter_bitmask[KVM_PMC_FIXED] =
((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
pmu->global_ctrl_mask = ~(((1 << pmu->nr_arch_gp_counters) - 1)
| (((1ull << pmu->nr_arch_fixed_counters) - 1)
<< X86_PMC_IDX_FIXED));
}
void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
int i;
struct kvm_pmu *pmu = &vcpu->arch.pmu;
memset(pmu, 0, sizeof(*pmu));
for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
pmu->gp_counters[i].type = KVM_PMC_GP;
pmu->gp_counters[i].vcpu = vcpu;
pmu->gp_counters[i].idx = i;
}
for (i = 0; i < X86_PMC_MAX_FIXED; i++) {
pmu->fixed_counters[i].type = KVM_PMC_FIXED;
pmu->fixed_counters[i].vcpu = vcpu;
pmu->fixed_counters[i].idx = i + X86_PMC_IDX_FIXED;
}
init_irq_work(&pmu->irq_work, trigger_pmi);
kvm_pmu_cpuid_update(vcpu);
}
void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = &vcpu->arch.pmu;
int i;
irq_work_sync(&pmu->irq_work);
for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
struct kvm_pmc *pmc = &pmu->gp_counters[i];
stop_counter(pmc);
pmc->counter = pmc->eventsel = 0;
}
for (i = 0; i < X86_PMC_MAX_FIXED; i++)
stop_counter(&pmu->fixed_counters[i]);
pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
pmu->global_ovf_ctrl = 0;
}
void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
kvm_pmu_reset(vcpu);
}
void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = &vcpu->arch.pmu;
u64 bitmask;
int bit;
bitmask = pmu->reprogram_pmi;
for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
if (unlikely(!pmc || !pmc->perf_event)) {
clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
continue;
}
reprogram_idx(pmu, bit);
}
}
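As a side note on the PMU code above: fixed_en_pmi() and reprogram_fixed_counter() read MSR_CORE_PERF_FIXED_CTR_CTRL as one 4-bit field per fixed counter, with bit 0 enabling kernel-mode counting, bit 1 enabling user-mode counting, and bit 3 requesting a PMI on overflow. Below is a minimal standalone userspace sketch of that decoding, using a made-up control value purely for illustration:

#include <stdio.h>
#include <stdint.h>

/*
 * Per-counter field of MSR_CORE_PERF_FIXED_CTR_CTRL as read by
 * fixed_en_pmi() above: bit 0 = count in kernel mode, bit 1 = count
 * in user mode, bit 3 = raise a PMI on overflow.
 */
static uint8_t fixed_en_pmi(uint64_t ctrl, int idx)
{
	return (ctrl >> (idx * 4)) & 0xf;
}

int main(void)
{
	/* Made-up value: counter 0 counts both modes, counter 1 also wants a PMI. */
	uint64_t ctrl = 0x0b3;
	int i;

	for (i = 0; i < 3; i++) {
		uint8_t f = fixed_en_pmi(ctrl, i);

		printf("fixed counter %d: kernel=%d user=%d pmi=%d\n",
		       i, !!(f & 0x1), !!(f & 0x2), !!(f & 0x8));
	}
	return 0;
}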
...@@ -1014,6 +1014,7 @@ static void init_vmcb(struct vcpu_svm *svm) ...@@ -1014,6 +1014,7 @@ static void init_vmcb(struct vcpu_svm *svm)
set_intercept(svm, INTERCEPT_NMI); set_intercept(svm, INTERCEPT_NMI);
set_intercept(svm, INTERCEPT_SMI); set_intercept(svm, INTERCEPT_SMI);
set_intercept(svm, INTERCEPT_SELECTIVE_CR0); set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
set_intercept(svm, INTERCEPT_RDPMC);
set_intercept(svm, INTERCEPT_CPUID); set_intercept(svm, INTERCEPT_CPUID);
set_intercept(svm, INTERCEPT_INVD); set_intercept(svm, INTERCEPT_INVD);
set_intercept(svm, INTERCEPT_HLT); set_intercept(svm, INTERCEPT_HLT);
...@@ -2770,6 +2771,19 @@ static int emulate_on_interception(struct vcpu_svm *svm) ...@@ -2770,6 +2771,19 @@ static int emulate_on_interception(struct vcpu_svm *svm)
return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
} }
static int rdpmc_interception(struct vcpu_svm *svm)
{
int err;
if (!static_cpu_has(X86_FEATURE_NRIPS))
return emulate_on_interception(svm);
err = kvm_rdpmc(&svm->vcpu);
kvm_complete_insn_gp(&svm->vcpu, err);
return 1;
}
bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val) bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
{ {
unsigned long cr0 = svm->vcpu.arch.cr0; unsigned long cr0 = svm->vcpu.arch.cr0;
...@@ -3190,6 +3204,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = { ...@@ -3190,6 +3204,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_SMI] = nop_on_interception, [SVM_EXIT_SMI] = nop_on_interception,
[SVM_EXIT_INIT] = nop_on_interception, [SVM_EXIT_INIT] = nop_on_interception,
[SVM_EXIT_VINTR] = interrupt_window_interception, [SVM_EXIT_VINTR] = interrupt_window_interception,
[SVM_EXIT_RDPMC] = rdpmc_interception,
[SVM_EXIT_CPUID] = cpuid_interception, [SVM_EXIT_CPUID] = cpuid_interception,
[SVM_EXIT_IRET] = iret_interception, [SVM_EXIT_IRET] = iret_interception,
[SVM_EXIT_INVD] = emulate_on_interception, [SVM_EXIT_INVD] = emulate_on_interception,
......
...@@ -18,9 +18,10 @@ ...@@ -18,9 +18,10 @@
#include <linux/atomic.h> #include <linux/atomic.h>
#include "kvm_timer.h" #include "kvm_timer.h"
static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer) enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
{ {
int restart_timer = 0; struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
struct kvm_vcpu *vcpu = ktimer->vcpu;
wait_queue_head_t *q = &vcpu->wq; wait_queue_head_t *q = &vcpu->wq;
/* /*
...@@ -40,26 +41,7 @@ static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer) ...@@ -40,26 +41,7 @@ static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
if (ktimer->t_ops->is_periodic(ktimer)) { if (ktimer->t_ops->is_periodic(ktimer)) {
hrtimer_add_expires_ns(&ktimer->timer, ktimer->period); hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
restart_timer = 1;
}
return restart_timer;
}
enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
{
int restart_timer;
struct kvm_vcpu *vcpu;
struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
vcpu = ktimer->vcpu;
if (!vcpu)
return HRTIMER_NORESTART;
restart_timer = __kvm_timer_fn(vcpu, ktimer);
if (restart_timer)
return HRTIMER_RESTART; return HRTIMER_RESTART;
else } else
return HRTIMER_NORESTART; return HRTIMER_NORESTART;
} }
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include "irq.h" #include "irq.h"
#include "mmu.h" #include "mmu.h"
#include "cpuid.h"
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <linux/module.h> #include <linux/module.h>
...@@ -1747,7 +1748,6 @@ static void setup_msrs(struct vcpu_vmx *vmx) ...@@ -1747,7 +1748,6 @@ static void setup_msrs(struct vcpu_vmx *vmx)
int save_nmsrs, index; int save_nmsrs, index;
unsigned long *msr_bitmap; unsigned long *msr_bitmap;
vmx_load_host_state(vmx);
save_nmsrs = 0; save_nmsrs = 0;
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
if (is_long_mode(&vmx->vcpu)) { if (is_long_mode(&vmx->vcpu)) {
...@@ -1956,6 +1956,7 @@ static __init void nested_vmx_setup_ctls_msrs(void) ...@@ -1956,6 +1956,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
#endif #endif
CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING | CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
CPU_BASED_RDPMC_EXITING |
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
/* /*
* We can allow some features even when not supported by the * We can allow some features even when not supported by the
...@@ -2142,12 +2143,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) ...@@ -2142,12 +2143,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
return 1; return 1;
/* Otherwise falls through */ /* Otherwise falls through */
default: default:
vmx_load_host_state(to_vmx(vcpu));
if (vmx_get_vmx_msr(vcpu, msr_index, pdata)) if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
return 0; return 0;
msr = find_msr_entry(to_vmx(vcpu), msr_index); msr = find_msr_entry(to_vmx(vcpu), msr_index);
if (msr) { if (msr) {
vmx_load_host_state(to_vmx(vcpu));
data = msr->data; data = msr->data;
break; break;
} }
...@@ -2171,7 +2170,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) ...@@ -2171,7 +2170,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
switch (msr_index) { switch (msr_index) {
case MSR_EFER: case MSR_EFER:
vmx_load_host_state(vmx);
ret = kvm_set_msr_common(vcpu, msr_index, data); ret = kvm_set_msr_common(vcpu, msr_index, data);
break; break;
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
...@@ -2220,7 +2218,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) ...@@ -2220,7 +2218,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
break; break;
msr = find_msr_entry(vmx, msr_index); msr = find_msr_entry(vmx, msr_index);
if (msr) { if (msr) {
vmx_load_host_state(vmx);
msr->data = data; msr->data = data;
break; break;
} }
...@@ -2414,7 +2411,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) ...@@ -2414,7 +2411,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
CPU_BASED_USE_TSC_OFFSETING | CPU_BASED_USE_TSC_OFFSETING |
CPU_BASED_MWAIT_EXITING | CPU_BASED_MWAIT_EXITING |
CPU_BASED_MONITOR_EXITING | CPU_BASED_MONITOR_EXITING |
CPU_BASED_INVLPG_EXITING; CPU_BASED_INVLPG_EXITING |
CPU_BASED_RDPMC_EXITING;
if (yield_on_hlt) if (yield_on_hlt)
min |= CPU_BASED_HLT_EXITING; min |= CPU_BASED_HLT_EXITING;
...@@ -2716,11 +2714,13 @@ static gva_t rmode_tss_base(struct kvm *kvm) ...@@ -2716,11 +2714,13 @@ static gva_t rmode_tss_base(struct kvm *kvm)
{ {
if (!kvm->arch.tss_addr) { if (!kvm->arch.tss_addr) {
struct kvm_memslots *slots; struct kvm_memslots *slots;
struct kvm_memory_slot *slot;
gfn_t base_gfn; gfn_t base_gfn;
slots = kvm_memslots(kvm); slots = kvm_memslots(kvm);
base_gfn = slots->memslots[0].base_gfn + slot = id_to_memslot(slots, 0);
kvm->memslots->memslots[0].npages - 3; base_gfn = slot->base_gfn + slot->npages - 3;
return base_gfn << PAGE_SHIFT; return base_gfn << PAGE_SHIFT;
} }
return kvm->arch.tss_addr; return kvm->arch.tss_addr;
...@@ -3945,12 +3945,15 @@ static bool nested_exit_on_intr(struct kvm_vcpu *vcpu) ...@@ -3945,12 +3945,15 @@ static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
static void enable_irq_window(struct kvm_vcpu *vcpu) static void enable_irq_window(struct kvm_vcpu *vcpu)
{ {
u32 cpu_based_vm_exec_control; u32 cpu_based_vm_exec_control;
if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
/* We can get here when nested_run_pending caused /*
* vmx_interrupt_allowed() to return false. In this case, do * We get here if vmx_interrupt_allowed() said we can't
* nothing - the interrupt will be injected later. * inject to L1 now because L2 must run. Ask L2 to exit
* right after entry, so we can inject to L1 more promptly.
*/ */
kvm_make_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
return; return;
}
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
...@@ -4077,11 +4080,12 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) ...@@ -4077,11 +4080,12 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
{ {
if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) { if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
struct vmcs12 *vmcs12; struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
if (to_vmx(vcpu)->nested.nested_run_pending) if (to_vmx(vcpu)->nested.nested_run_pending ||
(vmcs12->idt_vectoring_info_field &
VECTORING_INFO_VALID_MASK))
return 0; return 0;
nested_vmx_vmexit(vcpu); nested_vmx_vmexit(vcpu);
vmcs12 = get_vmcs12(vcpu);
vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT; vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
vmcs12->vm_exit_intr_info = 0; vmcs12->vm_exit_intr_info = 0;
/* fall through to normal code, but now in L1, not L2 */ /* fall through to normal code, but now in L1, not L2 */
...@@ -4611,6 +4615,16 @@ static int handle_invlpg(struct kvm_vcpu *vcpu) ...@@ -4611,6 +4615,16 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
return 1; return 1;
} }
static int handle_rdpmc(struct kvm_vcpu *vcpu)
{
int err;
err = kvm_rdpmc(vcpu);
kvm_complete_insn_gp(vcpu, err);
return 1;
}
static int handle_wbinvd(struct kvm_vcpu *vcpu) static int handle_wbinvd(struct kvm_vcpu *vcpu)
{ {
skip_emulated_instruction(vcpu); skip_emulated_instruction(vcpu);
...@@ -5561,6 +5575,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { ...@@ -5561,6 +5575,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_HLT] = handle_halt, [EXIT_REASON_HLT] = handle_halt,
[EXIT_REASON_INVD] = handle_invd, [EXIT_REASON_INVD] = handle_invd,
[EXIT_REASON_INVLPG] = handle_invlpg, [EXIT_REASON_INVLPG] = handle_invlpg,
[EXIT_REASON_RDPMC] = handle_rdpmc,
[EXIT_REASON_VMCALL] = handle_vmcall, [EXIT_REASON_VMCALL] = handle_vmcall,
[EXIT_REASON_VMCLEAR] = handle_vmclear, [EXIT_REASON_VMCLEAR] = handle_vmclear,
[EXIT_REASON_VMLAUNCH] = handle_vmlaunch, [EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
......
This diff has been collapsed.
...@@ -33,9 +33,6 @@ static inline bool kvm_exception_is_soft(unsigned int nr) ...@@ -33,9 +33,6 @@ static inline bool kvm_exception_is_soft(unsigned int nr)
return (nr == BP_VECTOR) || (nr == OF_VECTOR); return (nr == BP_VECTOR) || (nr == OF_VECTOR);
} }
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
u32 function, u32 index);
static inline bool is_protmode(struct kvm_vcpu *vcpu) static inline bool is_protmode(struct kvm_vcpu *vcpu)
{ {
return kvm_read_cr0_bits(vcpu, X86_CR0_PE); return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
...@@ -125,4 +122,6 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, ...@@ -125,4 +122,6 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes, gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception); struct x86_exception *exception);
extern u64 host_xcr0;
#endif #endif
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/signal.h> #include <linux/signal.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h> #include <linux/preempt.h>
#include <linux/msi.h> #include <linux/msi.h>
#include <linux/slab.h> #include <linux/slab.h>
...@@ -50,6 +51,9 @@ ...@@ -50,6 +51,9 @@
#define KVM_REQ_APF_HALT 12 #define KVM_REQ_APF_HALT 12
#define KVM_REQ_STEAL_UPDATE 13 #define KVM_REQ_STEAL_UPDATE 13
#define KVM_REQ_NMI 14 #define KVM_REQ_NMI 14
#define KVM_REQ_IMMEDIATE_EXIT 15
#define KVM_REQ_PMU 16
#define KVM_REQ_PMI 17
#define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_USERSPACE_IRQ_SOURCE_ID 0
...@@ -179,6 +183,7 @@ struct kvm_memory_slot { ...@@ -179,6 +183,7 @@ struct kvm_memory_slot {
unsigned long *rmap; unsigned long *rmap;
unsigned long *dirty_bitmap; unsigned long *dirty_bitmap;
unsigned long *dirty_bitmap_head; unsigned long *dirty_bitmap_head;
unsigned long nr_dirty_pages;
struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1]; struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
unsigned long userspace_addr; unsigned long userspace_addr;
int user_alloc; int user_alloc;
...@@ -224,11 +229,20 @@ struct kvm_irq_routing_table {}; ...@@ -224,11 +229,20 @@ struct kvm_irq_routing_table {};
#endif #endif
#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif
/*
* Note:
* memslots are not sorted by id anymore; please use id_to_memslot()
* to look up a memslot by its id.
*/
struct kvm_memslots { struct kvm_memslots {
int nmemslots;
u64 generation; u64 generation;
struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS + struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
KVM_PRIVATE_MEM_SLOTS]; /* The mapping table from slot id to the index in memslots[]. */
int id_to_index[KVM_MEM_SLOTS_NUM];
}; };
struct kvm { struct kvm {
...@@ -239,7 +253,6 @@ struct kvm { ...@@ -239,7 +253,6 @@ struct kvm {
struct srcu_struct srcu; struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE #ifdef CONFIG_KVM_APIC_ARCHITECTURE
u32 bsp_vcpu_id; u32 bsp_vcpu_id;
struct kvm_vcpu *bsp_vcpu;
#endif #endif
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
atomic_t online_vcpus; atomic_t online_vcpus;
...@@ -302,6 +315,11 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) ...@@ -302,6 +315,11 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
(vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \ (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
idx++) idx++)
#define kvm_for_each_memslot(memslot, slots) \
for (memslot = &slots->memslots[0]; \
memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
memslot++)
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
...@@ -314,6 +332,7 @@ void kvm_exit(void); ...@@ -314,6 +332,7 @@ void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm); void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm); void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{ {
...@@ -322,6 +341,18 @@ static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) ...@@ -322,6 +341,18 @@ static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
|| lockdep_is_held(&kvm->slots_lock)); || lockdep_is_held(&kvm->slots_lock));
} }
static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
int index = slots->id_to_index[id];
struct kvm_memory_slot *slot;
slot = &slots->memslots[index];
WARN_ON(slot->id != id);
return slot;
}
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1) #define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB) #define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; } static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
......
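To make the new memslot convention above concrete, here is a minimal userspace sketch (not kernel code; the structures are trimmed to a few fields and the slot layout is invented) showing how id_to_index[] lets id_to_memslot() find a slot even though memslots[] is no longer ordered by id:

#include <stdio.h>

#define KVM_MEM_SLOTS_NUM 4

/* Trimmed-down stand-ins for the kernel structures above. */
struct kvm_memory_slot {
	unsigned long base_gfn;
	unsigned long npages;
	int id;
};

struct kvm_memslots {
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	int id_to_index[KVM_MEM_SLOTS_NUM];
};

/* Same indirection as the inline helper above: id -> array index -> slot. */
static struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
	return &slots->memslots[slots->id_to_index[id]];
}

int main(void)
{
	/* Invented layout: slot 1 is larger, so it sits first in memslots[]. */
	struct kvm_memslots slots = {
		.memslots = {
			{ .base_gfn = 0x100, .npages = 512, .id = 1 },
			{ .base_gfn = 0x000, .npages = 256, .id = 0 },
		},
		.id_to_index = { 1, 0, 2, 3 },
	};
	struct kvm_memory_slot *slot = id_to_memslot(&slots, 0);

	printf("slot 0: base_gfn=%#lx npages=%lu\n", slot->base_gfn, slot->npages);
	return 0;
}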
...@@ -35,4 +35,3 @@ static inline int kvm_para_has_feature(unsigned int feature) ...@@ -35,4 +35,3 @@ static inline int kvm_para_has_feature(unsigned int feature)
} }
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* __LINUX_KVM_PARA_H */ #endif /* __LINUX_KVM_PARA_H */
...@@ -71,6 +71,7 @@ void jump_label_inc(struct jump_label_key *key) ...@@ -71,6 +71,7 @@ void jump_label_inc(struct jump_label_key *key)
atomic_inc(&key->enabled); atomic_inc(&key->enabled);
jump_label_unlock(); jump_label_unlock();
} }
EXPORT_SYMBOL_GPL(jump_label_inc);
static void __jump_label_dec(struct jump_label_key *key, static void __jump_label_dec(struct jump_label_key *key,
unsigned long rate_limit, struct delayed_work *work) unsigned long rate_limit, struct delayed_work *work)
...@@ -86,6 +87,7 @@ static void __jump_label_dec(struct jump_label_key *key, ...@@ -86,6 +87,7 @@ static void __jump_label_dec(struct jump_label_key *key,
jump_label_unlock(); jump_label_unlock();
} }
EXPORT_SYMBOL_GPL(jump_label_dec);
static void jump_label_update_timeout(struct work_struct *work) static void jump_label_update_timeout(struct work_struct *work)
{ {
......
...@@ -28,9 +28,15 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev, ...@@ -28,9 +28,15 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
* (addr,len) is fully included in * (addr,len) is fully included in
* (zone->addr, zone->size) * (zone->addr, zone->size)
*/ */
if (len < 0)
return (dev->zone.addr <= addr && return 0;
addr + len <= dev->zone.addr + dev->zone.size); if (addr + len < addr)
return 0;
if (addr < dev->zone.addr)
return 0;
if (addr + len > dev->zone.addr + dev->zone.size)
return 0;
return 1;
} }
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev) static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
......
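The rewritten range check above rejects negative lengths and address wrap-around before testing containment in the zone. A standalone sketch of the same logic, with made-up zone values, illustrates why the addr + len < addr test matters:

#include <stdio.h>
#include <stdint.h>

struct zone { uint64_t addr, size; };

/* Mirrors the overflow-safe containment test above. */
static int in_range(const struct zone *z, uint64_t addr, int len)
{
	if (len < 0)
		return 0;
	if (addr + len < addr)			/* addr + len wrapped around */
		return 0;
	if (addr < z->addr)
		return 0;
	if (addr + len > z->addr + z->size)
		return 0;
	return 1;
}

int main(void)
{
	struct zone z = { .addr = 0x1000, .size = 0x1000 };

	printf("%d\n", in_range(&z, 0x1800, 8));		/* 1: fully inside */
	printf("%d\n", in_range(&z, 0x1ff0, 0x20));		/* 0: runs past the zone */
	printf("%d\n", in_range(&z, UINT64_MAX - 4, 16));	/* 0: addr + len wraps */
	return 0;
}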
...@@ -185,7 +185,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq) ...@@ -185,7 +185,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
irqe.dest_mode = 0; /* Physical mode. */ irqe.dest_mode = 0; /* Physical mode. */
/* need to read apic_id from apic register since /* need to read apic_id from apic register since
* it can be rewritten */ * it can be rewritten */
irqe.dest_id = ioapic->kvm->bsp_vcpu->vcpu_id; irqe.dest_id = ioapic->kvm->bsp_vcpu_id;
} }
#endif #endif
return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe); return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
...@@ -332,9 +332,18 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len, ...@@ -332,9 +332,18 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
(void*)addr, len, val); (void*)addr, len, val);
ASSERT(!(addr & 0xf)); /* check alignment */ ASSERT(!(addr & 0xf)); /* check alignment */
if (len == 4 || len == 8) switch (len) {
case 8:
case 4:
data = *(u32 *) val; data = *(u32 *) val;
else { break;
case 2:
data = *(u16 *) val;
break;
case 1:
data = *(u8 *) val;
break;
default:
printk(KERN_WARNING "ioapic: Unsupported size %d\n", len); printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
return 0; return 0;
} }
...@@ -343,7 +352,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len, ...@@ -343,7 +352,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
spin_lock(&ioapic->lock); spin_lock(&ioapic->lock);
switch (addr) { switch (addr) {
case IOAPIC_REG_SELECT: case IOAPIC_REG_SELECT:
ioapic->ioregsel = data; ioapic->ioregsel = data & 0xFF; /* 8-bit register */
break; break;
case IOAPIC_REG_WINDOW: case IOAPIC_REG_WINDOW:
......
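The new switch above narrows a guest MMIO write to its access size and masks IOREGSEL down to its 8 valid bits. A hedged userspace sketch of just that narrowing step (buffer contents are arbitrary and byte order follows the host, as in the kernel code):

#include <stdio.h>
#include <stdint.h>

/* Narrow a write of 'len' bytes to a 32-bit value, as the switch above does. */
static int narrow_write(const void *val, int len, uint32_t *data)
{
	switch (len) {
	case 8:
	case 4:
		*data = *(const uint32_t *)val;
		break;
	case 2:
		*data = *(const uint16_t *)val;
		break;
	case 1:
		*data = *(const uint8_t *)val;
		break;
	default:
		return -1;	/* unsupported access size */
	}
	return 0;
}

int main(void)
{
	uint64_t raw = 0xdeadbeefcafe01aaULL;	/* arbitrary guest write data */
	uint32_t data;

	if (narrow_write(&raw, 2, &data) == 0)
		printf("2-byte write -> data=%#x, ioregsel=%#x\n",
		       data, data & 0xFF);	/* IOREGSEL is an 8-bit register */
	return 0;
}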
...@@ -134,14 +134,15 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) ...@@ -134,14 +134,15 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
static int kvm_iommu_map_memslots(struct kvm *kvm) static int kvm_iommu_map_memslots(struct kvm *kvm)
{ {
int i, idx, r = 0; int idx, r = 0;
struct kvm_memslots *slots; struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
idx = srcu_read_lock(&kvm->srcu); idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm); slots = kvm_memslots(kvm);
for (i = 0; i < slots->nmemslots; i++) { kvm_for_each_memslot(memslot, slots) {
r = kvm_iommu_map_pages(kvm, &slots->memslots[i]); r = kvm_iommu_map_pages(kvm, memslot);
if (r) if (r)
break; break;
} }
...@@ -311,16 +312,16 @@ static void kvm_iommu_put_pages(struct kvm *kvm, ...@@ -311,16 +312,16 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
static int kvm_iommu_unmap_memslots(struct kvm *kvm) static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{ {
int i, idx; int idx;
struct kvm_memslots *slots; struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
idx = srcu_read_lock(&kvm->srcu); idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm); slots = kvm_memslots(kvm);
for (i = 0; i < slots->nmemslots; i++) { kvm_for_each_memslot(memslot, slots)
kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn, kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages);
slots->memslots[i].npages);
}
srcu_read_unlock(&kvm->srcu, idx); srcu_read_unlock(&kvm->srcu, idx);
return 0; return 0;
......
...@@ -440,6 +440,15 @@ static int kvm_init_mmu_notifier(struct kvm *kvm) ...@@ -440,6 +440,15 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
static void kvm_init_memslots_id(struct kvm *kvm)
{
int i;
struct kvm_memslots *slots = kvm->memslots;
for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
slots->id_to_index[i] = slots->memslots[i].id = i;
}
static struct kvm *kvm_create_vm(void) static struct kvm *kvm_create_vm(void)
{ {
int r, i; int r, i;
...@@ -465,6 +474,7 @@ static struct kvm *kvm_create_vm(void) ...@@ -465,6 +474,7 @@ static struct kvm *kvm_create_vm(void)
kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
if (!kvm->memslots) if (!kvm->memslots)
goto out_err_nosrcu; goto out_err_nosrcu;
kvm_init_memslots_id(kvm);
if (init_srcu_struct(&kvm->srcu)) if (init_srcu_struct(&kvm->srcu))
goto out_err_nosrcu; goto out_err_nosrcu;
for (i = 0; i < KVM_NR_BUSES; i++) { for (i = 0; i < KVM_NR_BUSES; i++) {
...@@ -547,11 +557,11 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free, ...@@ -547,11 +557,11 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
void kvm_free_physmem(struct kvm *kvm) void kvm_free_physmem(struct kvm *kvm)
{ {
int i;
struct kvm_memslots *slots = kvm->memslots; struct kvm_memslots *slots = kvm->memslots;
struct kvm_memory_slot *memslot;
for (i = 0; i < slots->nmemslots; ++i) kvm_for_each_memslot(memslot, slots)
kvm_free_physmem_slot(&slots->memslots[i], NULL); kvm_free_physmem_slot(memslot, NULL);
kfree(kvm->memslots); kfree(kvm->memslots);
} }
...@@ -625,10 +635,69 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) ...@@ -625,10 +635,69 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
return -ENOMEM; return -ENOMEM;
memslot->dirty_bitmap_head = memslot->dirty_bitmap; memslot->dirty_bitmap_head = memslot->dirty_bitmap;
memslot->nr_dirty_pages = 0;
return 0; return 0;
} }
#endif /* !CONFIG_S390 */ #endif /* !CONFIG_S390 */
static struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
struct kvm_memory_slot *memslot;
kvm_for_each_memslot(memslot, slots)
if (gfn >= memslot->base_gfn &&
gfn < memslot->base_gfn + memslot->npages)
return memslot;
return NULL;
}
static int cmp_memslot(const void *slot1, const void *slot2)
{
struct kvm_memory_slot *s1, *s2;
s1 = (struct kvm_memory_slot *)slot1;
s2 = (struct kvm_memory_slot *)slot2;
if (s1->npages < s2->npages)
return 1;
if (s1->npages > s2->npages)
return -1;
return 0;
}
/*
* Sort the memslots by size, so that larger slots come
* first and are found more quickly.
*/
static void sort_memslots(struct kvm_memslots *slots)
{
int i;
sort(slots->memslots, KVM_MEM_SLOTS_NUM,
sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
slots->id_to_index[slots->memslots[i].id] = i;
}
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
{
if (new) {
int id = new->id;
struct kvm_memory_slot *old = id_to_memslot(slots, id);
unsigned long npages = old->npages;
*old = *new;
if (new->npages != npages)
sort_memslots(slots);
}
slots->generation++;
}
/* /*
* Allocate some memory and give it an address in the guest physical address * Allocate some memory and give it an address in the guest physical address
* space. * space.
...@@ -662,12 +731,12 @@ int __kvm_set_memory_region(struct kvm *kvm, ...@@ -662,12 +731,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
(void __user *)(unsigned long)mem->userspace_addr, (void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size))) mem->memory_size)))
goto out; goto out;
if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS) if (mem->slot >= KVM_MEM_SLOTS_NUM)
goto out; goto out;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out; goto out;
memslot = &kvm->memslots->memslots[mem->slot]; memslot = id_to_memslot(kvm->memslots, mem->slot);
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT; npages = mem->memory_size >> PAGE_SHIFT;
...@@ -774,15 +843,17 @@ int __kvm_set_memory_region(struct kvm *kvm, ...@@ -774,15 +843,17 @@ int __kvm_set_memory_region(struct kvm *kvm,
#endif /* not defined CONFIG_S390 */ #endif /* not defined CONFIG_S390 */
if (!npages) { if (!npages) {
struct kvm_memory_slot *slot;
r = -ENOMEM; r = -ENOMEM;
slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
GFP_KERNEL);
if (!slots) if (!slots)
goto out_free; goto out_free;
memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); slot = id_to_memslot(slots, mem->slot);
if (mem->slot >= slots->nmemslots) slot->flags |= KVM_MEMSLOT_INVALID;
slots->nmemslots = mem->slot + 1;
slots->generation++; update_memslots(slots, NULL);
slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
old_memslots = kvm->memslots; old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots); rcu_assign_pointer(kvm->memslots, slots);
...@@ -810,13 +881,10 @@ int __kvm_set_memory_region(struct kvm *kvm, ...@@ -810,13 +881,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
} }
r = -ENOMEM; r = -ENOMEM;
slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
GFP_KERNEL);
if (!slots) if (!slots)
goto out_free; goto out_free;
memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
if (mem->slot >= slots->nmemslots)
slots->nmemslots = mem->slot + 1;
slots->generation++;
/* actual memory is freed via old in kvm_free_physmem_slot below */ /* actual memory is freed via old in kvm_free_physmem_slot below */
if (!npages) { if (!npages) {
...@@ -826,7 +894,7 @@ int __kvm_set_memory_region(struct kvm *kvm, ...@@ -826,7 +894,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
new.lpage_info[i] = NULL; new.lpage_info[i] = NULL;
} }
slots->memslots[mem->slot] = new; update_memslots(slots, &new);
old_memslots = kvm->memslots; old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots); rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu); synchronize_srcu_expedited(&kvm->srcu);
...@@ -888,7 +956,7 @@ int kvm_get_dirty_log(struct kvm *kvm, ...@@ -888,7 +956,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
if (log->slot >= KVM_MEMORY_SLOTS) if (log->slot >= KVM_MEMORY_SLOTS)
goto out; goto out;
memslot = &kvm->memslots->memslots[log->slot]; memslot = id_to_memslot(kvm->memslots, log->slot);
r = -ENOENT; r = -ENOENT;
if (!memslot->dirty_bitmap) if (!memslot->dirty_bitmap)
goto out; goto out;
...@@ -966,16 +1034,7 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva); ...@@ -966,16 +1034,7 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots, static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
gfn_t gfn) gfn_t gfn)
{ {
int i; return search_memslots(slots, gfn);
for (i = 0; i < slots->nmemslots; ++i) {
struct kvm_memory_slot *memslot = &slots->memslots[i];
if (gfn >= memslot->base_gfn
&& gfn < memslot->base_gfn + memslot->npages)
return memslot;
}
return NULL;
} }
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
...@@ -986,20 +1045,13 @@ EXPORT_SYMBOL_GPL(gfn_to_memslot); ...@@ -986,20 +1045,13 @@ EXPORT_SYMBOL_GPL(gfn_to_memslot);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{ {
int i; struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
struct kvm_memslots *slots = kvm_memslots(kvm);
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { if (!memslot || memslot->id >= KVM_MEMORY_SLOTS ||
struct kvm_memory_slot *memslot = &slots->memslots[i]; memslot->flags & KVM_MEMSLOT_INVALID)
return 0;
if (memslot->flags & KVM_MEMSLOT_INVALID)
continue;
if (gfn >= memslot->base_gfn return 1;
&& gfn < memslot->base_gfn + memslot->npages)
return 1;
}
return 0;
} }
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
...@@ -1491,7 +1543,8 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, ...@@ -1491,7 +1543,8 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
if (memslot && memslot->dirty_bitmap) { if (memslot && memslot->dirty_bitmap) {
unsigned long rel_gfn = gfn - memslot->base_gfn; unsigned long rel_gfn = gfn - memslot->base_gfn;
__set_bit_le(rel_gfn, memslot->dirty_bitmap); if (!__test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap))
memslot->nr_dirty_pages++;
} }
} }
...@@ -1690,10 +1743,6 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) ...@@ -1690,10 +1743,6 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
smp_wmb(); smp_wmb();
atomic_inc(&kvm->online_vcpus); atomic_inc(&kvm->online_vcpus);
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
if (kvm->bsp_vcpu_id == id)
kvm->bsp_vcpu = vcpu;
#endif
mutex_unlock(&kvm->lock); mutex_unlock(&kvm->lock);
return r; return r;
...@@ -1768,12 +1817,11 @@ static long kvm_vcpu_ioctl(struct file *filp, ...@@ -1768,12 +1817,11 @@ static long kvm_vcpu_ioctl(struct file *filp,
struct kvm_regs *kvm_regs; struct kvm_regs *kvm_regs;
r = -ENOMEM; r = -ENOMEM;
kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
if (!kvm_regs) if (IS_ERR(kvm_regs)) {
r = PTR_ERR(kvm_regs);
goto out; goto out;
r = -EFAULT; }
if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
goto out_free2;
r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
if (r) if (r)
goto out_free2; goto out_free2;
...@@ -1797,13 +1845,11 @@ static long kvm_vcpu_ioctl(struct file *filp, ...@@ -1797,13 +1845,11 @@ static long kvm_vcpu_ioctl(struct file *filp,
break; break;
} }
case KVM_SET_SREGS: { case KVM_SET_SREGS: {
kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL); kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
r = -ENOMEM; if (IS_ERR(kvm_sregs)) {
if (!kvm_sregs) r = PTR_ERR(kvm_sregs);
goto out;
r = -EFAULT;
if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
goto out; goto out;
}
r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
if (r) if (r)
goto out; goto out;
...@@ -1899,13 +1945,11 @@ static long kvm_vcpu_ioctl(struct file *filp, ...@@ -1899,13 +1945,11 @@ static long kvm_vcpu_ioctl(struct file *filp,
break; break;
} }
case KVM_SET_FPU: { case KVM_SET_FPU: {
fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL); fpu = memdup_user(argp, sizeof(*fpu));
r = -ENOMEM; if (IS_ERR(fpu)) {
if (!fpu) r = PTR_ERR(fpu);
goto out;
r = -EFAULT;
if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
goto out; goto out;
}
r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
if (r) if (r)
goto out; goto out;
...@@ -2520,10 +2564,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, ...@@ -2520,10 +2564,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
if (bus->dev_count > NR_IOBUS_DEVS-1) if (bus->dev_count > NR_IOBUS_DEVS-1)
return -ENOSPC; return -ENOSPC;
new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL); new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
if (!new_bus) if (!new_bus)
return -ENOMEM; return -ENOMEM;
memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
kvm_io_bus_insert_dev(new_bus, dev, addr, len); kvm_io_bus_insert_dev(new_bus, dev, addr, len);
rcu_assign_pointer(kvm->buses[bus_idx], new_bus); rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu); synchronize_srcu_expedited(&kvm->srcu);
...@@ -2539,13 +2582,12 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, ...@@ -2539,13 +2582,12 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
int i, r; int i, r;
struct kvm_io_bus *new_bus, *bus; struct kvm_io_bus *new_bus, *bus;
new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL); bus = kvm->buses[bus_idx];
new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
if (!new_bus) if (!new_bus)
return -ENOMEM; return -ENOMEM;
bus = kvm->buses[bus_idx];
memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
r = -ENOENT; r = -ENOENT;
for (i = 0; i < new_bus->dev_count; i++) for (i = 0; i < new_bus->dev_count; i++)
if (new_bus->range[i].dev == dev) { if (new_bus->range[i].dev == dev) {
...@@ -2612,15 +2654,29 @@ static const struct file_operations *stat_fops[] = { ...@@ -2612,15 +2654,29 @@ static const struct file_operations *stat_fops[] = {
[KVM_STAT_VM] = &vm_stat_fops, [KVM_STAT_VM] = &vm_stat_fops,
}; };
static void kvm_init_debug(void) static int kvm_init_debug(void)
{ {
int r = -EFAULT;
struct kvm_stats_debugfs_item *p; struct kvm_stats_debugfs_item *p;
kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
for (p = debugfs_entries; p->name; ++p) if (kvm_debugfs_dir == NULL)
goto out;
for (p = debugfs_entries; p->name; ++p) {
p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir, p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
(void *)(long)p->offset, (void *)(long)p->offset,
stat_fops[p->kind]); stat_fops[p->kind]);
if (p->dentry == NULL)
goto out_dir;
}
return 0;
out_dir:
debugfs_remove_recursive(kvm_debugfs_dir);
out:
return r;
} }
static void kvm_exit_debug(void) static void kvm_exit_debug(void)
...@@ -2764,10 +2820,16 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, ...@@ -2764,10 +2820,16 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
kvm_preempt_ops.sched_in = kvm_sched_in; kvm_preempt_ops.sched_in = kvm_sched_in;
kvm_preempt_ops.sched_out = kvm_sched_out; kvm_preempt_ops.sched_out = kvm_sched_out;
kvm_init_debug(); r = kvm_init_debug();
if (r) {
printk(KERN_ERR "kvm: create debugfs files failed\n");
goto out_undebugfs;
}
return 0; return 0;
out_undebugfs:
unregister_syscore_ops(&kvm_syscore_ops);
out_unreg: out_unreg:
kvm_async_pf_deinit(); kvm_async_pf_deinit();
out_free: out_free:
......
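Finally, the mark_page_dirty_in_slot() change above bumps nr_dirty_pages only when a bit actually flips from clear to set, so the counter stays in sync with the bitmap. A small standalone sketch of that accounting, using a simplified stand-in for __test_and_set_bit_le():

#include <stdio.h>

#define NPAGES 64
#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long dirty_bitmap[NPAGES / BITS_PER_LONG];
static unsigned long nr_dirty_pages;

/* Simplified stand-in for __test_and_set_bit_le(): returns the old bit value. */
static int test_and_set_bit(unsigned long nr, unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *word = addr + nr / BITS_PER_LONG;
	int old = !!(*word & mask);

	*word |= mask;
	return old;
}

static void mark_page_dirty(unsigned long rel_gfn)
{
	if (!test_and_set_bit(rel_gfn, dirty_bitmap))
		nr_dirty_pages++;
}

int main(void)
{
	mark_page_dirty(3);
	mark_page_dirty(3);	/* second write to the same page: not re-counted */
	mark_page_dirty(17);
	printf("nr_dirty_pages = %lu\n", nr_dirty_pages);	/* prints 2 */
	return 0;
}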