Commit 1d737c8a authored by Zhang Xiantao, committed by Avi Kivity

KVM: Portability: Split mmu-related static inline functions to mmu.h

Since these functions need to know the details of the kvm or kvm_vcpu structure,
they can't be put in x86.h.  Create mmu.h to hold them.
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent ad312c7c
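Note: the constraint the commit message describes is a plain C visibility rule. These inlines dereference fields of struct kvm and struct kvm_vcpu, so whatever header defines them must see the complete structure definitions (which is why the new mmu.h includes "kvm.h"). A minimal stand-alone sketch of the rule, using hypothetical types rather than the kernel's:

```c
/* Forward declaration only: struct vcpu is an incomplete type here. */
struct vcpu;

/* This would NOT compile yet -- the field offset is unknown:
 *
 * static inline int is_runnable(struct vcpu *v)
 * {
 *         return v->runnable;     // error: dereferencing incomplete type
 * }
 */

/* Once the full definition is visible, the inline compiles. */
struct vcpu {
        int runnable;
};

static inline int is_runnable(struct vcpu *v)
{
        return v->runnable;        /* layout known, dereference is legal */
}
```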
......
@@ -396,6 +396,7 @@ void kvm_arch_hardware_disable(void *garbage);
 int kvm_arch_hardware_setup(void);
 void kvm_arch_hardware_unsetup(void);
 void kvm_arch_check_processor_compat(void *rtn);
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 void kvm_free_physmem(struct kvm *kvm);
......
......
@@ -20,6 +20,7 @@
 #include "vmx.h"
 #include "kvm.h"
 #include "x86.h"
+#include "mmu.h"
 
 #include <linux/types.h>
 #include <linux/string.h>
......
+#ifndef __KVM_X86_MMU_H
+#define __KVM_X86_MMU_H
+
+#include "kvm.h"
+
+static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
+{
+	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
+		__kvm_mmu_free_some_pages(vcpu);
+}
+
+static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
+{
+	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
+		return 0;
+
+	return kvm_mmu_load(vcpu);
+}
+
+static inline int is_long_mode(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_X86_64
+	return vcpu->arch.shadow_efer & EFER_LME;
+#else
+	return 0;
+#endif
+}
+
+static inline int is_pae(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.cr4 & X86_CR4_PAE;
+}
+
+static inline int is_pse(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.cr4 & X86_CR4_PSE;
+}
+
+static inline int is_paging(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.cr0 & X86_CR0_PG;
+}
+
+#endif
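Note: all of mmu.h's helpers are cheap predicates or fast-path checks over vcpu state. A hedged sketch of typical call sites (the function below is hypothetical, not code from this commit): kvm_mmu_reload() runs before resuming the guest and is a no-op while root_hpa is valid, and the mode predicates select the guest page-table format.

```c
#include "mmu.h"

/* Hypothetical caller, for illustration only. Returns the number of
 * page-table levels a guest walk would use, or a negative error. */
static int guest_walk_levels(struct kvm_vcpu *vcpu)
{
        int r;

        /* Ensure a shadow root exists before touching guest mappings;
         * cheap when vcpu->arch.mmu.root_hpa is already valid. */
        r = kvm_mmu_reload(vcpu);
        if (r)
                return r;

        if (!is_paging(vcpu))
                return 0;                 /* guest paging disabled */
        if (is_long_mode(vcpu))
                return 4;                 /* 64-bit: 4-level walk */
        if (is_pae(vcpu))
                return 3;                 /* PAE: 3-level walk */
        return 2;                         /* legacy 32-bit: 2-level walk */
}
```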
......
@@ -17,6 +17,7 @@
 #include "kvm_svm.h"
 #include "x86_emulate.h"
 #include "irq.h"
+#include "mmu.h"
 
 #include <linux/module.h>
 #include <linux/kernel.h>
......
......
@@ -21,6 +21,7 @@
 #include "irq.h"
 #include "vmx.h"
 #include "segment_descriptor.h"
+#include "mmu.h"
 
 #include <linux/module.h>
 #include <linux/kernel.h>
......
......
@@ -19,6 +19,7 @@
 #include "x86_emulate.h"
 #include "segment_descriptor.h"
 #include "irq.h"
+#include "mmu.h"
 
 #include <linux/kvm.h>
 #include <linux/fs.h>
......
@@ -3139,3 +3140,9 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 
 	return 0;
 }
+
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE
+	       || vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
+}
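Note: turning kvm_arch_vcpu_runnable() from a static inline in x86.h into a plain declaration plus an x86.c definition means arch-independent code can test runnability without seeing the x86 mp_state encoding. A hedged sketch of the kind of generic-side wait loop this enables (simplified, hypothetical function name; not the kernel's actual kvm_vcpu_block()):

```c
/* Generic code only sees the declaration from the common header:
 *     int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 * so a halted-vcpu wait can stay arch-neutral. */
static void vcpu_wait_until_runnable(struct kvm_vcpu *vcpu)
{
        while (!kvm_arch_vcpu_runnable(vcpu))
                schedule();        /* yield until the vcpu becomes runnable */
}
```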
......
@@ -334,44 +334,6 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
 
-static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
-{
-	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
-		__kvm_mmu_free_some_pages(vcpu);
-}
-
-static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
-{
-	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
-		return 0;
-
-	return kvm_mmu_load(vcpu);
-}
-
-static inline int is_long_mode(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_X86_64
-	return vcpu->arch.shadow_efer & EFER_LME;
-#else
-	return 0;
-#endif
-}
-
-static inline int is_pae(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.cr4 & X86_CR4_PAE;
-}
-
-static inline int is_pse(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.cr4 & X86_CR4_PSE;
-}
-
-static inline int is_paging(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.cr0 & X86_CR0_PG;
-}
-
 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
 int complete_pio(struct kvm_vcpu *vcpu);
......
@@ -490,10 +452,4 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
 #define TSS_REDIRECTION_SIZE (256 / 8)
 #define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
 
-static inline int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE
-	       || vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
-}
-
 #endif