Commit 6b7e2d09 authored by Xiao Guangrong, committed by Marcelo Tosatti

KVM: Add "exiting guest mode" state

Currently we keep track of only two states: guest mode and host
mode.  This patch adds an "exiting guest mode" state that tells
us that an IPI will happen soon, so unless we need to wait for the
IPI, we can avoid it completely.

Also:
1. There is no need to atomically read/write ->mode from the vcpu's
   own thread.

2. Reorganize struct kvm_vcpu so that ->mode and ->requests are
   explicitly placed in the same cache line.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Parent commit: d48ead8b
...@@ -662,6 +662,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -662,6 +662,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
goto vcpu_run_fail; goto vcpu_run_fail;
srcu_read_unlock(&vcpu->kvm->srcu, idx); srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu->mode = IN_GUEST_MODE;
kvm_guest_enter(); kvm_guest_enter();
/* /*
...@@ -683,6 +684,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -683,6 +684,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
*/ */
barrier(); barrier();
kvm_guest_exit(); kvm_guest_exit();
vcpu->mode = OUTSIDE_GUEST_MODE;
preempt_enable(); preempt_enable();
idx = srcu_read_lock(&vcpu->kvm->srcu); idx = srcu_read_lock(&vcpu->kvm->srcu);
......
...@@ -5210,14 +5210,18 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) ...@@ -5210,14 +5210,18 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_load_guest_fpu(vcpu); kvm_load_guest_fpu(vcpu);
kvm_load_guest_xcr0(vcpu); kvm_load_guest_xcr0(vcpu);
atomic_set(&vcpu->guest_mode, 1); vcpu->mode = IN_GUEST_MODE;
smp_wmb();
/* We should set ->mode before check ->requests,
* see the comment in make_all_cpus_request.
*/
smp_mb();
local_irq_disable(); local_irq_disable();
if (!atomic_read(&vcpu->guest_mode) || vcpu->requests if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
|| need_resched() || signal_pending(current)) { || need_resched() || signal_pending(current)) {
atomic_set(&vcpu->guest_mode, 0); vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb(); smp_wmb();
local_irq_enable(); local_irq_enable();
preempt_enable(); preempt_enable();
...@@ -5253,7 +5257,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) ...@@ -5253,7 +5257,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc); kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc);
atomic_set(&vcpu->guest_mode, 0); vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb(); smp_wmb();
local_irq_enable(); local_irq_enable();
...@@ -6157,7 +6161,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu) ...@@ -6157,7 +6161,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
me = get_cpu(); me = get_cpu();
if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
if (atomic_xchg(&vcpu->guest_mode, 0)) if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
smp_send_reschedule(cpu); smp_send_reschedule(cpu);
put_cpu(); put_cpu();
} }
......
...@@ -98,19 +98,26 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, ...@@ -98,19 +98,26 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif #endif
/*
 * Execution state of a vcpu with respect to guest code.
 *
 * OUTSIDE_GUEST_MODE - the vcpu thread is running host code.
 * IN_GUEST_MODE      - the vcpu is executing (or about to enter) guest code.
 * EXITING_GUEST_MODE - a kick IPI is already on its way, so the vcpu will
 *                      leave guest mode shortly; additional kick IPIs can
 *                      be skipped (see the commit message: "an IPI will
 *                      happen soon, so unless we need to wait for the IPI,
 *                      we can avoid it completely").
 */
enum {
OUTSIDE_GUEST_MODE,
IN_GUEST_MODE,
EXITING_GUEST_MODE
};
struct kvm_vcpu { struct kvm_vcpu {
struct kvm *kvm; struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS #ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier preempt_notifier; struct preempt_notifier preempt_notifier;
#endif #endif
int cpu;
int vcpu_id; int vcpu_id;
struct mutex mutex; int srcu_idx;
int cpu; int mode;
atomic_t guest_mode;
struct kvm_run *run;
unsigned long requests; unsigned long requests;
unsigned long guest_debug; unsigned long guest_debug;
int srcu_idx;
struct mutex mutex;
struct kvm_run *run;
int fpu_active; int fpu_active;
int guest_fpu_loaded, guest_xcr0_loaded; int guest_fpu_loaded, guest_xcr0_loaded;
...@@ -140,6 +147,11 @@ struct kvm_vcpu { ...@@ -140,6 +147,11 @@ struct kvm_vcpu {
struct kvm_vcpu_arch arch; struct kvm_vcpu_arch arch;
}; };
/*
 * Atomically transition ->mode from IN_GUEST_MODE to EXITING_GUEST_MODE
 * and return the mode that was observed before the exchange.
 *
 * A caller that sees IN_GUEST_MODE won the transition and is responsible
 * for sending the kick IPI (see kvm_vcpu_kick, which only calls
 * smp_send_reschedule in that case).  Any other return value means no new
 * IPI is required: either the vcpu is outside guest mode, or another CPU
 * has already initiated the exit.  make_all_cpus_request likewise skips
 * vcpus for which this returns OUTSIDE_GUEST_MODE.
 *
 * NOTE(review): relies on the kernel's cmpxchg() working on a plain int
 * field; ->mode is intentionally not atomic_t per this commit.
 */
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}
/* /*
* Some of the bitops functions do not support too long bitmaps. * Some of the bitops functions do not support too long bitmaps.
* This number must be determined not to exceed such limits. * This number must be determined not to exceed such limits.
......
...@@ -171,7 +171,12 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req) ...@@ -171,7 +171,12 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
if (kvm_make_check_request(req, vcpu)) if (kvm_make_check_request(req, vcpu))
continue; continue;
cpu = vcpu->cpu; cpu = vcpu->cpu;
if (cpus != NULL && cpu != -1 && cpu != me)
/* Set ->requests bit before we read ->mode */
smp_mb();
if (cpus != NULL && cpu != -1 && cpu != me &&
kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
cpumask_set_cpu(cpu, cpus); cpumask_set_cpu(cpu, cpus);
} }
if (unlikely(cpus == NULL)) if (unlikely(cpus == NULL))
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册