Commit b6d33834, authored by Christoffer Dall, committed by Avi Kivity

KVM: Factor out kvm_vcpu_kick to arch-generic code

The kvm_vcpu_kick function performs roughly the same work on almost all
architectures, so we shouldn't keep separate copies.

PowerPC keeps a pointer to one of several interchangeable waitqueues in the
vcpu_arch structure. To accommodate this special need, a
__KVM_HAVE_ARCH_VCPU_GET_WQ define and an accompanying function,
kvm_arch_vcpu_wq(), have been added. For all other architectures this
is a generic inline that simply returns &vcpu->wq.
Acked-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Parent 66ef8931
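
To make the new arch contract concrete, the sketch below shows what a hypothetical architecture (here called "foo"; the directory and file names are invented for illustration and are not part of this patch) would have to supply so that the generic kvm_vcpu_kick() added below can drive it. Only the hook names kvm_arch_vcpu_should_kick() and kvm_arch_vcpu_wq() and the __KVM_HAVE_ARCH_VCPU_GET_WQ define come from the patch itself.

/* arch/foo/include/asm/kvm_host.h -- hypothetical, for illustration only */

/*
 * Define __KVM_HAVE_ARCH_VCPU_GET_WQ only if the architecture keeps its own
 * waitqueue pointer (as PowerPC does with vcpu->arch.wqp); otherwise the
 * generic default in include/linux/kvm_host.h, which returns &vcpu->wq,
 * is used.
 */
#define __KVM_HAVE_ARCH_VCPU_GET_WQ 1
static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.wqp;  /* assumes this arch stores a wqp pointer */
}

/* arch/foo/kvm/foo.c -- hypothetical, for illustration only */

/*
 * Return non-zero if a reschedule IPI is needed to pull the vcpu out of
 * guest mode.  An architecture without a cheaper in-guest-mode test can
 * simply return 1, as the PowerPC version in this patch does.
 */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return 1;
}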
@@ -365,6 +365,7 @@ struct thash_cb {
 };
 
 struct kvm_vcpu_stat {
+	u32 halt_wakeup;
 };
 
 struct kvm_vcpu_arch {
...
@@ -1872,21 +1872,6 @@ void kvm_arch_hardware_unsetup(void)
 {
 }
 
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
-{
-	int me;
-	int cpu = vcpu->cpu;
-
-	if (waitqueue_active(&vcpu->wq))
-		wake_up_interruptible(&vcpu->wq);
-
-	me = get_cpu();
-	if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu))
-		if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
-			smp_send_reschedule(cpu);
-	put_cpu();
-}
-
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
 {
 	return __apic_accept_irq(vcpu, irq->vector);
@@ -1956,6 +1941,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 		(kvm_highest_pending_irq(vcpu) != -1);
 }
 
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+	return (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests));
+}
+
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 				    struct kvm_mp_state *mp_state)
 {
...
@@ -498,4 +498,10 @@ struct kvm_vcpu_arch {
 #define KVM_MMIO_REG_QPR	0x0040
 #define KVM_MMIO_REG_FQPR	0x0060
 
+#define __KVM_HAVE_ARCH_VCPU_GET_WQ 1
+static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.wqp;
+}
+
 #endif /* __POWERPC_KVM_HOST_H__ */
@@ -43,6 +43,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 	       v->requests;
 }
 
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+	return 1;
+}
+
 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 {
 	int nr = kvmppc_get_gpr(vcpu, 11);
@@ -588,21 +593,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return r;
 }
 
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
-{
-	int me;
-	int cpu = vcpu->cpu;
-
-	me = get_cpu();
-	if (waitqueue_active(vcpu->arch.wqp)) {
-		wake_up_interruptible(vcpu->arch.wqp);
-		vcpu->stat.halt_wakeup++;
-	} else if (cpu != me && cpu != -1) {
-		smp_send_reschedule(vcpu->cpu);
-	}
-	put_cpu();
-}
-
 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 {
 	if (irq->irq == KVM_INTERRUPT_UNSET) {
@@ -611,6 +601,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 	}
 
 	kvmppc_core_queue_external(vcpu, irq);
 	kvm_vcpu_kick(vcpu);
 
 	return 0;
...
@@ -423,6 +423,14 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+	/* kvm common code refers to this, but never calls it */
+	BUG();
+	return 0;
+}
+
 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
 {
 	kvm_s390_vcpu_initial_reset(vcpu);
...
@@ -6403,21 +6403,9 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 		kvm_cpu_has_interrupt(vcpu));
 }
 
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
-	int me;
-	int cpu = vcpu->cpu;
-
-	if (waitqueue_active(&vcpu->wq)) {
-		wake_up_interruptible(&vcpu->wq);
-		++vcpu->stat.halt_wakeup;
-	}
-
-	me = get_cpu();
-	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
-		if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
-			smp_send_reschedule(cpu);
-	put_cpu();
+	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
 }
 
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
...
@@ -439,6 +439,7 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			     gfn_t gfn);
 
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
 void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
@@ -507,6 +508,7 @@ int kvm_arch_hardware_setup(void);
 void kvm_arch_hardware_unsetup(void);
 void kvm_arch_check_processor_compat(void *rtn);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
 
 void kvm_free_physmem(struct kvm *kvm);
 
@@ -522,6 +524,13 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
 }
 #endif
 
+#ifndef __KVM_HAVE_ARCH_VCPU_GET_WQ
+static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+{
+	return &vcpu->wq;
+}
+#endif
+
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
 void kvm_arch_destroy_vm(struct kvm *kvm);
 void kvm_free_all_assigned_devices(struct kvm *kvm);
...
@@ -1514,6 +1514,28 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 	finish_wait(&vcpu->wq, &wait);
 }
 
+/*
+ * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
+ */
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+	int me;
+	int cpu = vcpu->cpu;
+	wait_queue_head_t *wqp;
+
+	wqp = kvm_arch_vcpu_wq(vcpu);
+	if (waitqueue_active(wqp)) {
+		wake_up_interruptible(wqp);
+		++vcpu->stat.halt_wakeup;
+	}
+
+	me = get_cpu();
+	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
+		if (kvm_arch_vcpu_should_kick(vcpu))
+			smp_send_reschedule(cpu);
+	put_cpu();
+}
+
 void kvm_resched(struct kvm_vcpu *vcpu)
 {
 	if (!need_resched())
...
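
For reference, the typical caller pattern is unchanged by this patch: arch code records the pending work on the vcpu and then calls the (now generic) kvm_vcpu_kick(). The wrapper function below is a hypothetical illustration; kvm_make_request() and KVM_REQ_EVENT are existing kernel symbols, and the pairing mirrors how x86's local APIC code already uses them.

/* Illustration only -- not part of this patch. */
static void example_deliver_to_vcpu(struct kvm_vcpu *vcpu)
{
        /* Make the pending event visible to the vcpu on its next entry. */
        kvm_make_request(KVM_REQ_EVENT, vcpu); /* KVM_REQ_EVENT is x86's request bit */

        /*
         * Wake the vcpu if it is sleeping in kvm_vcpu_block(), or send a
         * reschedule IPI if kvm_arch_vcpu_should_kick() reports it is
         * running in guest mode on another cpu.
         */
        kvm_vcpu_kick(vcpu);
}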