Commit 5d5b99cd authored by Paul Mackerras, committed by Alexander Graf

KVM: PPC: Book3S HV: Get rid of vcore nap_count and n_woken

We can tell when a secondary thread has finished running a guest by
the fact that it clears its kvm_hstate.kvm_vcpu pointer, so there
is no real need for the nap_count field in the kvmppc_vcore struct.
This changes kvmppc_wait_for_nap to poll the kvm_hstate.kvm_vcpu
pointers of the secondary threads rather than polling vc->nap_count.
Besides reducing the size of the kvmppc_vcore struct by 8 bytes,
this also means that we can tell which secondary threads have got
stuck and thus print a more informative error message.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Parent 25fedfca
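The synchronization pattern the message describes is a plain publish/consume handshake on a single pointer. The sketch below is a rough userspace analogue, not kernel code: every name in it is made up, and C11 acquire/release atomics stand in for the kernel's smp_wmb()/lwsync barriers. The primary sets up shared state and publishes the pointer last; the secondary consumes the state and clears the pointer when finished; the primary polls for NULL.

/*
 * Userspace analogue of the handshake (illustrative names only;
 * acquire/release stands in for smp_wmb()/lwsync).
 * Build: cc -std=c11 -pthread handshake.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct vcore_state { int data; };          /* stands in for hstate.kvm_vcore etc. */
static struct vcore_state shared;
static _Atomic(struct vcore_state *) vcpu; /* stands in for hstate.kvm_vcpu */

static void *secondary_thread(void *arg)
{
	struct vcore_state *s;

	(void)arg;
	/* Wait to be handed work: the pointer is published last. */
	while ((s = atomic_load_explicit(&vcpu, memory_order_acquire)) == NULL)
		;
	printf("secondary sees data = %d\n", s->data);

	/*
	 * "Run the guest", then signal completion by clearing the
	 * pointer.  The release store makes every earlier write visible
	 * before the primary can see NULL -- the job lwsync does in the
	 * assembly hunk below.
	 */
	atomic_store_explicit(&vcpu, NULL, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, secondary_thread, NULL);

	shared.data = 42;   /* set up all state first ... */
	/* ... then publish the pointer, as kvmppc_start_thread() now does */
	atomic_store_explicit(&vcpu, &shared, memory_order_release);

	/* Poll the pointer back to NULL, as kvmppc_wait_for_nap() now does. */
	while (atomic_load_explicit(&vcpu, memory_order_acquire) != NULL)
		;
	pthread_join(t, NULL);
	return 0;
}

Because the pointer itself now carries the "finished" signal, the separate nap_count/n_woken bookkeeping becomes redundant, which is exactly what the diff below removes.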
@@ -272,8 +272,6 @@ struct kvmppc_vcore {
 	int n_runnable;
 	int num_threads;
 	int entry_exit_count;
-	int n_woken;
-	int nap_count;
 	int napping_threads;
 	int first_vcpuid;
 	u16 pcpu;
@@ -563,7 +563,6 @@ int main(void)
 	DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
 	DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
 	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
-	DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
 	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
 	DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
 	DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm));
@@ -1729,8 +1729,10 @@ static int kvmppc_grab_hwthread(int cpu)
 	tpaca = &paca[cpu];

 	/* Ensure the thread won't go into the kernel if it wakes */
-	tpaca->kvm_hstate.hwthread_req = 1;
 	tpaca->kvm_hstate.kvm_vcpu = NULL;
+	tpaca->kvm_hstate.napping = 0;
+	smp_wmb();
+	tpaca->kvm_hstate.hwthread_req = 1;

 	/*
 	 * If the thread is already executing in the kernel (e.g. handling
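The reordering in this hunk is the setup half of the new handshake: hwthread_req is now raised only after kvm_vcpu has been cleared and napping zeroed, with an smp_wmb() in between, so a hardware thread that wakes and sees hwthread_req set cannot observe a stale vcpu pointer or napping flag left over from a previous run.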
@@ -1773,35 +1775,43 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
 	}
 	cpu = vc->pcpu + vcpu->arch.ptid;
 	tpaca = &paca[cpu];
-	tpaca->kvm_hstate.kvm_vcpu = vcpu;
 	tpaca->kvm_hstate.kvm_vcore = vc;
 	tpaca->kvm_hstate.ptid = vcpu->arch.ptid;
 	vcpu->cpu = vc->pcpu;
+	/* Order stores to hstate.kvm_vcore etc. before store to kvm_vcpu */
 	smp_wmb();
+	tpaca->kvm_hstate.kvm_vcpu = vcpu;
 #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
-	if (cpu != smp_processor_id()) {
+	if (cpu != smp_processor_id())
 		xics_wake_cpu(cpu);
-		if (vcpu->arch.ptid)
-			++vc->n_woken;
-	}
 #endif
 }

-static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
+static void kvmppc_wait_for_nap(void)
 {
-	int i;
+	int cpu = smp_processor_id();
+	int i, loops;

-	HMT_low();
-	i = 0;
-	while (vc->nap_count < vc->n_woken) {
-		if (++i >= 1000000) {
-			pr_err("kvmppc_wait_for_nap timeout %d %d\n",
-			       vc->nap_count, vc->n_woken);
-			break;
+	for (loops = 0; loops < 1000000; ++loops) {
+		/*
+		 * Check if all threads are finished.
+		 * We set the vcpu pointer when starting a thread
+		 * and the thread clears it when finished, so we look
+		 * for any threads that still have a non-NULL vcpu ptr.
+		 */
+		for (i = 1; i < threads_per_subcore; ++i)
+			if (paca[cpu + i].kvm_hstate.kvm_vcpu)
+				break;
+		if (i == threads_per_subcore) {
+			HMT_medium();
+			return;
 		}
-		cpu_relax();
+		HMT_low();
 	}
 	HMT_medium();
+	for (i = 1; i < threads_per_subcore; ++i)
+		if (paca[cpu + i].kvm_hstate.kvm_vcpu)
+			pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
 }

 /*
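Two details of the rewritten wait loop are worth noting. The poll starts at i = 1 because thread 0 is the caller itself, and HMT_low()/HMT_medium() lower the polling thread's SMT hardware priority while it spins and restore it afterwards, so the busy-wait takes fewer cycles away from the secondaries it is waiting on. And because the loop is bounded at 1,000,000 iterations, a wedged secondary no longer hangs the host silently: the final pass reports exactly which CPU still holds a non-NULL vcpu pointer, which is the more informative error message promised in the commit text.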
@@ -1942,8 +1952,6 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	/*
 	 * Initialize *vc.
 	 */
-	vc->n_woken = 0;
-	vc->nap_count = 0;
 	vc->entry_exit_count = 0;
 	vc->preempt_tb = TB_NIL;
 	vc->in_guest = 0;
@@ -2002,8 +2010,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
 		vcpu->cpu = -1;
 	/* wait for secondary threads to finish writing their state to memory */
-	if (vc->nap_count < vc->n_woken)
-		kvmppc_wait_for_nap(vc);
+	kvmppc_wait_for_nap();
 	for (i = 0; i < threads_per_subcore; ++i)
 		kvmppc_release_hwthread(vc->pcpu + i);
 	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
@@ -292,26 +292,21 @@ kvm_secondary_got_guest:
 	ld	r6, PACA_DSCR(r13)
 	std	r6, HSTATE_DSCR(r13)

+	/* Order load of vcore, ptid etc. after load of vcpu */
+	lwsync
 	bl	kvmppc_hv_entry

 	/* Back from the guest, go back to nap */

 	/* Clear our vcpu pointer so we don't come back in early */
 	li	r0, 0
-	std	r0, HSTATE_KVM_VCPU(r13)
 	/*
-	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
-	 * the nap_count, because once the increment to nap_count is
-	 * visible we could be given another vcpu.
+	 * Once we clear HSTATE_KVM_VCPU(r13), the code in
+	 * kvmppc_run_core() is going to assume that all our vcpu
+	 * state is visible in memory.  This lwsync makes sure
+	 * that that is true.
 	 */
 	lwsync
+	std	r0, HSTATE_KVM_VCPU(r13)

-	/* increment the nap count and then go to nap mode */
-	ld	r4, HSTATE_KVM_VCORE(r13)
-	addi	r4, r4, VCORE_NAP_COUNT
-51:	lwarx	r3, 0, r4
-	addi	r3, r3, 1
-	stwcx.	r3, 0, r4
-	bne	51b
-
 	/*
 	 * At this point we have finished executing in the guest.
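The two lwsync barriers here pair with the C side. The one added before the call to kvmppc_hv_entry orders the secondary's load of its kvm_vcpu pointer before its later loads of vcore, ptid and the rest of the state, matching the smp_wmb() that kvmppc_start_thread() now executes before publishing the pointer. The one before the store that clears HSTATE_KVM_VCPU (the store itself was moved below the barrier) makes all of the secondary's state writes visible in memory before kvmppc_wait_for_nap() can observe the NULL pointer.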