Commit ec257165 authored by Paul Mackerras, committed by Alexander Graf

KVM: PPC: Book3S HV: Make use of unused threads when running guests

When running a virtual core of a guest that is configured with fewer
threads per core than the physical cores have, the extra physical
threads are currently unused.  This patch makes it possible to use them
to run one or more other virtual cores from the same guest when certain
conditions are met.  This applies on POWER7, and on POWER8 to guests
with one thread per virtual core.  (It doesn't apply to POWER8 guests
with multiple threads per vcore because they require a 1-1 virtual to
physical thread mapping in order to be able to use msgsndp and the
TIR.)
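
(As a rough illustration only, here is a compilable user-space sketch of
that eligibility rule.  struct vcore, struct guest, may_share_core and the
is_power8 flag are invented names for this example; the register
comparisons mirror the can_piggyback() check added further down in this
patch.)

#include <stdbool.h>

struct guest;                           /* stand-in for struct kvm */

struct vcore {
        struct guest *kvm;              /* owning guest */
        int num_threads;                /* threads per virtual core */
        unsigned long tb_offset;        /* per-core registers that must match */
        unsigned long lpcr;
        unsigned long pcr;
};

/*
 * Decide whether vcore b may share a physical core with vcore a:
 * same guest, matching per-core registers, and on POWER8 only
 * single-threaded vcores (multi-threaded vcores need the 1-1
 * virtual-to-physical thread mapping for msgsndp and the TIR).
 */
static bool may_share_core(const struct vcore *a, const struct vcore *b,
                           bool is_power8)
{
        if (a->kvm != b->kvm)
                return false;
        if (a->tb_offset != b->tb_offset || a->lpcr != b->lpcr ||
            a->pcr != b->pcr)
                return false;
        if (is_power8 && (a->num_threads > 1 || b->num_threads > 1))
                return false;
        return true;
}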

The idea is that we maintain a list of preempted vcores for each
physical cpu (i.e. each core, since the host runs single-threaded).
Then, when a vcore is about to run, it checks to see if there are
any vcores on the list for its physical cpu that could be
piggybacked onto this vcore's execution.  If so, those additional
vcores are put into the VCORE_PIGGYBACK state and their runnable VCPU
threads are started alongside those of the original vcore, which is
called the master vcore.
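
The following toy user-space model sketches that collection step under
simplified assumptions: toy_vcore, preempt_list and collect_extras are
made-up names standing in for the real collect_piggybacks()/can_piggyback()
pair, and locking, eligibility checks and vcore state changes are omitted.

#include <stdio.h>

#define MAX_PREEMPTED 8                 /* arbitrary bound for this toy model */

struct toy_vcore {
        int num_threads;                /* runnable threads in this virtual core */
        int piggyback;                  /* set when picked up by a master vcore */
};

/* One entry per physical core: vcores whose runner task was preempted. */
struct preempt_list {
        struct toy_vcore *vc[MAX_PREEMPTED];
        int count;
};

/*
 * Walk the preempted list for this physical core and pick up as many
 * extra vcores as still fit alongside the master vcore, limited by the
 * number of hardware threads per core.  Picked vcores are flagged, the
 * way the real code moves them to VCORE_PIGGYBACK.
 */
static int collect_extras(struct preempt_list *lp, struct toy_vcore *master,
                          int threads_per_core)
{
        int used = master->num_threads;
        int picked = 0;

        for (int i = 0; i < lp->count; i++) {
                struct toy_vcore *pvc = lp->vc[i];

                if (used + pvc->num_threads > threads_per_core)
                        continue;       /* does not fit; try the next one */
                pvc->piggyback = 1;
                used += pvc->num_threads;
                picked++;
        }
        return picked;
}

int main(void)
{
        struct toy_vcore master = { .num_threads = 1 };
        struct toy_vcore a = { .num_threads = 1 }, b = { .num_threads = 4 };
        struct preempt_list lp = { .vc = { &a, &b }, .count = 2 };

        /* On an 8-thread core both extras fit; with 2 threads only 'a' would. */
        printf("picked %d extra vcore(s)\n", collect_extras(&lp, &master, 8));
        return 0;
}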

After the vcores have exited the guest, the extra ones are put back
onto the preempted list if any of their VCPUs are still runnable and
not idle.

This means that vcpu->arch.ptid is no longer necessarily the same as
the physical thread that the vcpu runs on.  To make it easier for code
that wants to send an IPI to know which CPU to target, we now store
that CPU number in a new field in struct kvm_vcpu_arch, called
thread_cpu.
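
A minimal sketch of the resulting kick logic is below, assuming invented
names (toy_vcpu, pick_kick_target): the real kvmppc_fast_vcpu_kick_hv()
first tries thread_cpu and only then falls back to vcpu->cpu, which points
to the first thread of the core.

/* Invented illustration, not kernel code. */
struct toy_vcpu {
        int cpu;                /* first thread of the physical core, or -1 */
        int thread_cpu;         /* exact hardware thread the vcpu runs on, or -1 */
};

/* Return the CPU to send a wakeup IPI to, or -1 if none is usable. */
static int pick_kick_target(const struct toy_vcpu *vcpu, int nr_cpu_ids)
{
        if (vcpu->thread_cpu >= 0 && vcpu->thread_cpu < nr_cpu_ids)
                return vcpu->thread_cpu;        /* precise thread, preferred */
        if (vcpu->cpu >= 0 && vcpu->cpu < nr_cpu_ids)
                return vcpu->cpu;               /* fall back to the core's thread 0 */
        return -1;
}
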
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Tested-by: Laurent Vivier <lvivier@redhat.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Parent 845ac985
@@ -278,7 +278,9 @@ struct kvmppc_vcore {
u16 last_cpu;
u8 vcore_state;
u8 in_guest;
struct kvmppc_vcore *master_vcore;
struct list_head runnable_threads;
struct list_head preempt_list;
spinlock_t lock;
wait_queue_head_t wq;
spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
@@ -300,12 +302,18 @@ struct kvmppc_vcore {
#define VCORE_EXIT_MAP(vc) ((vc)->entry_exit_map >> 8)
#define VCORE_IS_EXITING(vc) (VCORE_EXIT_MAP(vc) != 0)
/* Values for vcore_state */
/*
* Values for vcore_state.
* Note that these are arranged such that lower values
* (< VCORE_SLEEPING) don't require stolen time accounting
* on load/unload, and higher values do.
*/
#define VCORE_INACTIVE 0
#define VCORE_SLEEPING 1
#define VCORE_PREEMPT 2
#define VCORE_RUNNING 3
#define VCORE_EXITING 4
#define VCORE_PREEMPT 1
#define VCORE_PIGGYBACK 2
#define VCORE_SLEEPING 3
#define VCORE_RUNNING 4
#define VCORE_EXITING 5
/*
* Struct used to manage memory for a virtual processor area
@@ -619,6 +627,7 @@ struct kvm_vcpu_arch {
int trap;
int state;
int ptid;
int thread_cpu;
bool timer_running;
wait_queue_head_t cpu_run;
@@ -512,6 +512,8 @@ int main(void)
DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
DEFINE(VCPU_HEIR, offsetof(struct kvm_vcpu, arch.emul_inst));
DEFINE(VCPU_CPU, offsetof(struct kvm_vcpu, cpu));
DEFINE(VCPU_THREAD_CPU, offsetof(struct kvm_vcpu, arch.thread_cpu));
#endif
#ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
@@ -81,6 +81,9 @@ static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
#define MPP_BUFFER_ORDER 3
#endif
static int target_smt_mode;
module_param(target_smt_mode, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
@@ -114,7 +117,7 @@ static bool kvmppc_ipi_thread(int cpu)
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
int cpu = vcpu->cpu;
int cpu;
wait_queue_head_t *wqp;
wqp = kvm_arch_vcpu_wq(vcpu);
@@ -123,10 +126,11 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
++vcpu->stat.halt_wakeup;
}
if (kvmppc_ipi_thread(cpu + vcpu->arch.ptid))
if (kvmppc_ipi_thread(vcpu->arch.thread_cpu))
return;
/* CPU points to the first thread of the core */
cpu = vcpu->cpu;
if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
smp_send_reschedule(cpu);
}
@@ -164,6 +168,27 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
* they should never fail.)
*/
static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
{
unsigned long flags;
spin_lock_irqsave(&vc->stoltb_lock, flags);
vc->preempt_tb = mftb();
spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}
static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
{
unsigned long flags;
spin_lock_irqsave(&vc->stoltb_lock, flags);
if (vc->preempt_tb != TB_NIL) {
vc->stolen_tb += mftb() - vc->preempt_tb;
vc->preempt_tb = TB_NIL;
}
spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}
static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
@@ -175,14 +200,9 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
* vcpu, and once it is set to this vcpu, only this task
* ever sets it to NULL.
*/
if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) {
spin_lock_irqsave(&vc->stoltb_lock, flags);
if (vc->preempt_tb != TB_NIL) {
vc->stolen_tb += mftb() - vc->preempt_tb;
vc->preempt_tb = TB_NIL;
}
spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}
if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
kvmppc_core_end_stolen(vc);
spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
vcpu->arch.busy_preempt != TB_NIL) {
@@ -197,11 +217,9 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long flags;
if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) {
spin_lock_irqsave(&vc->stoltb_lock, flags);
vc->preempt_tb = mftb();
spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}
if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
kvmppc_core_start_stolen(vc);
spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
vcpu->arch.busy_preempt = mftb();
@@ -641,7 +659,8 @@ static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
spin_lock(&vcore->lock);
if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
vcore->vcore_state != VCORE_INACTIVE)
vcore->vcore_state != VCORE_INACTIVE &&
vcore->runner)
target = vcore->runner;
spin_unlock(&vcore->lock);
@@ -1431,6 +1450,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
vcore->lpcr = kvm->arch.lpcr;
vcore->first_vcpuid = core * threads_per_subcore;
vcore->kvm = kvm;
INIT_LIST_HEAD(&vcore->preempt_list);
vcore->mpp_buffer_is_valid = false;
@@ -1655,6 +1675,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
spin_unlock(&vcore->lock);
vcpu->arch.vcore = vcore;
vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
vcpu->arch.thread_cpu = -1;
vcpu->arch.cpu_type = KVM_CPU_3S_64;
kvmppc_sanity_check(vcpu);
@@ -1787,6 +1808,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
int cpu;
struct paca_struct *tpaca;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
struct kvmppc_vcore *mvc = vc->master_vcore;
if (vcpu->arch.timer_running) {
hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
@@ -1794,10 +1816,11 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
}
cpu = vc->pcpu + vcpu->arch.ptid;
tpaca = &paca[cpu];
tpaca->kvm_hstate.kvm_vcore = vc;
tpaca->kvm_hstate.ptid = vcpu->arch.ptid;
vcpu->cpu = vc->pcpu;
/* Order stores to hstate.kvm_vcore etc. before store to kvm_vcpu */
tpaca->kvm_hstate.kvm_vcore = mvc;
tpaca->kvm_hstate.ptid = cpu - mvc->pcpu;
vcpu->cpu = mvc->pcpu;
vcpu->arch.thread_cpu = cpu;
/* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
smp_wmb();
tpaca->kvm_hstate.kvm_vcpu = vcpu;
if (cpu != smp_processor_id())
@@ -1890,6 +1913,114 @@ static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
}
/*
* A list of virtual cores for each physical CPU.
* These are vcores that could run but their runner VCPU tasks are
* (or may be) preempted.
*/
struct preempted_vcore_list {
struct list_head list;
spinlock_t lock;
};
static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores);
static void init_vcore_lists(void)
{
int cpu;
for_each_possible_cpu(cpu) {
struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu);
spin_lock_init(&lp->lock);
INIT_LIST_HEAD(&lp->list);
}
}
static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
{
struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
vc->vcore_state = VCORE_PREEMPT;
vc->pcpu = smp_processor_id();
if (vc->num_threads < threads_per_subcore) {
spin_lock(&lp->lock);
list_add_tail(&vc->preempt_list, &lp->list);
spin_unlock(&lp->lock);
}
/* Start accumulating stolen time */
kvmppc_core_start_stolen(vc);
}
static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
{
struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
kvmppc_core_end_stolen(vc);
if (!list_empty(&vc->preempt_list)) {
spin_lock(&lp->lock);
list_del_init(&vc->preempt_list);
spin_unlock(&lp->lock);
}
vc->vcore_state = VCORE_INACTIVE;
}
struct core_info {
int total_threads;
struct list_head vcs;
};
static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
{
memset(cip, 0, sizeof(*cip));
cip->total_threads = vc->num_threads;
INIT_LIST_HEAD(&cip->vcs);
list_add_tail(&vc->preempt_list, &cip->vcs);
}
static void init_master_vcore(struct kvmppc_vcore *vc)
{
vc->master_vcore = vc;
vc->entry_exit_map = 0;
vc->in_guest = 0;
vc->napping_threads = 0;
vc->conferring_threads = 0;
}
/*
* Work out whether it is possible to piggyback the execute of
* vcore *pvc onto the execution of the other vcores described in *cip.
*/
static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
int target_threads)
{
struct kvmppc_vcore *vc;
vc = list_first_entry(&cip->vcs, struct kvmppc_vcore, preempt_list);
/* require same VM and same per-core reg values */
if (pvc->kvm != vc->kvm ||
pvc->tb_offset != vc->tb_offset ||
pvc->pcr != vc->pcr ||
pvc->lpcr != vc->lpcr)
return false;
/* P8 guest with > 1 thread per core would see wrong TIR value */
if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
(vc->num_threads > 1 || pvc->num_threads > 1))
return false;
if (cip->total_threads + pvc->num_threads > target_threads)
return false;
cip->total_threads += pvc->num_threads;
pvc->master_vcore = vc;
list_del(&pvc->preempt_list);
list_add_tail(&pvc->preempt_list, &cip->vcs);
return true;
}
static void prepare_threads(struct kvmppc_vcore *vc)
{
struct kvm_vcpu *vcpu, *vnext;
@@ -1909,12 +2040,45 @@ static void prepare_threads(struct kvmppc_vcore *vc)
}
}
static void post_guest_process(struct kvmppc_vcore *vc)
static void collect_piggybacks(struct core_info *cip, int target_threads)
{
struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
struct kvmppc_vcore *pvc, *vcnext;
spin_lock(&lp->lock);
list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) {
if (!spin_trylock(&pvc->lock))
continue;
prepare_threads(pvc);
if (!pvc->n_runnable) {
list_del_init(&pvc->preempt_list);
if (pvc->runner == NULL) {
pvc->vcore_state = VCORE_INACTIVE;
kvmppc_core_end_stolen(pvc);
}
spin_unlock(&pvc->lock);
continue;
}
if (!can_piggyback(pvc, cip, target_threads)) {
spin_unlock(&pvc->lock);
continue;
}
kvmppc_core_end_stolen(pvc);
pvc->vcore_state = VCORE_PIGGYBACK;
if (cip->total_threads >= target_threads)
break;
}
spin_unlock(&lp->lock);
}
static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
{
int still_running = 0;
u64 now;
long ret;
struct kvm_vcpu *vcpu, *vnext;
spin_lock(&vc->lock);
now = get_tb();
list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
arch.run_list) {
@@ -1933,17 +2097,31 @@ static void post_guest_process(struct kvmppc_vcore *vc)
vcpu->arch.ret = ret;
vcpu->arch.trap = 0;
if (vcpu->arch.ceded) {
if (!is_kvmppc_resume_guest(ret))
kvmppc_end_cede(vcpu);
else
if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
if (vcpu->arch.pending_exceptions)
kvmppc_core_prepare_to_enter(vcpu);
if (vcpu->arch.ceded)
kvmppc_set_timer(vcpu);
}
if (!is_kvmppc_resume_guest(vcpu->arch.ret)) {
else
++still_running;
} else {
kvmppc_remove_runnable(vc, vcpu);
wake_up(&vcpu->arch.cpu_run);
}
}
list_del_init(&vc->preempt_list);
if (!is_master) {
vc->vcore_state = vc->runner ? VCORE_PREEMPT : VCORE_INACTIVE;
if (still_running > 0)
kvmppc_vcore_preempt(vc);
if (vc->n_runnable > 0 && vc->runner == NULL) {
/* make sure there's a candidate runner awake */
vcpu = list_first_entry(&vc->runnable_threads,
struct kvm_vcpu, arch.run_list);
wake_up(&vcpu->arch.cpu_run);
}
}
spin_unlock(&vc->lock);
}
/*
@@ -1955,6 +2133,10 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
struct kvm_vcpu *vcpu, *vnext;
int i;
int srcu_idx;
struct core_info core_info;
struct kvmppc_vcore *pvc, *vcnext;
int pcpu, thr;
int target_threads;
/*
* Remove from the list any threads that have a signal pending
@@ -1969,11 +2151,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
/*
* Initialize *vc.
*/
vc->entry_exit_map = 0;
init_master_vcore(vc);
vc->preempt_tb = TB_NIL;
vc->in_guest = 0;
vc->napping_threads = 0;
vc->conferring_threads = 0;
/*
* Make sure we are running on primary threads, and that secondary
@@ -1991,12 +2170,28 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
goto out;
}
/*
* See if we could run any other vcores on the physical core
* along with this one.
*/
init_core_info(&core_info, vc);
pcpu = smp_processor_id();
target_threads = threads_per_subcore;
if (target_smt_mode && target_smt_mode < target_threads)
target_threads = target_smt_mode;
if (vc->num_threads < target_threads)
collect_piggybacks(&core_info, target_threads);
vc->pcpu = smp_processor_id();
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
kvmppc_start_thread(vcpu);
kvmppc_create_dtl_entry(vcpu, vc);
trace_kvm_guest_enter(vcpu);
thr = 0;
list_for_each_entry(pvc, &core_info.vcs, preempt_list) {
pvc->pcpu = pcpu + thr;
list_for_each_entry(vcpu, &pvc->runnable_threads,
arch.run_list) {
kvmppc_start_thread(vcpu);
kvmppc_create_dtl_entry(vcpu, pvc);
trace_kvm_guest_enter(vcpu);
}
thr += pvc->num_threads;
}
/* Set this explicitly in case thread 0 doesn't have a vcpu */
@@ -2008,7 +2203,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
trace_kvmppc_run_core(vc, 0);
spin_unlock(&vc->lock);
list_for_each_entry(pvc, &core_info.vcs, preempt_list)
spin_unlock(&pvc->lock);
kvm_guest_enter();
@@ -2019,32 +2215,30 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
__kvmppc_vcore_entry();
spin_lock(&vc->lock);
if (vc->mpp_buffer)
kvmppc_start_saving_l2_cache(vc);
/* disable sending of IPIs on virtual external irqs */
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
vcpu->cpu = -1;
srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
spin_lock(&vc->lock);
/* prevent other vcpu threads from doing kvmppc_start_thread() now */
vc->vcore_state = VCORE_EXITING;
/* wait for secondary threads to finish writing their state to memory */
kvmppc_wait_for_nap();
for (i = 0; i < threads_per_subcore; ++i)
kvmppc_release_hwthread(vc->pcpu + i);
/* prevent other vcpu threads from doing kvmppc_start_thread() now */
vc->vcore_state = VCORE_EXITING;
spin_unlock(&vc->lock);
srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
/* make sure updates to secondary vcpu structs are visible now */
smp_mb();
kvm_guest_exit();
preempt_enable();
list_for_each_entry_safe(pvc, vcnext, &core_info.vcs, preempt_list)
post_guest_process(pvc, pvc == vc);
spin_lock(&vc->lock);
post_guest_process(vc);
preempt_enable();
out:
vc->vcore_state = VCORE_INACTIVE;
@@ -2055,13 +2249,17 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
* Wait for some other vcpu thread to execute us, and
* wake us up when we need to handle something in the host.
*/
static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
struct kvm_vcpu *vcpu, int wait_state)
{
DEFINE_WAIT(wait);
prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
spin_unlock(&vc->lock);
schedule();
spin_lock(&vc->lock);
}
finish_wait(&vcpu->arch.cpu_run, &wait);
}
@@ -2137,7 +2335,19 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
* this thread straight away and have it join in.
*/
if (!signal_pending(current)) {
if (vc->vcore_state == VCORE_RUNNING && !VCORE_IS_EXITING(vc)) {
if (vc->vcore_state == VCORE_PIGGYBACK) {
struct kvmppc_vcore *mvc = vc->master_vcore;
if (spin_trylock(&mvc->lock)) {
if (mvc->vcore_state == VCORE_RUNNING &&
!VCORE_IS_EXITING(mvc)) {
kvmppc_create_dtl_entry(vcpu, vc);
kvmppc_start_thread(vcpu);
trace_kvm_guest_enter(vcpu);
}
spin_unlock(&mvc->lock);
}
} else if (vc->vcore_state == VCORE_RUNNING &&
!VCORE_IS_EXITING(vc)) {
kvmppc_create_dtl_entry(vcpu, vc);
kvmppc_start_thread(vcpu);
trace_kvm_guest_enter(vcpu);
@@ -2149,10 +2359,11 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
!signal_pending(current)) {
if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
kvmppc_vcore_end_preempt(vc);
if (vc->vcore_state != VCORE_INACTIVE) {
spin_unlock(&vc->lock);
kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
spin_lock(&vc->lock);
kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE);
continue;
}
list_for_each_entry_safe(v, vn, &vc->runnable_threads,
@@ -2179,10 +2390,11 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
if (n_ceded == vc->n_runnable) {
kvmppc_vcore_blocked(vc);
} else if (should_resched()) {
vc->vcore_state = VCORE_PREEMPT;
kvmppc_vcore_preempt(vc);
/* Let something else run */
cond_resched_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
if (vc->vcore_state == VCORE_PREEMPT)
kvmppc_vcore_end_preempt(vc);
} else {
kvmppc_run_core(vc);
}
@@ -2191,11 +2403,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
(vc->vcore_state == VCORE_RUNNING ||
vc->vcore_state == VCORE_EXITING)) {
spin_unlock(&vc->lock);
kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
spin_lock(&vc->lock);
}
vc->vcore_state == VCORE_EXITING))
kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);
if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
kvmppc_remove_runnable(vc, vcpu);
@@ -2755,6 +2964,8 @@ static int kvmppc_book3s_init_hv(void)
init_default_hcalls();
init_vcore_lists();
r = kvmppc_mmu_hv_init();
return r;
}
@@ -110,14 +110,15 @@ void __init kvm_cma_reserve(void)
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
unsigned int yield_count)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
int ptid = local_paca->kvm_hstate.ptid;
int threads_running;
int threads_ceded;
int threads_conferring;
u64 stop = get_tb() + 10 * tb_ticks_per_usec;
int rv = H_SUCCESS; /* => don't yield */
set_bit(vcpu->arch.ptid, &vc->conferring_threads);
set_bit(ptid, &vc->conferring_threads);
while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
threads_running = VCORE_ENTRY_MAP(vc);
threads_ceded = vc->napping_threads;
@@ -127,7 +128,7 @@ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
break;
}
}
clear_bit(vcpu->arch.ptid, &vc->conferring_threads);
clear_bit(ptid, &vc->conferring_threads);
return rv;
}
@@ -67,14 +67,12 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
}
/* Check if the core is loaded, if not, too hard */
cpu = vcpu->cpu;
cpu = vcpu->arch.thread_cpu;
if (cpu < 0 || cpu >= nr_cpu_ids) {
this_icp->rm_action |= XICS_RM_KICK_VCPU;
this_icp->rm_kick_target = vcpu;
return;
}
/* In SMT cpu will always point to thread 0, we adjust it */
cpu += vcpu->arch.ptid;
smp_mb();
kvmhv_rm_send_ipi(cpu);
@@ -1176,6 +1176,11 @@ mc_cont:
ld r9, HSTATE_KVM_VCPU(r13)
lwz r12, VCPU_TRAP(r9)
/* Stop others sending VCPU interrupts to this physical CPU */
li r0, -1
stw r0, VCPU_CPU(r9)
stw r0, VCPU_THREAD_CPU(r9)
/* Save guest CTRL register, set runlatch to 1 */
mfspr r6,SPRN_CTRLF
stw r6,VCPU_CTRL(r9)